hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73e7df5fa56ae235391f6dfbd43c39fa49dea73 | 7,179 | py | Python | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/abfall_io.py | ranseyer/hacs_waste_collection_schedule | ad450b9bac4be371a091e7814bdd1c8d2983df4e | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/abfall_io.py | ranseyer/hacs_waste_collection_schedule | ad450b9bac4be371a091e7814bdd1c8d2983df4e | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/abfall_io.py | ranseyer/hacs_waste_collection_schedule | ad450b9bac4be371a091e7814bdd1c8d2983df4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
from html.parser import HTMLParser
import inquirer
import requests
MODUS_KEY = "d6c5855a62cf32a4dadbc2831f0f295f"
HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
# IDs of CONFIG VARIABLES
CONFIG_VARIABLES = [
"f_id_kommune",
"f_id_bezirk",
"f_id_strasse",
"f_id_strasse_hnr",
"f_abfallarten",
]
ACTION_EXTRACTOR_PATTERN = re.compile(
'(?<=awk-data-onchange-submit-waction=")[^\\n\\r"]+'
)
DISTRICT_CHOICES = [
("ALBA Berlin", "9583a2fa1df97ed95363382c73b41b1b"),
("Böblingen", "8215c62763967916979e0e8566b6172e"),
("Calw", "690a3ae4906c52b232c1322e2f88550c"),
("eBe Essen", "9b5390f095c779b9128a51db35092c9c"),
("Freudenstadt", "595f903540a36fe8610ec39aa3a06f6a"),
("Göppingen", "365d791b58c7e39b20bb8f167bd33981"),
("Heilbronn (Landkreis)", "1a1e7b200165683738adddc4bd0199a2"),
("Kitzingen", "594f805eb33677ad5bc645aeeeaf2623"),
("Landsberg am Lech", "7df877d4f0e63decfb4d11686c54c5d6"),
("Landshut", "bd0c2d0177a0849a905cded5cb734a6f"),
("Ludwigshafen am Rhein", "6efba91e69a5b454ac0ae3497978fe1d"),
("MüllALARM / Schönmackers", "e5543a3e190cb8d91c645660ad60965f"),
("Rhein-Neckar-Kreis", "914fb9d000a9a05af4fd54cfba478860"),
("Rotenburg (Wümme)", "645adb3c27370a61f7eabbb2039de4f1"),
("Sigmaringen", "39886c5699d14e040063c0142cd0740b"),
("Unterallgäu", "c22b850ea4eff207a273e46847e417c5"),
("Westerwaldkreis", "248deacbb49b06e868d29cb53c8ef034"),
]
class OptionParser(HTMLParser):
"""Parser for HTML option list."""
TEXTBOXES = "textboxes"
def error(self, message):
pass
def __init__(self, target_var):
super().__init__()
self._target_var = target_var
self._within_option = False
self._option_name = ""
self._option_value = "-1"
self._choices = []
self._is_selector = False
self._is_text_input = False
self._text_field_id = ""
self._text_hint = ""
self._text_name = ""
self._label_for_id = ""
self._label_contents = {}
@property
def choices(self):
return self._choices
def handle_starttag(self, tag, attrs):
attributes = dict(attrs)
if tag == "label":
if "for" in attributes:
self._label_for_id = attributes["for"]
if tag == "input":
if "type" in attributes:
if attributes["type"] == "hidden":
if (
"name" in attributes
and "value" in attributes
and attributes["name"] == self._target_var
):
# self._within_option = True
self._is_selector = True
self._option_value = attributes["value"]
self._choices.append((attributes["value"], attributes["value"]))
elif (
self._target_var == OptionParser.TEXTBOXES
and attributes["type"] == "text"
):
self._is_text_input = True
if "id" in attributes:
self._text_field_id = attributes["id"]
if "placeholder" in attributes:
self._text_hint = attributes["placeholder"]
if "name" in attributes:
self._text_name = attributes["name"]
if tag == "select":
if "name" in attributes and attributes["name"] == self._target_var:
self._is_selector = True
if tag == "option" and self._is_selector:
self._within_option = True
if "value" in attributes:
self._option_value = attributes["value"]
def handle_endtag(self, tag):
if (
self._within_option
and len(self._option_name) > 0
and self._option_value != "-1"
):
self._choices.append((self._option_name, self._option_value))
self._within_option = False
self._option_name = ""
self._option_value = "-1"
def handle_data(self, data):
if self._within_option:
self._option_name += data
if len(self._label_for_id) > 0:
self._label_contents[self._label_for_id] = data
self._label_for_id = ""
@property
def is_selector(self):
return self._is_selector
@property
def is_text_input(self):
return self._is_text_input
@property
def text_name(self):
return self._text_name
@property
def text_field_id(self):
return self._text_field_id
@property
def label_contents(self):
return self._label_contents
@property
def text_hint(self):
return self._text_hint
def select_and_query(data, answers):
relevant_config_vars = []
for config_var in CONFIG_VARIABLES:
if config_var not in answers and config_var in data:
relevant_config_vars.append(config_var)
for target_var in relevant_config_vars:
# parser HTML option list
parser = OptionParser(target_var)
parser.feed(data)
if parser.is_selector:
questions = [
inquirer.List(
target_var, choices=parser.choices, message=f"Select {target_var}",
)
]
answers.update(inquirer.prompt(questions))
# Search for Textboxes (currently just supports one textbox per request)
parser = OptionParser(OptionParser.TEXTBOXES)
parser.feed(data)
if parser.is_text_input:
message = parser.label_contents[parser.text_field_id]
if parser.text_hint != "":
message = message + " (" + parser.text_hint + ")"
questions = [inquirer.Text(parser.text_name, message=message)]
answers.update(inquirer.prompt(questions))
args = {
"key": answers["key"],
"modus": MODUS_KEY,
"waction": ACTION_EXTRACTOR_PATTERN.findall(data)[0],
}
r = requests.post(
"https://api.abfall.io", params=args, data=answers, headers=HEADERS
)
return r.text
def main():
questions = [
inquirer.List(
"key", choices=DISTRICT_CHOICES, message="Select service provider"
)
]
answers = inquirer.prompt(questions)
# prompt for first level
args = {"key": answers["key"], "modus": MODUS_KEY, "waction": "init"}
r = requests.get("https://api.abfall.io", params=args, headers=HEADERS)
data = r.text
while True:
data = select_and_query(data, answers)
if "f_id_abfalltyp" in data:
break
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: abfall_io")
print(" args:")
for key, value in answers.items():
if key in CONFIG_VARIABLES or key == "key":
print(f" {key}: {value}")
if __name__ == "__main__":
main()
| 31.213043 | 88 | 0.599248 |
import re
from html.parser import HTMLParser
import inquirer
import requests
MODUS_KEY = "d6c5855a62cf32a4dadbc2831f0f295f"
HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
CONFIG_VARIABLES = [
"f_id_kommune",
"f_id_bezirk",
"f_id_strasse",
"f_id_strasse_hnr",
"f_abfallarten",
]
ACTION_EXTRACTOR_PATTERN = re.compile(
'(?<=awk-data-onchange-submit-waction=")[^\\n\\r"]+'
)
DISTRICT_CHOICES = [
("ALBA Berlin", "9583a2fa1df97ed95363382c73b41b1b"),
("Böblingen", "8215c62763967916979e0e8566b6172e"),
("Calw", "690a3ae4906c52b232c1322e2f88550c"),
("eBe Essen", "9b5390f095c779b9128a51db35092c9c"),
("Freudenstadt", "595f903540a36fe8610ec39aa3a06f6a"),
("Göppingen", "365d791b58c7e39b20bb8f167bd33981"),
("Heilbronn (Landkreis)", "1a1e7b200165683738adddc4bd0199a2"),
("Kitzingen", "594f805eb33677ad5bc645aeeeaf2623"),
("Landsberg am Lech", "7df877d4f0e63decfb4d11686c54c5d6"),
("Landshut", "bd0c2d0177a0849a905cded5cb734a6f"),
("Ludwigshafen am Rhein", "6efba91e69a5b454ac0ae3497978fe1d"),
("MüllALARM / Schönmackers", "e5543a3e190cb8d91c645660ad60965f"),
("Rhein-Neckar-Kreis", "914fb9d000a9a05af4fd54cfba478860"),
("Rotenburg (Wümme)", "645adb3c27370a61f7eabbb2039de4f1"),
("Sigmaringen", "39886c5699d14e040063c0142cd0740b"),
("Unterallgäu", "c22b850ea4eff207a273e46847e417c5"),
("Westerwaldkreis", "248deacbb49b06e868d29cb53c8ef034"),
]
class OptionParser(HTMLParser):
TEXTBOXES = "textboxes"
def error(self, message):
pass
def __init__(self, target_var):
super().__init__()
self._target_var = target_var
self._within_option = False
self._option_name = ""
self._option_value = "-1"
self._choices = []
self._is_selector = False
self._is_text_input = False
self._text_field_id = ""
self._text_hint = ""
self._text_name = ""
self._label_for_id = ""
self._label_contents = {}
@property
def choices(self):
return self._choices
def handle_starttag(self, tag, attrs):
attributes = dict(attrs)
if tag == "label":
if "for" in attributes:
self._label_for_id = attributes["for"]
if tag == "input":
if "type" in attributes:
if attributes["type"] == "hidden":
if (
"name" in attributes
and "value" in attributes
and attributes["name"] == self._target_var
):
self._is_selector = True
self._option_value = attributes["value"]
self._choices.append((attributes["value"], attributes["value"]))
elif (
self._target_var == OptionParser.TEXTBOXES
and attributes["type"] == "text"
):
self._is_text_input = True
if "id" in attributes:
self._text_field_id = attributes["id"]
if "placeholder" in attributes:
self._text_hint = attributes["placeholder"]
if "name" in attributes:
self._text_name = attributes["name"]
if tag == "select":
if "name" in attributes and attributes["name"] == self._target_var:
self._is_selector = True
if tag == "option" and self._is_selector:
self._within_option = True
if "value" in attributes:
self._option_value = attributes["value"]
def handle_endtag(self, tag):
if (
self._within_option
and len(self._option_name) > 0
and self._option_value != "-1"
):
self._choices.append((self._option_name, self._option_value))
self._within_option = False
self._option_name = ""
self._option_value = "-1"
def handle_data(self, data):
if self._within_option:
self._option_name += data
if len(self._label_for_id) > 0:
self._label_contents[self._label_for_id] = data
self._label_for_id = ""
@property
def is_selector(self):
return self._is_selector
@property
def is_text_input(self):
return self._is_text_input
@property
def text_name(self):
return self._text_name
@property
def text_field_id(self):
return self._text_field_id
@property
def label_contents(self):
return self._label_contents
@property
def text_hint(self):
return self._text_hint
def select_and_query(data, answers):
relevant_config_vars = []
for config_var in CONFIG_VARIABLES:
if config_var not in answers and config_var in data:
relevant_config_vars.append(config_var)
for target_var in relevant_config_vars:
parser = OptionParser(target_var)
parser.feed(data)
if parser.is_selector:
questions = [
inquirer.List(
target_var, choices=parser.choices, message=f"Select {target_var}",
)
]
answers.update(inquirer.prompt(questions))
parser = OptionParser(OptionParser.TEXTBOXES)
parser.feed(data)
if parser.is_text_input:
message = parser.label_contents[parser.text_field_id]
if parser.text_hint != "":
message = message + " (" + parser.text_hint + ")"
questions = [inquirer.Text(parser.text_name, message=message)]
answers.update(inquirer.prompt(questions))
args = {
"key": answers["key"],
"modus": MODUS_KEY,
"waction": ACTION_EXTRACTOR_PATTERN.findall(data)[0],
}
r = requests.post(
"https://api.abfall.io", params=args, data=answers, headers=HEADERS
)
return r.text
def main():
questions = [
inquirer.List(
"key", choices=DISTRICT_CHOICES, message="Select service provider"
)
]
answers = inquirer.prompt(questions)
args = {"key": answers["key"], "modus": MODUS_KEY, "waction": "init"}
r = requests.get("https://api.abfall.io", params=args, headers=HEADERS)
data = r.text
while True:
data = select_and_query(data, answers)
if "f_id_abfalltyp" in data:
break
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: abfall_io")
print(" args:")
for key, value in answers.items():
if key in CONFIG_VARIABLES or key == "key":
print(f" {key}: {value}")
if __name__ == "__main__":
main()
| true | true |
f73e7e9f2df0e0ed4eecc630393e2c3672e5ff98 | 1,029 | py | Python | IJCAI19/SemColHNN_Codes/Cache/cache_classes.py | vishalbelsare/SemAIDA | 9f4f65b845822d8dda14e94b3c274a79c5d1937c | [
"Apache-2.0"
] | 34 | 2018-11-22T16:22:10.000Z | 2022-02-24T13:50:16.000Z | IJCAI19/SemColHNN_Codes/Cache/cache_classes.py | vishalbelsare/SemAIDA | 9f4f65b845822d8dda14e94b3c274a79c5d1937c | [
"Apache-2.0"
] | 11 | 2019-03-22T11:50:03.000Z | 2021-12-11T12:32:23.000Z | IJCAI19/SemColHNN_Codes/Cache/cache_classes.py | vishalbelsare/SemAIDA | 9f4f65b845822d8dda14e94b3c274a79c5d1937c | [
"Apache-2.0"
] | 13 | 2018-12-26T09:58:10.000Z | 2022-02-13T10:36:56.000Z | """
This file is to cache the classes and super classes of given entities
"""
import os
import json
from util.util_kb import query_complete_classes_of_entity
# file to save
cache_file = 'cache_classes.json'
# read the input entities
ents = list()
cache_ents = json.load(open('cache_ents_T2D_Limaye.json'))
for v in cache_ents.values():
ents += v
cache_ents = json.load(open('cache_ents_Wikipedia.json'))
for v in cache_ents.values():
ents += v
ents = set(ents)
# load the existing cache
# this program can support incremental caching
ent_classes = json.load(open(cache_file)) if os.path.exists(cache_file) else dict()
print('%d left' % (len(ents) - len(ent_classes.keys())))
for i, ent in enumerate(ents):
if ent not in ent_classes:
classes = query_complete_classes_of_entity(ent)
ent_classes[ent] = list(classes)
if i % 50 == 0:
print('i: %d done' % i)
json.dump(ent_classes, open(cache_file, 'w'))
json.dump(ent_classes, open(cache_file, 'w'))
print('all done')
| 28.583333 | 83 | 0.697765 | import os
import json
from util.util_kb import query_complete_classes_of_entity
cache_file = 'cache_classes.json'
ents = list()
cache_ents = json.load(open('cache_ents_T2D_Limaye.json'))
for v in cache_ents.values():
ents += v
cache_ents = json.load(open('cache_ents_Wikipedia.json'))
for v in cache_ents.values():
ents += v
ents = set(ents)
ent_classes = json.load(open(cache_file)) if os.path.exists(cache_file) else dict()
print('%d left' % (len(ents) - len(ent_classes.keys())))
for i, ent in enumerate(ents):
if ent not in ent_classes:
classes = query_complete_classes_of_entity(ent)
ent_classes[ent] = list(classes)
if i % 50 == 0:
print('i: %d done' % i)
json.dump(ent_classes, open(cache_file, 'w'))
json.dump(ent_classes, open(cache_file, 'w'))
print('all done')
| true | true |
f73e7f88e6002e1b353a4473b18574f6782bb4f8 | 841 | py | Python | evaluate/evalVPRH.py | UditSinghParihar/d2-net | b3592beebe6759cf4cc1acdfd23d603ef059ef30 | [
"BSD-3-Clause-Clear"
] | 6 | 2020-09-04T04:06:58.000Z | 2021-11-29T08:41:37.000Z | evaluate/evalVPRH.py | UditSinghParihar/d2-net | b3592beebe6759cf4cc1acdfd23d603ef059ef30 | [
"BSD-3-Clause-Clear"
] | null | null | null | evaluate/evalVPRH.py | UditSinghParihar/d2-net | b3592beebe6759cf4cc1acdfd23d603ef059ef30 | [
"BSD-3-Clause-Clear"
] | 3 | 2020-09-24T04:36:20.000Z | 2022-01-12T08:40:49.000Z | from sys import argv, exit
import csv
import matplotlib.pyplot as plt
import os
def readCSV(file):
with open(file) as csvFile:
csvReader = csv.reader(csvFile, delimiter=',')
data = []
for i, row in enumerate(csvReader):
if(i == 0):
continue
else:
data.append(row)
return data
if __name__ == '__main__':
gtFile = argv[1]
vprFile = argv[2]
gtData = readCSV(gtFile)
vprData = readCSV(vprFile)
correctPair = 0
for vpr, gt in zip(vprData, gtData):
predId = int(vpr[1])
gtStId = int(os.path.basename(gt[1]).replace('.png', ''))
gtEndId = int(os.path.basename(gt[2]).replace('.png', ''))
if(gtStId < predId < gtEndId):
correctPair += 1
print("Number of correct retrived top-1 pair: {} out of total {} pairs ({:.2f}%)".format(correctPair,
len(vprData), correctPair*100.0/len(vprData)))
| 19.113636 | 103 | 0.650416 | from sys import argv, exit
import csv
import matplotlib.pyplot as plt
import os
def readCSV(file):
with open(file) as csvFile:
csvReader = csv.reader(csvFile, delimiter=',')
data = []
for i, row in enumerate(csvReader):
if(i == 0):
continue
else:
data.append(row)
return data
if __name__ == '__main__':
gtFile = argv[1]
vprFile = argv[2]
gtData = readCSV(gtFile)
vprData = readCSV(vprFile)
correctPair = 0
for vpr, gt in zip(vprData, gtData):
predId = int(vpr[1])
gtStId = int(os.path.basename(gt[1]).replace('.png', ''))
gtEndId = int(os.path.basename(gt[2]).replace('.png', ''))
if(gtStId < predId < gtEndId):
correctPair += 1
print("Number of correct retrived top-1 pair: {} out of total {} pairs ({:.2f}%)".format(correctPair,
len(vprData), correctPair*100.0/len(vprData)))
| true | true |
f73e7f91755d7d59437cc829e627d65def4c240d | 27,536 | py | Python | ens_sdk/model/easy_flow/deploy_strategy_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | ens_sdk/model/easy_flow/deploy_strategy_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | ens_sdk/model/easy_flow/deploy_strategy_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: deploy_strategy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from ens_sdk.model.easy_flow import deploy_target_pb2 as ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
from ens_sdk.model.cmdb import cluster_info_pb2 as ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from ens_sdk.model.easy_flow import target_info_pb2 as ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_strategy.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x15\x64\x65ploy_strategy.proto\x12\teasy_flow\x1a+ens_sdk/model/easy_flow/deploy_target.proto\x1a%ens_sdk/model/cmdb/cluster_info.proto\x1a)ens_sdk/model/easy_flow/target_info.proto\"\xd6\t\n\x0e\x44\x65ployStrategy\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x12\n\napiVersion\x18\x03 \x01(\t\x12\x0b\n\x03org\x18\x04 \x01(\x05\x12*\n\x03\x61pp\x18\x05 \x01(\x0b\x32\x1d.easy_flow.DeployStrategy.App\x12\x0c\n\x04type\x18\x06 \x01(\t\x12>\n\rbatchStrategy\x18\x07 \x01(\x0b\x32\'.easy_flow.DeployStrategy.BatchStrategy\x12\r\n\x05scope\x18\x08 \x01(\t\x12#\n\x08\x63lusters\x18\t \x03(\x0b\x32\x11.cmdb.ClusterInfo\x12)\n\ntargetList\x18\n \x03(\x0b\x32\x15.easy_flow.TargetInfo\x12\x1a\n\x12\x63lusterEnvironment\x18\x0b \x01(\t\x12\x13\n\x0b\x63lusterType\x18\x0c \x01(\t\x12:\n\x0bpackageList\x18\r \x03(\x0b\x32%.easy_flow.DeployStrategy.PackageList\x12\x30\n\x06status\x18\x0e \x01(\x0b\x32 .easy_flow.DeployStrategy.Status\x1a\"\n\x03\x41pp\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x61ppId\x18\x02 \x01(\t\x1a\xc7\x03\n\rBatchStrategy\x12\x44\n\tautoBatch\x18\x01 \x01(\x0b\x32\x31.easy_flow.DeployStrategy.BatchStrategy.AutoBatch\x12H\n\x0bmanualBatch\x18\x02 \x01(\x0b\x32\x33.easy_flow.DeployStrategy.BatchStrategy.ManualBatch\x12\x0c\n\x04type\x18\x03 \x01(\t\x1aH\n\tAutoBatch\x12\x10\n\x08\x62\x61tchNum\x18\x01 \x01(\x05\x12\x15\n\rbatchInterval\x18\x02 \x01(\x05\x12\x12\n\nfailedStop\x18\x03 \x01(\x08\x1a\xcd\x01\n\x0bManualBatch\x12L\n\x07\x62\x61tches\x18\x01 \x03(\x0b\x32;.easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12\x12\n\nfailedStop\x18\x04 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTarget\x1a\x85\x02\n\x0bPackageList\x12\x13\n\x0bpackageName\x18\x01 \x01(\t\x12\"\n\x07\x63luster\x18\x02 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x15\n\rtargetVersion\x18\x03 
\x01(\t\x12\x0f\n\x07preStop\x18\x04 \x01(\x08\x12\x13\n\x0bpostRestart\x18\x05 \x01(\x08\x12\x11\n\tautoStart\x18\x06 \x01(\x08\x12\x11\n\tuserCheck\x18\x07 \x01(\x08\x12\x12\n\nfullUpdate\x18\x08 \x01(\x08\x12\x11\n\tpackageId\x18\t \x01(\t\x12\x13\n\x0binstallPath\x18\n \x01(\t\x12\x0c\n\x04type\x18\x0b \x01(\x05\x12\x10\n\x08platform\x18\x0c \x01(\t\x1a\x1b\n\x06Status\x12\x11\n\toutOfDate\x18\x01 \x01(\x08\x42\x45ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2.DESCRIPTOR,])
_DEPLOYSTRATEGY_APP = _descriptor.Descriptor(
name='App',
full_name='easy_flow.DeployStrategy.App',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='easy_flow.DeployStrategy.App.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='easy_flow.DeployStrategy.App.appId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=617,
serialized_end=651,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH = _descriptor.Descriptor(
name='AutoBatch',
full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.batchNum', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.batchInterval', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.failedStop', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=829,
serialized_end=901,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1058,
serialized_end=1109,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH = _descriptor.Descriptor(
name='ManualBatch',
full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.failedStop', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=904,
serialized_end=1109,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY = _descriptor.Descriptor(
name='BatchStrategy',
full_name='easy_flow.DeployStrategy.BatchStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='autoBatch', full_name='easy_flow.DeployStrategy.BatchStrategy.autoBatch', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manualBatch', full_name='easy_flow.DeployStrategy.BatchStrategy.manualBatch', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.BatchStrategy.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH, _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=654,
serialized_end=1109,
)
_DEPLOYSTRATEGY_PACKAGELIST = _descriptor.Descriptor(
name='PackageList',
full_name='easy_flow.DeployStrategy.PackageList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packageName', full_name='easy_flow.DeployStrategy.PackageList.packageName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.DeployStrategy.PackageList.cluster', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetVersion', full_name='easy_flow.DeployStrategy.PackageList.targetVersion', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preStop', full_name='easy_flow.DeployStrategy.PackageList.preStop', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postRestart', full_name='easy_flow.DeployStrategy.PackageList.postRestart', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='autoStart', full_name='easy_flow.DeployStrategy.PackageList.autoStart', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='userCheck', full_name='easy_flow.DeployStrategy.PackageList.userCheck', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fullUpdate', full_name='easy_flow.DeployStrategy.PackageList.fullUpdate', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.DeployStrategy.PackageList.packageId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.DeployStrategy.PackageList.installPath', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.PackageList.type', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='platform', full_name='easy_flow.DeployStrategy.PackageList.platform', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1112,
serialized_end=1373,
)
_DEPLOYSTRATEGY_STATUS = _descriptor.Descriptor(
name='Status',
full_name='easy_flow.DeployStrategy.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='outOfDate', full_name='easy_flow.DeployStrategy.Status.outOfDate', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1375,
serialized_end=1402,
)
_DEPLOYSTRATEGY = _descriptor.Descriptor(
name='DeployStrategy',
full_name='easy_flow.DeployStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='easy_flow.DeployStrategy.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='easy_flow.DeployStrategy.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='apiVersion', full_name='easy_flow.DeployStrategy.apiVersion', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='easy_flow.DeployStrategy.org', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='app', full_name='easy_flow.DeployStrategy.app', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.type', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchStrategy', full_name='easy_flow.DeployStrategy.batchStrategy', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scope', full_name='easy_flow.DeployStrategy.scope', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusters', full_name='easy_flow.DeployStrategy.clusters', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetList', full_name='easy_flow.DeployStrategy.targetList', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterEnvironment', full_name='easy_flow.DeployStrategy.clusterEnvironment', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterType', full_name='easy_flow.DeployStrategy.clusterType', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageList', full_name='easy_flow.DeployStrategy.packageList', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='easy_flow.DeployStrategy.status', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_APP, _DEPLOYSTRATEGY_BATCHSTRATEGY, _DEPLOYSTRATEGY_PACKAGELIST, _DEPLOYSTRATEGY_STATUS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=164,
serialized_end=1402,
)
_DEPLOYSTRATEGY_APP.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES.fields_by_name['targets'].message_type = ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH.fields_by_name['batches'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY.fields_by_name['autoBatch'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY.fields_by_name['manualBatch'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_PACKAGELIST.fields_by_name['cluster'].message_type = ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_DEPLOYSTRATEGY_PACKAGELIST.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_STATUS.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY.fields_by_name['app'].message_type = _DEPLOYSTRATEGY_APP
_DEPLOYSTRATEGY.fields_by_name['batchStrategy'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY.fields_by_name['clusters'].message_type = ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_DEPLOYSTRATEGY.fields_by_name['targetList'].message_type = ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2._TARGETINFO
_DEPLOYSTRATEGY.fields_by_name['packageList'].message_type = _DEPLOYSTRATEGY_PACKAGELIST
_DEPLOYSTRATEGY.fields_by_name['status'].message_type = _DEPLOYSTRATEGY_STATUS
DESCRIPTOR.message_types_by_name['DeployStrategy'] = _DEPLOYSTRATEGY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployStrategy = _reflection.GeneratedProtocolMessageType('DeployStrategy', (_message.Message,), {
'App' : _reflection.GeneratedProtocolMessageType('App', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_APP,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.App)
})
,
'BatchStrategy' : _reflection.GeneratedProtocolMessageType('BatchStrategy', (_message.Message,), {
'AutoBatch' : _reflection.GeneratedProtocolMessageType('AutoBatch', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.AutoBatch)
})
,
'ManualBatch' : _reflection.GeneratedProtocolMessageType('ManualBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.ManualBatch)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy)
})
,
'PackageList' : _reflection.GeneratedProtocolMessageType('PackageList', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_PACKAGELIST,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.PackageList)
})
,
'Status' : _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_STATUS,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.Status)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy)
})
_sym_db.RegisterMessage(DeployStrategy)
_sym_db.RegisterMessage(DeployStrategy.App)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.AutoBatch)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.ManualBatch)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.ManualBatch.Batches)
_sym_db.RegisterMessage(DeployStrategy.PackageList)
_sym_db.RegisterMessage(DeployStrategy.Status)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 47.150685 | 2,500 | 0.75799 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from ens_sdk.model.easy_flow import deploy_target_pb2 as ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
from ens_sdk.model.cmdb import cluster_info_pb2 as ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from ens_sdk.model.easy_flow import target_info_pb2 as ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_strategy.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x15\x64\x65ploy_strategy.proto\x12\teasy_flow\x1a+ens_sdk/model/easy_flow/deploy_target.proto\x1a%ens_sdk/model/cmdb/cluster_info.proto\x1a)ens_sdk/model/easy_flow/target_info.proto\"\xd6\t\n\x0e\x44\x65ployStrategy\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x12\n\napiVersion\x18\x03 \x01(\t\x12\x0b\n\x03org\x18\x04 \x01(\x05\x12*\n\x03\x61pp\x18\x05 \x01(\x0b\x32\x1d.easy_flow.DeployStrategy.App\x12\x0c\n\x04type\x18\x06 \x01(\t\x12>\n\rbatchStrategy\x18\x07 \x01(\x0b\x32\'.easy_flow.DeployStrategy.BatchStrategy\x12\r\n\x05scope\x18\x08 \x01(\t\x12#\n\x08\x63lusters\x18\t \x03(\x0b\x32\x11.cmdb.ClusterInfo\x12)\n\ntargetList\x18\n \x03(\x0b\x32\x15.easy_flow.TargetInfo\x12\x1a\n\x12\x63lusterEnvironment\x18\x0b \x01(\t\x12\x13\n\x0b\x63lusterType\x18\x0c \x01(\t\x12:\n\x0bpackageList\x18\r \x03(\x0b\x32%.easy_flow.DeployStrategy.PackageList\x12\x30\n\x06status\x18\x0e \x01(\x0b\x32 .easy_flow.DeployStrategy.Status\x1a\"\n\x03\x41pp\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05\x61ppId\x18\x02 \x01(\t\x1a\xc7\x03\n\rBatchStrategy\x12\x44\n\tautoBatch\x18\x01 \x01(\x0b\x32\x31.easy_flow.DeployStrategy.BatchStrategy.AutoBatch\x12H\n\x0bmanualBatch\x18\x02 \x01(\x0b\x32\x33.easy_flow.DeployStrategy.BatchStrategy.ManualBatch\x12\x0c\n\x04type\x18\x03 \x01(\t\x1aH\n\tAutoBatch\x12\x10\n\x08\x62\x61tchNum\x18\x01 \x01(\x05\x12\x15\n\rbatchInterval\x18\x02 \x01(\x05\x12\x12\n\nfailedStop\x18\x03 \x01(\x08\x1a\xcd\x01\n\x0bManualBatch\x12L\n\x07\x62\x61tches\x18\x01 \x03(\x0b\x32;.easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12\x12\n\nfailedStop\x18\x04 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTarget\x1a\x85\x02\n\x0bPackageList\x12\x13\n\x0bpackageName\x18\x01 \x01(\t\x12\"\n\x07\x63luster\x18\x02 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x15\n\rtargetVersion\x18\x03 
\x01(\t\x12\x0f\n\x07preStop\x18\x04 \x01(\x08\x12\x13\n\x0bpostRestart\x18\x05 \x01(\x08\x12\x11\n\tautoStart\x18\x06 \x01(\x08\x12\x11\n\tuserCheck\x18\x07 \x01(\x08\x12\x12\n\nfullUpdate\x18\x08 \x01(\x08\x12\x11\n\tpackageId\x18\t \x01(\t\x12\x13\n\x0binstallPath\x18\n \x01(\t\x12\x0c\n\x04type\x18\x0b \x01(\x05\x12\x10\n\x08platform\x18\x0c \x01(\t\x1a\x1b\n\x06Status\x12\x11\n\toutOfDate\x18\x01 \x01(\x08\x42\x45ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2.DESCRIPTOR,])
_DEPLOYSTRATEGY_APP = _descriptor.Descriptor(
name='App',
full_name='easy_flow.DeployStrategy.App',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='easy_flow.DeployStrategy.App.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='easy_flow.DeployStrategy.App.appId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=617,
serialized_end=651,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH = _descriptor.Descriptor(
name='AutoBatch',
full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.batchNum', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.batchInterval', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployStrategy.BatchStrategy.AutoBatch.failedStop', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=829,
serialized_end=901,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1058,
serialized_end=1109,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH = _descriptor.Descriptor(
name='ManualBatch',
full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batches', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployStrategy.BatchStrategy.ManualBatch.failedStop', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=904,
serialized_end=1109,
)
_DEPLOYSTRATEGY_BATCHSTRATEGY = _descriptor.Descriptor(
name='BatchStrategy',
full_name='easy_flow.DeployStrategy.BatchStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='autoBatch', full_name='easy_flow.DeployStrategy.BatchStrategy.autoBatch', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manualBatch', full_name='easy_flow.DeployStrategy.BatchStrategy.manualBatch', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.BatchStrategy.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH, _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=654,
serialized_end=1109,
)
_DEPLOYSTRATEGY_PACKAGELIST = _descriptor.Descriptor(
name='PackageList',
full_name='easy_flow.DeployStrategy.PackageList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packageName', full_name='easy_flow.DeployStrategy.PackageList.packageName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.DeployStrategy.PackageList.cluster', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetVersion', full_name='easy_flow.DeployStrategy.PackageList.targetVersion', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preStop', full_name='easy_flow.DeployStrategy.PackageList.preStop', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postRestart', full_name='easy_flow.DeployStrategy.PackageList.postRestart', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='autoStart', full_name='easy_flow.DeployStrategy.PackageList.autoStart', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='userCheck', full_name='easy_flow.DeployStrategy.PackageList.userCheck', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fullUpdate', full_name='easy_flow.DeployStrategy.PackageList.fullUpdate', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.DeployStrategy.PackageList.packageId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.DeployStrategy.PackageList.installPath', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.PackageList.type', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='platform', full_name='easy_flow.DeployStrategy.PackageList.platform', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1112,
serialized_end=1373,
)
_DEPLOYSTRATEGY_STATUS = _descriptor.Descriptor(
name='Status',
full_name='easy_flow.DeployStrategy.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='outOfDate', full_name='easy_flow.DeployStrategy.Status.outOfDate', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1375,
serialized_end=1402,
)
_DEPLOYSTRATEGY = _descriptor.Descriptor(
name='DeployStrategy',
full_name='easy_flow.DeployStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='easy_flow.DeployStrategy.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='easy_flow.DeployStrategy.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='apiVersion', full_name='easy_flow.DeployStrategy.apiVersion', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='easy_flow.DeployStrategy.org', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='app', full_name='easy_flow.DeployStrategy.app', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployStrategy.type', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchStrategy', full_name='easy_flow.DeployStrategy.batchStrategy', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scope', full_name='easy_flow.DeployStrategy.scope', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusters', full_name='easy_flow.DeployStrategy.clusters', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetList', full_name='easy_flow.DeployStrategy.targetList', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterEnvironment', full_name='easy_flow.DeployStrategy.clusterEnvironment', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterType', full_name='easy_flow.DeployStrategy.clusterType', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageList', full_name='easy_flow.DeployStrategy.packageList', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='easy_flow.DeployStrategy.status', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYSTRATEGY_APP, _DEPLOYSTRATEGY_BATCHSTRATEGY, _DEPLOYSTRATEGY_PACKAGELIST, _DEPLOYSTRATEGY_STATUS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=164,
serialized_end=1402,
)
_DEPLOYSTRATEGY_APP.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES.fields_by_name['targets'].message_type = ens__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH.fields_by_name['batches'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES
_DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH.containing_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY_BATCHSTRATEGY.fields_by_name['autoBatch'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY.fields_by_name['manualBatch'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH
_DEPLOYSTRATEGY_BATCHSTRATEGY.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_PACKAGELIST.fields_by_name['cluster'].message_type = ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_DEPLOYSTRATEGY_PACKAGELIST.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY_STATUS.containing_type = _DEPLOYSTRATEGY
_DEPLOYSTRATEGY.fields_by_name['app'].message_type = _DEPLOYSTRATEGY_APP
_DEPLOYSTRATEGY.fields_by_name['batchStrategy'].message_type = _DEPLOYSTRATEGY_BATCHSTRATEGY
_DEPLOYSTRATEGY.fields_by_name['clusters'].message_type = ens__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_DEPLOYSTRATEGY.fields_by_name['targetList'].message_type = ens__sdk_dot_model_dot_easy__flow_dot_target__info__pb2._TARGETINFO
_DEPLOYSTRATEGY.fields_by_name['packageList'].message_type = _DEPLOYSTRATEGY_PACKAGELIST
_DEPLOYSTRATEGY.fields_by_name['status'].message_type = _DEPLOYSTRATEGY_STATUS
DESCRIPTOR.message_types_by_name['DeployStrategy'] = _DEPLOYSTRATEGY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployStrategy = _reflection.GeneratedProtocolMessageType('DeployStrategy', (_message.Message,), {
'App' : _reflection.GeneratedProtocolMessageType('App', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_APP,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.App)
})
,
'BatchStrategy' : _reflection.GeneratedProtocolMessageType('BatchStrategy', (_message.Message,), {
'AutoBatch' : _reflection.GeneratedProtocolMessageType('AutoBatch', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_AUTOBATCH,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.AutoBatch)
})
,
'ManualBatch' : _reflection.GeneratedProtocolMessageType('ManualBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH_BATCHES,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.ManualBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY_MANUALBATCH,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy.ManualBatch)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY_BATCHSTRATEGY,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.BatchStrategy)
})
,
'PackageList' : _reflection.GeneratedProtocolMessageType('PackageList', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_PACKAGELIST,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.PackageList)
})
,
'Status' : _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYSTRATEGY_STATUS,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy.Status)
})
,
'DESCRIPTOR' : _DEPLOYSTRATEGY,
'__module__' : 'deploy_strategy_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployStrategy)
})
_sym_db.RegisterMessage(DeployStrategy)
_sym_db.RegisterMessage(DeployStrategy.App)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.AutoBatch)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.ManualBatch)
_sym_db.RegisterMessage(DeployStrategy.BatchStrategy.ManualBatch.Batches)
_sym_db.RegisterMessage(DeployStrategy.PackageList)
_sym_db.RegisterMessage(DeployStrategy.Status)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f73e7fdc4728deabce9a2c0852d3596fb23c3e73 | 3,480 | py | Python | kitsune/urls.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | 1 | 2015-03-09T05:48:58.000Z | 2015-03-09T05:48:58.000Z | kitsune/urls.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | null | null | null | kitsune/urls.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, patterns, url
from django.conf import settings
from django.views.i18n import javascript_catalog
from django.views.decorators.cache import cache_page
from django.views.generic.base import RedirectView
import authority
import badger
from waffle.views import wafflejs
# Note: This must come before importing admin because it patches the
# admin.
from kitsune.sumo.monkeypatch import patch
patch()
from django.contrib import admin
admin.autodiscover()
authority.autodiscover()
badger.autodiscover()
urlpatterns = patterns(
'',
(r'^search', include('kitsune.search.urls')),
(r'^forums', include('kitsune.forums.urls')),
(r'^questions', include('kitsune.questions.urls')),
(r'^flagged', include('kitsune.flagit.urls')),
(r'^upload', include('kitsune.upload.urls')),
(r'^kb', include('kitsune.wiki.urls')),
(r'^gallery', include('kitsune.gallery.urls')),
(r'^army-of-awesome', include('kitsune.customercare.urls')),
(r'^chat', RedirectView.as_view(url='questions/new')),
(r'^messages', include('kitsune.messages.urls')),
(r'^1', include('kitsune.inproduct.urls')),
(r'^postcrash', include('kitsune.postcrash.urls')),
(r'^groups', include('kitsune.groups.urls')),
(r'^kpi/', include('kitsune.kpi.urls')),
(r'^products', include('kitsune.products.urls')),
(r'^announcements', include('kitsune.announcements.urls')),
(r'^community', include('kitsune.community.urls')),
(r'^badges/', include('kitsune.kbadge.urls')),
# Kitsune admin (not Django admin).
(r'^admin/', include(admin.site.urls)),
# Javascript translations.
url(r'^jsi18n/.*$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['kitsune']}, name='jsi18n'),
# Yaocho translations. These don't need cached because Yaocho downloads
# them in a build step, not on the client.
url(r'^jsi18n-yaocho/.*$', javascript_catalog,
{'domain': 'yaocho', 'packages': ['kitsune']}, name='jsi18n-yaocho'),
# JavaScript Waffle.
url(r'^wafflejs$', wafflejs, name='wafflejs'),
(r'^', include('kitsune.dashboards.urls')),
(r'^', include('kitsune.landings.urls')),
(r'^', include('kitsune.kpi.urls_api')),
(r'^', include('kitsune.motidings.urls')),
# Users
('', include('kitsune.users.urls')),
# Services and sundry.
(r'', include('kitsune.sumo.urls')),
# v1 APIs
(r'^api/1/kb/', include('kitsune.wiki.urls_api')),
(r'^api/1/products/', include('kitsune.products.urls_api')),
(r'^api/1/customercare/', include('kitsune.customercare.urls_api')),
(r'^api/1/gallery/', include('kitsune.gallery.urls_api')),
(r'^api/1/users/', include('kitsune.users.urls_api')),
# v2 APIs
(r'^api/2/', include('kitsune.notifications.urls_api')),
(r'^api/2/', include('kitsune.questions.urls_api')),
(r'^api/2/', include('kitsune.search.urls_api')),
(r'^api/2/', include('kitsune.community.urls_api')),
# These API urls include both v1 and v2 urls.
(r'^api/', include('kitsune.users.urls_api')),
)
# Handle 404 and 500 errors
handler404 = 'kitsune.sumo.views.handle404'
handler500 = 'kitsune.sumo.views.handle500'
if settings.DEBUG:
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns(
'',
(r'^%s/(?P<path>.*)$' % media_url, 'kitsune.sumo.views.serve_cors',
{'document_root': settings.MEDIA_ROOT}),
)
| 35.876289 | 77 | 0.657759 | from django.conf.urls import include, patterns, url
from django.conf import settings
from django.views.i18n import javascript_catalog
from django.views.decorators.cache import cache_page
from django.views.generic.base import RedirectView
import authority
import badger
from waffle.views import wafflejs
from kitsune.sumo.monkeypatch import patch
patch()
from django.contrib import admin
admin.autodiscover()
authority.autodiscover()
badger.autodiscover()
urlpatterns = patterns(
'',
(r'^search', include('kitsune.search.urls')),
(r'^forums', include('kitsune.forums.urls')),
(r'^questions', include('kitsune.questions.urls')),
(r'^flagged', include('kitsune.flagit.urls')),
(r'^upload', include('kitsune.upload.urls')),
(r'^kb', include('kitsune.wiki.urls')),
(r'^gallery', include('kitsune.gallery.urls')),
(r'^army-of-awesome', include('kitsune.customercare.urls')),
(r'^chat', RedirectView.as_view(url='questions/new')),
(r'^messages', include('kitsune.messages.urls')),
(r'^1', include('kitsune.inproduct.urls')),
(r'^postcrash', include('kitsune.postcrash.urls')),
(r'^groups', include('kitsune.groups.urls')),
(r'^kpi/', include('kitsune.kpi.urls')),
(r'^products', include('kitsune.products.urls')),
(r'^announcements', include('kitsune.announcements.urls')),
(r'^community', include('kitsune.community.urls')),
(r'^badges/', include('kitsune.kbadge.urls')),
(r'^admin/', include(admin.site.urls)),
url(r'^jsi18n/.*$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['kitsune']}, name='jsi18n'),
# them in a build step, not on the client.
url(r'^jsi18n-yaocho/.*$', javascript_catalog,
{'domain': 'yaocho', 'packages': ['kitsune']}, name='jsi18n-yaocho'),
# JavaScript Waffle.
url(r'^wafflejs$', wafflejs, name='wafflejs'),
(r'^', include('kitsune.dashboards.urls')),
(r'^', include('kitsune.landings.urls')),
(r'^', include('kitsune.kpi.urls_api')),
(r'^', include('kitsune.motidings.urls')),
# Users
('', include('kitsune.users.urls')),
# Services and sundry.
(r'', include('kitsune.sumo.urls')),
# v1 APIs
(r'^api/1/kb/', include('kitsune.wiki.urls_api')),
(r'^api/1/products/', include('kitsune.products.urls_api')),
(r'^api/1/customercare/', include('kitsune.customercare.urls_api')),
(r'^api/1/gallery/', include('kitsune.gallery.urls_api')),
(r'^api/1/users/', include('kitsune.users.urls_api')),
# v2 APIs
(r'^api/2/', include('kitsune.notifications.urls_api')),
(r'^api/2/', include('kitsune.questions.urls_api')),
(r'^api/2/', include('kitsune.search.urls_api')),
(r'^api/2/', include('kitsune.community.urls_api')),
# These API urls include both v1 and v2 urls.
(r'^api/', include('kitsune.users.urls_api')),
)
# Handle 404 and 500 errors
handler404 = 'kitsune.sumo.views.handle404'
handler500 = 'kitsune.sumo.views.handle500'
if settings.DEBUG:
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns(
'',
(r'^%s/(?P<path>.*)$' % media_url, 'kitsune.sumo.views.serve_cors',
{'document_root': settings.MEDIA_ROOT}),
)
| true | true |
f73e80ea27abee7043f4be2c11413ab355bc240c | 1,279 | py | Python | src/data/make_norm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | 1 | 2022-03-30T15:06:18.000Z | 2022-03-30T15:06:18.000Z | src/data/make_norm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | src/data/make_norm.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
import logging
from dotenv import find_dotenv, load_dotenv
from src.data.calculate_norm.calculate_norm import calculate_norm
from src.utils.split import Split, ALL_SPLITS, DEV_SPLITS
@click.command()
@click.option('-e', '--example', is_flag=True)
@click.option('-i', '--partition_idx', default=0)
@click.option('-n', '--num_partitions', default=1)
def main(example, partition_idx, num_partitions):
"""Calculate norm + variance of each channel over all inputs in a dataset."""
logger = logging.getLogger(__name__)
logger.info('calculating norm')
N = len(DEV_SPLITS)
start_idx = int(partition_idx*N/num_partitions)
end_idx = int((partition_idx+1)*N/num_partitions)
splits = DEV_SPLITS[start_idx:end_idx]
# Calculate norm.
for split in splits:
calculate_norm(split, example=True)
if not example:
calculate_norm(split, example=False)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| 30.452381 | 81 | 0.701329 |
import click
import logging
from dotenv import find_dotenv, load_dotenv
from src.data.calculate_norm.calculate_norm import calculate_norm
from src.utils.split import Split, ALL_SPLITS, DEV_SPLITS
@click.command()
@click.option('-e', '--example', is_flag=True)
@click.option('-i', '--partition_idx', default=0)
@click.option('-n', '--num_partitions', default=1)
def main(example, partition_idx, num_partitions):
logger = logging.getLogger(__name__)
logger.info('calculating norm')
N = len(DEV_SPLITS)
start_idx = int(partition_idx*N/num_partitions)
end_idx = int((partition_idx+1)*N/num_partitions)
splits = DEV_SPLITS[start_idx:end_idx]
for split in splits:
calculate_norm(split, example=True)
if not example:
calculate_norm(split, example=False)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| true | true |
f73e81d5d46bc3e54c29fa326008f7c4040f400d | 159 | py | Python | virtual/bin/django-admin.py | evelyne250/my-gallery | 890843ec8b240568d16041fbd0ea5f98f496515c | [
"MIT"
] | null | null | null | virtual/bin/django-admin.py | evelyne250/my-gallery | 890843ec8b240568d16041fbd0ea5f98f496515c | [
"MIT"
] | null | null | null | virtual/bin/django-admin.py | evelyne250/my-gallery | 890843ec8b240568d16041fbd0ea5f98f496515c | [
"MIT"
] | null | null | null | #!/home/wecode/Desktop/my-gallery/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 26.5 | 52 | 0.786164 |
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| true | true |
f73e825d6fce9be0f47cf6c0fdf66776a6873239 | 1,373 | py | Python | azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/models/role_assignment_properties_with_scope_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/models/role_assignment_properties_with_scope_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-authorization/azure/mgmt/authorization/v2015_07_01/models/role_assignment_properties_with_scope_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RoleAssignmentPropertiesWithScope(Model):
"""Role assignment properties with scope.
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
"""
_attribute_map = {
'scope': {'key': 'scope', 'type': 'str'},
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(self, *, scope: str=None, role_definition_id: str=None, principal_id: str=None, **kwargs) -> None:
super(RoleAssignmentPropertiesWithScope, self).__init__(**kwargs)
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
| 37.108108 | 115 | 0.616897 |
from msrest.serialization import Model
class RoleAssignmentPropertiesWithScope(Model):
_attribute_map = {
'scope': {'key': 'scope', 'type': 'str'},
'role_definition_id': {'key': 'roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(self, *, scope: str=None, role_definition_id: str=None, principal_id: str=None, **kwargs) -> None:
super(RoleAssignmentPropertiesWithScope, self).__init__(**kwargs)
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
| true | true |
f73e831e43ddc79e12c94b4734ebb23f9789a0bb | 2,069 | py | Python | src/ggrc/models/exceptions.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-04T10:55:14.000Z | 2019-01-04T10:55:14.000Z | src/ggrc/models/exceptions.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/models/exceptions.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Model-related exceptions and related logic."""
import re
from logging import getLogger
from sqlalchemy.exc import IntegrityError
logger = getLogger(__name__)
def field_lookup(field_string):
"""Find relevant error field for UNIQUE violation in SQL error message."""
bad_field = 'code' # assumes this field as a default
if field_string.startswith('uq_t_'):
bad_field = 'title'
elif field_string.endswith('email'):
bad_field = 'email'
elif field_string.endswith('title'):
bad_field = 'title'
return bad_field
def translate_message(exception):
"""
Translates db exceptions to something a user can understand.
"""
message = exception.message
if isinstance(exception, IntegrityError):
# TODO: Handle not null, foreign key, uniqueness errors with compound keys
code, _ = exception.orig.args
if code == 1062: # duplicate entry ... for key ...
pattern = re.compile(r"Duplicate entry ('.*') for key '(.*)'")
matches = pattern.search(message)
if matches:
logger.exception(exception)
return (u"The value {value} is already used for another {key}. "
u"{key} values must be unique."
.format(value=matches.group(1),
key=field_lookup(matches.group(2))))
elif code == 1452: # cannod set child row: a foreign key constraint fails
pattern = re.compile(
r"foreign key constraint fails \(`.+`.`(.+)`, CONSTRAINT `.+` "
r"FOREIGN KEY \(`.+`\) REFERENCES `(.+)` \(`.+`\)\)"
)
matches = pattern.search(message)
if matches:
from_, to_ = matches.groups()
return (u"This request will break a mandatory relationship "
u"from {from_} to {to_}."
.format(from_=from_, to_=to_))
return message
class ValidationError(ValueError):
pass
class StatusValidationError(ValidationError):
pass
class ReservedNameError(ValueError):
pass
| 29.985507 | 78 | 0.650556 |
import re
from logging import getLogger
from sqlalchemy.exc import IntegrityError
logger = getLogger(__name__)
def field_lookup(field_string):
bad_field = 'code'
if field_string.startswith('uq_t_'):
bad_field = 'title'
elif field_string.endswith('email'):
bad_field = 'email'
elif field_string.endswith('title'):
bad_field = 'title'
return bad_field
def translate_message(exception):
message = exception.message
if isinstance(exception, IntegrityError):
code, _ = exception.orig.args
if code == 1062:
pattern = re.compile(r"Duplicate entry ('.*') for key '(.*)'")
matches = pattern.search(message)
if matches:
logger.exception(exception)
return (u"The value {value} is already used for another {key}. "
u"{key} values must be unique."
.format(value=matches.group(1),
key=field_lookup(matches.group(2))))
elif code == 1452:
pattern = re.compile(
r"foreign key constraint fails \(`.+`.`(.+)`, CONSTRAINT `.+` "
r"FOREIGN KEY \(`.+`\) REFERENCES `(.+)` \(`.+`\)\)"
)
matches = pattern.search(message)
if matches:
from_, to_ = matches.groups()
return (u"This request will break a mandatory relationship "
u"from {from_} to {to_}."
.format(from_=from_, to_=to_))
return message
class ValidationError(ValueError):
pass
class StatusValidationError(ValidationError):
pass
class ReservedNameError(ValueError):
pass
| true | true |
f73e840107962f302afb83c14068af287b0260fa | 3,312 | py | Python | RPi/Main.py | ritikbhatia/Multidisciplinary-Design-Project | 552285ac10296d2e74c40b3c698a742a52173479 | [
"MIT"
] | 1 | 2022-01-12T15:22:46.000Z | 2022-01-12T15:22:46.000Z | RPi/Main.py | ritikbhatia/Multidisciplinary-Design-Project | 552285ac10296d2e74c40b3c698a742a52173479 | [
"MIT"
] | null | null | null | RPi/Main.py | ritikbhatia/Multidisciplinary-Design-Project | 552285ac10296d2e74c40b3c698a742a52173479 | [
"MIT"
] | null | null | null |
from ArduinoHandler import ArduinoHandler
import logging
import time
from datetime import datetime
from Queue import Queue
import os
import sys
import multiprocessing
from PCHandler import PCHandler
from BTHandler import BTHandler
from CameraHandler import CameraHandler
from PacketsHandler import *
jobList = []
m = multiprocessing.Manager()
queueJob = m.Queue()
logsDirectory = "logs"
if not os.path.exists(logsDirectory):
os.makedirs(logsDirectory)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(message)s",
filename=os.path.join(logsDirectory, datetime.now().strftime("%Y%m%d-%H%M%S") + ".log"),
filemode="w",
)
currentRunNumber = None
algoVer = 1
debugGoThru = False
waitRpi = True
sendCamera = False
fpReceived = True
fpNow = False
ph = PacketHandler()
def remvoveFilesInFolder(folderpath):
if os.path.exists(folderpath):
for filepath in os.listdir(folderpath):
os.remove(os.path.join(folderpath, filepath))
logging.info("rpi start")
pc = PCHandler("192.168.9.9", 8081, queueJob, "P")
ph.registerHandler(pc)
jobList.append(pc)
logging.info("bluetooth start")
bt = BTHandler(4, queueJob, "B", fpReceived, fpNow)
ph.registerHandler(bt)
jobList.append(bt)
logging.info("arduino start")
arduino = ArduinoManager(
"/dev/ttyACM0",
115200,
0,
queueJob,
"A",
sendCamera,
fpReceived,
fpNow,
)
ph.registerHandler(arduino)
jobList.append(arduino)
logging.info("camera start")
c = CameraHandler(queueJob, "R", sendCamera, currentRunNumber, algoVer)
ph.registerHandler(c)
jobList.append(c)
if algoVer == 1:
resultsFolder = "/home/pi/checklist-results"
imageFolder = "/home/pi/checklist-images"
statusFolder = "/home/pi/checklist-status"
remvoveFilesInFolder(resultsFolder)
remvoveFilesInFolder(imageFolder)
remvoveFilesInFolder(statusFolder)
while True:
if queueJob.qsize() != 0:
if debugGoThru:
if sys.version_info[0] == 3:
x = input("enter to cont")
else:
x = raw_input("enter to cont")
if algoVer == 1 and not fpNow:
logging.info("[FP] algo=1, fp=no")
for resultFile in os.listdir(resultsFolder):
if (
resultFile.endswith(".result")
and resultFile not in bt.proResults
):
finalFileName = resultFile.split(".")[0]
imgRecPacket = (
"R:B:map:absolute:"
+ finalFileName.split("-")[0]
+ ":"
+ finalFileName.split("-")[1]
+ ":"
+ finalFileName.split("-")[2]
)
queueJob.put(imgRecPacket)
bt.proResults.append(resultFile)
logging.info(
"[raspberry] image reg packet is in queue - %s"
% imgRecPacket
)
elif algoVer == 2 and not fpNow:
proPacket = "R:D:read_initial_processed"
queueJob.put(proPacket)
ph.handle(queueJob.get())
queueJob.task_done()
for t in jobList:
t.join()
| 23 | 92 | 0.589372 |
from ArduinoHandler import ArduinoHandler
import logging
import time
from datetime import datetime
from Queue import Queue
import os
import sys
import multiprocessing
from PCHandler import PCHandler
from BTHandler import BTHandler
from CameraHandler import CameraHandler
from PacketsHandler import *
jobList = []
m = multiprocessing.Manager()
queueJob = m.Queue()
logsDirectory = "logs"
if not os.path.exists(logsDirectory):
os.makedirs(logsDirectory)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(message)s",
filename=os.path.join(logsDirectory, datetime.now().strftime("%Y%m%d-%H%M%S") + ".log"),
filemode="w",
)
currentRunNumber = None
algoVer = 1
debugGoThru = False
waitRpi = True
sendCamera = False
fpReceived = True
fpNow = False
ph = PacketHandler()
def remvoveFilesInFolder(folderpath):
if os.path.exists(folderpath):
for filepath in os.listdir(folderpath):
os.remove(os.path.join(folderpath, filepath))
logging.info("rpi start")
pc = PCHandler("192.168.9.9", 8081, queueJob, "P")
ph.registerHandler(pc)
jobList.append(pc)
logging.info("bluetooth start")
bt = BTHandler(4, queueJob, "B", fpReceived, fpNow)
ph.registerHandler(bt)
jobList.append(bt)
logging.info("arduino start")
arduino = ArduinoManager(
"/dev/ttyACM0",
115200,
0,
queueJob,
"A",
sendCamera,
fpReceived,
fpNow,
)
ph.registerHandler(arduino)
jobList.append(arduino)
logging.info("camera start")
c = CameraHandler(queueJob, "R", sendCamera, currentRunNumber, algoVer)
ph.registerHandler(c)
jobList.append(c)
if algoVer == 1:
resultsFolder = "/home/pi/checklist-results"
imageFolder = "/home/pi/checklist-images"
statusFolder = "/home/pi/checklist-status"
remvoveFilesInFolder(resultsFolder)
remvoveFilesInFolder(imageFolder)
remvoveFilesInFolder(statusFolder)
while True:
if queueJob.qsize() != 0:
if debugGoThru:
if sys.version_info[0] == 3:
x = input("enter to cont")
else:
x = raw_input("enter to cont")
if algoVer == 1 and not fpNow:
logging.info("[FP] algo=1, fp=no")
for resultFile in os.listdir(resultsFolder):
if (
resultFile.endswith(".result")
and resultFile not in bt.proResults
):
finalFileName = resultFile.split(".")[0]
imgRecPacket = (
"R:B:map:absolute:"
+ finalFileName.split("-")[0]
+ ":"
+ finalFileName.split("-")[1]
+ ":"
+ finalFileName.split("-")[2]
)
queueJob.put(imgRecPacket)
bt.proResults.append(resultFile)
logging.info(
"[raspberry] image reg packet is in queue - %s"
% imgRecPacket
)
elif algoVer == 2 and not fpNow:
proPacket = "R:D:read_initial_processed"
queueJob.put(proPacket)
ph.handle(queueJob.get())
queueJob.task_done()
for t in jobList:
t.join()
| true | true |
f73e856a0a2e5c97b95c9e04c66d81fbea8db303 | 59,458 | py | Python | tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import unittest
from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph
# Fixtures for 2D case 1, opset4 flavor: a chain of three nearest-mode
# Interpolate ops (shape_calculation_mode='scales') over a [1, 4, 220, 350]
# input -- axis 2 (220->660), then axis 3 (350->700), then axis 2 again
# (660->1320).
graph_node_attrs_for_2d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'size_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0])
    },
    'scale_1_data': {'value': np.array([3.0]), 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'size_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_3_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Edges wiring the three-Interpolate chain above (opset4 Interpolate takes
# four inputs: data, sizes, scales, axes).
edges_for_2d_case_1_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('scale_1', 'scale_1_data'),
    ('axes_1', 'axes_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('size_2', 'size_2_data'),
    ('scale_2', 'scale_2_data'),
    ('axes_2', 'axes_2_data'),
    ('size_2_data', 'interpolate_2', {'in': 1}),
    ('scale_2_data', 'interpolate_2', {'in': 2}),
    ('axes_2_data', 'interpolate_2', {'in': 3}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'interpolate_3', {'in': 0}),
    ('size_3', 'size_3_data'),
    ('scale_3', 'scale_3_data'),
    ('axes_3', 'axes_3_data'),
    ('size_3_data', 'interpolate_3', {'in': 1}),
    ('scale_3_data', 'interpolate_3', {'in': 2}),
    ('axes_3_data', 'interpolate_3', {'in': 3}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Expected result for 2D case 1, opset4 flavor: interpolate_1 and
# interpolate_2 are fused into one two-axis Interpolate (axes [2, 3]);
# interpolate_3 (axis 2 again) cannot be fused and stays separate.
ref_graph_node_attrs_for_2d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0, 2.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'antialias': 0,
        'pads_begin': int64_array([0]),
        'pads_end': int64_array([0]),
        'coordinate_transformation_mode': 'half_pixel',
        'nearest_mode': 'round_prefer_floor',
        'cube_coeff': -0.75,
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Edges for the fused reference graph (interpolate_2 removed).
ref_edges_for_2d_case_1_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('scale_1', 'scale_1_data'),
    ('axes_1', 'axes_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_3', {'in': 0}),
    ('size_3', 'size_3_data'),
    ('scale_3', 'scale_3_data'),
    ('axes_3', 'axes_3_data'),
    ('size_3_data', 'interpolate_3', {'in': 1}),
    ('scale_3_data', 'interpolate_3', {'in': 2}),
    ('axes_3_data', 'interpolate_3', {'in': 3}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Fixtures for 2D case 1, opset1 flavor: same three-Interpolate chain as the
# opset4 variant above, but opset1 Interpolate carries 'axes' as an attribute
# and takes target sizes as its single second input.
graph_node_attrs_for_2d_case_1 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'scale_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Edges for the opset1 chain (two inputs per Interpolate: data and sizes).
edges_for_2d_case_1 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'interpolate_3', {'in': 0}),
    ('scale_3', 'scale_3_data'),
    ('scale_3_data', 'interpolate_3', {'in': 1}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Fixtures for 2D case 2: a single Interpolate -- nothing to fuse.
graph_node_attrs_for_2d_case_2 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_2d_case_2 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Fixtures for 2D case 3: three Interpolates with *different* modes
# (nearest, linear, cubic) -- mode mismatch should prevent fusion.
graph_node_attrs_for_2d_case_3 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'cubic',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Case 3 uses the same topology as case 1.
edges_for_2d_case_3 = edges_for_2d_case_1
# Fixtures for 2D case 4, opset4 flavor ('new_*' variants): two linear-mode
# Interpolates on different axes (2 then 3) sharing all attributes, so they
# are expected to fuse into one.
new_graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'size_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0])
    },
    'scale_1_data': {'value': np.array([10.0]), 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

new_edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('size_2', 'size_2_data'),
    ('size_2_data', 'interpolate_2', {'in': 1}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 2}),
    ('axes_2', 'axes_2_data'),
    ('axes_2_data', 'interpolate_2', {'in': 3}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Expected result for 2D case 4, opset4 flavor: the two Interpolates merged
# into a single op over axes [2, 3] with combined sizes/scales.
new_ref_graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0, 2.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

new_ref_edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Older opset4 fixture variant for case 4: no explicit 'shape_calculation_mode'
# and only three Interpolate inputs (data, sizes, axes) -- kept alongside the
# 'new_*' variant above.
graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 2}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('axes_2', 'axes_2_data'),
    ('axes_2_data', 'interpolate_2', {'in': 2}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Fixtures for 2D case 4, opset1 flavor: two linear-mode Interpolates on
# axes 2 and 3 with identical attributes (align_corners/antialias/pads).
graph_node_attrs_for_2d_case_4 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'scale_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_2d_case_4 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]

# Fixtures for 2D case 6: two Interpolates with *overlapping* axes
# ([2, 3] then [2]) and identity sizes -- shapes are unchanged end to end.
graph_node_attrs_for_2d_case_6 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220, 350])
    },
    'scale_1_data': {'value': None, 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Case 6 reuses the two-Interpolate topology of case 4.
edges_for_2d_case_6 = edges_for_2d_case_4
# Expected result for 3D case 1, opset4 flavor: interpolate_1 (axes [2, 4])
# and interpolate_2 (axis 3) fused into one Interpolate over axes [2, 3, 4];
# interpolate_3 (axis 4, downscale to 512) remains separate.
new_ref_graph_node_attrs_for_3d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 5.0, 3.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])
    },
    'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# The fused 3D reference has the same topology as the fused 2D case 1.
new_ref_edges_for_3d_case_1_opset4_case = ref_edges_for_2d_case_1_opset4_case

# Input fixtures for 3D case 1, opset4 flavor: three nearest-mode
# Interpolates (shape_calculation_mode='sizes') on axes [2, 4], [3], [4].
new_graph_node_attrs_for_3d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])
    },
    'size_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 3.0])
    },
    'scale_1_data': {'value': np.array([4.0, 3.0]), 'shape': [2], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])
    },
    'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])
    },
    'size_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([5.0])
    },
    'scale_2_data': {'value': np.array([5.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'size_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])
    },
    'scale_3_data': {'value': np.array([512.0 / 2400.0]), 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])
    },
    'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

new_edges_for_3d_case_1_opset4_case = edges_for_2d_case_1_opset4_case

# Fixtures for 3D case 1, opset1 flavor: same axes pattern as the opset4
# variant above ([2, 4], [3], [4]), axes carried as op attributes.
graph_node_attrs_for_3d_case_1 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])
    },
    'scale_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 4]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])
    },
    'scale_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'scale_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([4]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_3d_case_1 = edges_for_2d_case_1

# Fixtures for 3D case 2: a single two-axis Interpolate -- nothing to fuse.
graph_node_attrs_for_3d_case_2 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280])
    },
    # NOTE(review): 'value' above has two elements but 'shape' here is [1];
    # the analogous case-6 fixture uses [2]. Looks like a typo -- confirm
    # whether build_graph/the pass reads this field before changing it.
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_3d_case_2 = edges_for_2d_case_2

# Fixtures for 3D case 3: three Interpolates with different modes
# (nearest, linear, cubic) on axes 2, 4, 3 -- fusion must be rejected.
graph_node_attrs_for_3d_case_3 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([16, 44, 512, 87, 790]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([256])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 790]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2370])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([4]),
        'mode': 'linear',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 2370]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([435])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'cubic',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_3d_case_3 = edges_for_2d_case_3
# Expected result for 3D case 4, opset4 flavor: the two linear Interpolates
# (axes [2, 4] then [3]) merged into one over axes [2, 3, 4], with scales
# recomputed as target/source ratios.
new_ref_graph_node_attrs_for_3d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const',
        'value': np.array([4599.0 / 511.0, 912.0 / 416.0, 133120.0 / 10240.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Fully-fused result shares the single-Interpolate topology of the 2D case.
new_ref_edges_for_3d_case_4_opset4_case = new_ref_edges_for_2d_case_4_opset4_case

# Input fixtures for 3D case 4, opset4 flavor: two linear Interpolates with
# identical attributes on disjoint axis sets ([2, 4] and [3]).
new_graph_node_attrs_for_3d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])
    },
    'size_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0])
    },
    'scale_1_data': {'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0]), 'shape': [2], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])
    },
    'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])
    },
    'size_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([912.0 / 416.0])
    },
    'scale_2_data': {'value': np.array([912.0 / 416.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

new_edges_for_3d_case_4_opset4_case = new_edges_for_2d_case_4_opset4_case

# Fixtures for 3D case 4, opset1 flavor: same axis pattern with opset1
# attributes (align_corners/antialias/pads).
graph_node_attrs_for_3d_case_4 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])
    },
    'scale_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 4]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])
    },
    'scale_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

edges_for_3d_case_4 = edges_for_2d_case_4
class InterpolateSequenceToInterpolateTest(unittest.TestCase):
    """Unit tests for the InterpolateSequenceToInterpolate transformation.

    Each test builds an input graph from module-level fixtures, runs the
    transformation, and compares the result against a reference graph with
    compare_graphs().  "Fusable" chains (consecutive Interpolate ops with
    matching mode/attributes) must be merged; chains with differing modes or
    attributes must be left untouched (reference graph == input graph).
    """
    def test_2d_interpolate_sequence_1(self):
        """A chain of three opset1 'nearest' Interpolates (axes [2], [3], [2])
        is fused into two: the first two merge into one op over axes [2, 3]."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_1,
            edges=edges_for_2d_case_1
        )
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 4, 220, 350]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale_1': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])
                },
                'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_1': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
                'scale_2': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
                },
                'scale_2_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_2': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate_1', {'in': 0}),
                ('scale_1', 'scale_1_data'),
                ('scale_1_data', 'interpolate_1', {'in': 1}),
                ('interpolate_1', 'interpolate_1_data'),
                ('scale_2', 'scale_2_data'),
                ('interpolate_2', 'interpolate_2_data'),
                ('interpolate_1_data', 'interpolate_2', {'in': 0}),
                ('scale_2_data', 'interpolate_2', {'in': 1}),
                ('interpolate_2_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_1_opset4_case(self):
        """Same fusion as case 1, but with opset4 Interpolate nodes; expected
        result is given by the ref_* opset4 case-1 fixtures."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_1_opset4_case,
            edges=edges_for_2d_case_1_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=ref_graph_node_attrs_for_2d_case_1_opset4_case,
            edges=ref_edges_for_2d_case_1_opset4_case
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_2(self):
        """A single Interpolate (nothing to fuse): the graph must be unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_2,
            edges=edges_for_2d_case_2
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_2,
            edges=edges_for_2d_case_2
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_3(self):
        """Chained Interpolates with different modes (nearest/linear/cubic)
        must not be fused: the graph must be unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_3,
            edges=edges_for_2d_case_3
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_3,
            edges=edges_for_2d_case_3
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_4(self):
        """Two opset1 'linear' Interpolates with identical attributes (axes [2]
        then [3]) are fused into one Interpolate over axes [2, 3]."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4,
            edges=edges_for_2d_case_4
        )
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 4, 220, 350]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])
                },
                'scale_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3]),
                    'mode': 'linear',
                    'align_corners': 0,
                    'antialias': 1,
                    'pads_begin': 5,
                    'pads_end': 3,
                    'version': 'opset1'
                },
                'interpolate_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate', {'in': 0}),
                ('scale', 'scale_data'),
                ('scale_data', 'interpolate', {'in': 1}),
                ('interpolate', 'interpolate_data'),
                ('interpolate_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_4_opset4_case(self):
        """opset4 variant of case 4: the pair of 'linear' Interpolates with
        matching attributes is fused (expected result in new_ref_* fixtures)."""
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_2d_case_4_opset4_case,
            edges=new_edges_for_2d_case_4_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_2d_case_4_opset4_case,
            edges=new_ref_edges_for_2d_case_4_opset4_case
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_5(self):
        """Like case 4, but interpolate_1's attributes are altered so the two
        ops no longer match: no fusion, graph must be unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4,
            edges=edges_for_2d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0
                }
            }
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4,
            edges=edges_for_2d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0
                }
            }
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_5_opset4_case(self):
        """opset4 variant of case 5: mismatched antialias/cube_coeff prevent
        fusion, so the graph must be unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case,
            edges=edges_for_2d_case_4_opset4_case,
            update_attributes={
                'interpolate_1': {
                    'antialias': 0, 'cube_coeff': -0.1
                }
            }
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case,
            edges=edges_for_2d_case_4_opset4_case,
            update_attributes={
                'interpolate_1': {
                    'antialias': 0, 'cube_coeff': -0.1
                }
            }
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_2d_interpolate_sequence_6(self):
        """Case 6 fixture (sequence that must not be rewritten): the graph
        must be unchanged after the transformation."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_6,
            edges=edges_for_2d_case_6,
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_6,
            edges=edges_for_2d_case_6
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_1(self):
        """3D analogue of case 1: three 'nearest' Interpolates (axes [2, 3, 4],
        then axis [4] twice) are fused into two ops."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_1,
            edges=edges_for_3d_case_1
        )
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 5, 1024, 256, 800]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale_1': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])
                },
                'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_1': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3, 4]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
                'scale_2': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
                },
                'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
                'interpolate_2': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([4]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate_1', {'in': 0}),
                ('scale_1', 'scale_1_data'),
                ('scale_1_data', 'interpolate_1', {'in': 1}),
                ('interpolate_1', 'interpolate_1_data'),
                ('scale_2', 'scale_2_data'),
                ('interpolate_2', 'interpolate_2_data'),
                ('interpolate_1_data', 'interpolate_2', {'in': 0}),
                ('scale_2_data', 'interpolate_2', {'in': 1}),
                ('interpolate_2_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_1_opset4_case(self):
        """opset4 variant of 3D case 1; expected result given by the
        new_ref_* 3D case-1 opset4 fixtures."""
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_3d_case_1_opset4_case,
            edges=new_edges_for_3d_case_1_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_3d_case_1_opset4_case,
            edges=new_ref_edges_for_3d_case_1_opset4_case
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_2(self):
        """3D analogue of case 2 (single Interpolate): graph unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_2,
            edges=edges_for_3d_case_2
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_2,
            edges=edges_for_3d_case_2
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_3(self):
        """3D analogue of case 3 (mixed modes): no fusion, graph unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_3,
            edges=edges_for_3d_case_3
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_3,
            edges=edges_for_3d_case_3
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_4(self):
        """Two matching 3D 'linear' opset1 Interpolates (axes [2, 4] then [3])
        are fused into one Interpolate over axes [2, 3, 4]."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_4,
            edges=edges_for_3d_case_4
        )
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([10, 64, 511, 416, 10240]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])
                },
                'scale_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3, 4]),
                    'mode': 'linear',
                    'align_corners': 0,
                    'antialias': 1,
                    'pads_begin': 5,
                    'pads_end': 3,
                    'version': 'opset1'
                },
                'interpolate_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate', {'in': 0}),
                ('scale', 'scale_data'),
                ('scale_data', 'interpolate', {'in': 1}),
                ('interpolate', 'interpolate_data'),
                ('interpolate_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_4_opset4_case(self):
        """opset4 variant of 3D case 4; expected result given by the
        new_ref_* 3D case-4 opset4 fixtures."""
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_3d_case_4_opset4_case,
            edges=new_edges_for_3d_case_4_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_3d_case_4_opset4_case,
            edges=new_ref_edges_for_3d_case_4_opset4_case
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
    def test_3d_interpolate_sequence_5(self):
        """3D analogue of case 5: interpolate_1's attributes are altered so
        the pair no longer matches — no fusion, graph unchanged."""
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_4,
            edges=edges_for_3d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7
                }
            }
        )
        ref_graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_4,
            edges=edges_for_3d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7
                }
            }
        )
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)
| 37.512934 | 119 | 0.519156 |
import numpy as np
import unittest
from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph
# Test fixture (2D case 1, opset4): a chain of three opset4 'nearest'
# Interpolate ops with shape_calculation_mode='scales', over axes [2], [3],
# and [2].  Each opset4 Interpolate takes sizes (port 1), scales (port 2) and
# axes (port 3) inputs.  The first two ops are expected to be fused.
graph_node_attrs_for_2d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'size_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0])
    },
    'scale_1_data': {'value': np.array([3.0]), 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'size_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_3_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list wiring the three opset4 Interpolates in a chain; ports 1-3 carry
# sizes, scales and axes respectively.
edges_for_2d_case_1_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('scale_1', 'scale_1_data'),
    ('axes_1', 'axes_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('size_2', 'size_2_data'),
    ('scale_2', 'scale_2_data'),
    ('axes_2', 'axes_2_data'),
    ('size_2_data', 'interpolate_2', {'in': 1}),
    ('scale_2_data', 'interpolate_2', {'in': 2}),
    ('axes_2_data', 'interpolate_2', {'in': 3}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'interpolate_3', {'in': 0}),
    ('size_3', 'size_3_data'),
    ('scale_3', 'scale_3_data'),
    ('axes_3', 'axes_3_data'),
    ('size_3_data', 'interpolate_3', {'in': 1}),
    ('scale_3_data', 'interpolate_3', {'in': 2}),
    ('axes_3_data', 'interpolate_3', {'in': 3}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Expected result for the 2D opset4 case 1: the first two Interpolates are
# merged into one op over axes [2, 3] (with combined sizes/scales and default
# opset4 attributes made explicit); the third Interpolate is kept as-is.
ref_graph_node_attrs_for_2d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0, 2.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'antialias': 0,
        'pads_begin': int64_array([0]),
        'pads_end': int64_array([0]),
        'coordinate_transformation_mode': 'half_pixel',
        'nearest_mode': 'round_prefer_floor',
        'cube_coeff': -0.75,
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the reference graph above: only two Interpolates remain.
ref_edges_for_2d_case_1_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('scale_1', 'scale_1_data'),
    ('axes_1', 'axes_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_3', {'in': 0}),
    ('size_3', 'size_3_data'),
    ('scale_3', 'scale_3_data'),
    ('axes_3', 'axes_3_data'),
    ('size_3_data', 'interpolate_3', {'in': 1}),
    ('scale_3_data', 'interpolate_3', {'in': 2}),
    ('axes_3_data', 'interpolate_3', {'in': 3}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Test fixture (2D case 1, opset1): a chain of three opset1 'nearest'
# Interpolates over axes [2], [3], [2].  The opset1 form takes a single
# sizes input on port 1 and stores axes as a node attribute.
graph_node_attrs_for_2d_case_1 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'scale_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the opset1 three-Interpolate chain (sizes input on port 1).
edges_for_2d_case_1 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'interpolate_3', {'in': 0}),
    ('scale_3', 'scale_3_data'),
    ('scale_3_data', 'interpolate_3', {'in': 1}),
    ('interpolate_3', 'interpolate_3_data'),
    ('interpolate_3_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Test fixture (2D case 2): a single opset1 'nearest' Interpolate — there is
# no sequence, so the transformation must leave the graph unchanged.
graph_node_attrs_for_2d_case_2 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the single-Interpolate graph.
edges_for_2d_case_2 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Test fixture (2D case 3): three chained opset1 Interpolates with DIFFERENT
# modes ('nearest', 'linear', 'cubic') — the modes do not match, so no fusion
# is expected and the graph must stay unchanged.
graph_node_attrs_for_2d_case_3 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'cubic',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Case 3 has the same topology as case 1, only node attributes differ.
edges_for_2d_case_3 = edges_for_2d_case_1
# Test fixture (2D case 4, opset4, full three-input form): two opset4 'linear'
# Interpolates (axes [2] then [3]) with identical attributes, each fed by
# sizes/scales/axes Consts.  Expected to be fused into a single Interpolate
# (see new_ref_graph_node_attrs_for_2d_case_4_opset4_case).
new_graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'size_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0])
    },
    'scale_1_data': {'value': np.array([10.0]), 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0])
    },
    'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the fixture above (ports: 0=data, 1=sizes, 2=scales, 3=axes).
new_edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('size_2', 'size_2_data'),
    ('size_2_data', 'interpolate_2', {'in': 1}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 2}),
    ('axes_2', 'axes_2_data'),
    ('axes_2_data', 'interpolate_2', {'in': 3}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Expected result for the opset4 case 4 above: a single fused Interpolate over
# axes [2, 3] with the combined sizes [2200, 700] and scales [10.0, 2.0].
new_ref_graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0, 2.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'shape_calculation_mode': 'scales',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the fused reference graph: only one Interpolate remains.
new_ref_edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('size_1', 'size_1_data'),
    ('size_1_data', 'interpolate_1', {'in': 1}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 2}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 3}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Test fixture (2D case 4, opset4, two-input form): two opset4 'linear'
# Interpolates with identical attributes, wired with sizes on port 1 and axes
# on port 2 (no separate scales input in this variant).  Used directly by the
# "case 5" opset4 test, which perturbs interpolate_1's attributes to block
# fusion.
graph_node_attrs_for_2d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2])
    },
    'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'coordinate_transformation_mode': 'asymmetric',
        'nearest_mode': 'simple',
        'cube_coeff': -0.4,
        'antialias': 1,
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the two-input opset4 variant (port 1 = sizes, port 2 = axes).
edges_for_2d_case_4_opset4_case = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('axes_1', 'axes_1_data'),
    ('axes_1_data', 'interpolate_1', {'in': 2}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('axes_2', 'axes_2_data'),
    ('axes_2_data', 'interpolate_2', {'in': 2}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# Test fixture (2D case 4, opset1): two opset1 'linear' Interpolates with
# identical attributes (axes [2] then [3]).  Expected to be fused into one
# Interpolate over axes [2, 3] (see test_2d_interpolate_sequence_4); also
# reused by case 5, which perturbs interpolate_1's attributes to block fusion.
graph_node_attrs_for_2d_case_4 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200])
    },
    'scale_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700])
    },
    'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Edge list for the opset1 two-Interpolate chain; shared with the 3D case 4.
edges_for_2d_case_4 = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'interpolate_1', {'in': 0}),
    ('scale_1', 'scale_1_data'),
    ('scale_1_data', 'interpolate_1', {'in': 1}),
    ('interpolate_1', 'interpolate_1_data'),
    ('interpolate_1_data', 'interpolate_2', {'in': 0}),
    ('scale_2', 'scale_2_data'),
    ('scale_2_data', 'interpolate_2', {'in': 1}),
    ('interpolate_2', 'interpolate_2_data'),
    ('interpolate_2_data', 'abs'),
    ('abs', 'abs_data'),
    ('abs_data', 'output'),
]
# 2D case 6 (opset1): the second Interpolate's axes ([2]) overlap the
# first one's axes ([2, 3]), so this sequence must not be fused.
graph_node_attrs_for_2d_case_6 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 4, 220, 350]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220, 350])
    },
    'scale_1_data': {'value': None, 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
# Case 6 reuses the same edge topology as case 4.
edges_for_2d_case_6 = edges_for_2d_case_4
# 3D case 1 (opset4), expected result: the first two nearest-mode
# Interpolate ops are fused into a single op over axes [2, 3, 4]; the
# third one (axis [4]) is kept separate.
new_ref_graph_node_attrs_for_3d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 5.0, 3.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])
    },
    'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
new_ref_edges_for_3d_case_1_opset4_case = ref_edges_for_2d_case_1_opset4_case
# 3D case 1 (opset4), input: three nearest-mode Interpolate ops over axes
# [2, 4], [3] and [4] respectively.
new_graph_node_attrs_for_3d_case_1_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])
    },
    'size_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 3.0])
    },
    'scale_1_data': {'value': np.array([4.0, 3.0]), 'shape': [2], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])
    },
    'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])
    },
    'size_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([5.0])
    },
    'scale_2_data': {'value': np.array([5.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'size_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'size_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0])
    },
    'scale_3_data': {'value': np.array([512.0 / 2400.0]), 'shape': [1], 'kind': 'data'},
    'axes_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4])
    },
    'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'nearest',
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
new_edges_for_3d_case_1_opset4_case = edges_for_2d_case_1_opset4_case
# 3D case 1 (opset1): three nearest-mode Interpolate ops over axes
# [2, 4], [3] and [4]; the first two are expected to be fused.
graph_node_attrs_for_3d_case_1 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400])
    },
    'scale_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 4]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280])
    },
    'scale_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
    },
    'scale_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([4]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
edges_for_3d_case_1 = edges_for_2d_case_1
# 3D case 2 (opset1): a single Interpolate -- there is no sequence to fuse,
# so the transformation must leave the graph unchanged.
graph_node_attrs_for_3d_case_2 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([1, 5, 1024, 256, 800]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280])
    },
    # NOTE(review): the const value above has two elements but 'shape' is [1];
    # other cases use [2] for two-element consts -- confirm this is intended.
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 3]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
edges_for_3d_case_2 = edges_for_2d_case_2
# 3D case 3 (opset1): three Interpolate ops with different modes
# ('nearest', 'linear', 'cubic') -- mixed modes must not be fused.
graph_node_attrs_for_3d_case_3 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([16, 44, 512, 87, 790]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([256])
    },
    'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2]),
        'mode': 'nearest',
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 790]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2370])
    },
    'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([4]),
        'mode': 'linear',
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 2370]), 'kind': 'data'},
    'scale_3': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([435])
    },
    'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'},
    'interpolate_3': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'cubic',
        'version': 'opset1'
    },
    'interpolate_3_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
edges_for_3d_case_3 = edges_for_2d_case_3
# 3D case 4 (opset4), expected result: two 'linear' Interpolate ops over
# axes [2, 4] and [3] fused into one op over axes [2, 3, 4].
new_ref_graph_node_attrs_for_3d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])
    },
    'size_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const',
        'value': np.array([4599.0 / 511.0, 912.0 / 416.0, 133120.0 / 10240.0])
    },
    'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4])
    },
    'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
new_ref_edges_for_3d_case_4_opset4_case = new_ref_edges_for_2d_case_4_opset4_case
# 3D case 4 (opset4), input: the two 'linear' Interpolate ops before fusion.
new_graph_node_attrs_for_3d_case_4_opset4_case = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'size_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])
    },
    'size_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0])
    },
    'scale_1_data': {'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0]), 'shape': [2], 'kind': 'data'},
    'axes_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4])
    },
    'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'},
    'size_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])
    },
    'size_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([912.0 / 416.0])
    },
    'scale_2_data': {'value': np.array([912.0 / 416.0]), 'shape': [1], 'kind': 'data'},
    'axes_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3])
    },
    'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'mode': 'linear',
        'antialias': 1,
        'shape_calculation_mode': 'sizes',
        'version': 'opset4'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
new_edges_for_3d_case_4_opset4_case = new_edges_for_2d_case_4_opset4_case
# 3D case 4 (opset1): two 'linear' Interpolate ops over axes [2, 4] and [3]
# with identical attributes -- a fusible sequence.
graph_node_attrs_for_3d_case_4 = {
    'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {
        'value': None,
        'shape': int64_array([10, 64, 511, 416, 10240]),
        'kind': 'data',
        'data_type': None
    },
    'scale_1': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120])
    },
    'scale_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'},
    'interpolate_1': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([2, 4]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'},
    'scale_2': {
        'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912])
    },
    'scale_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'},
    'interpolate_2': {
        'type': 'Interpolate',
        'kind': 'op',
        'op': 'Interpolate',
        'axes': int64_array([3]),
        'mode': 'linear',
        'align_corners': 0,
        'antialias': 1,
        'pads_begin': 5,
        'pads_end': 3,
        'version': 'opset1'
    },
    'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
    'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}
edges_for_3d_case_4 = edges_for_2d_case_4
class InterpolateSequenceToInterpolateTest(unittest.TestCase):
    """Tests for the InterpolateSequenceToInterpolate transformation.

    Every test builds a graph containing a sequence of Interpolate operations,
    runs the transformation and compares the result against a reference graph.
    Tests for non-fusible sequences use the (unchanged) input graph itself as
    the reference.
    """

    def _apply_and_compare(self, graph, ref_graph):
        # Run the transformation and assert that the resulting graph matches
        # the reference graph up to the 'output' node.
        InterpolateSequenceToInterpolate().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph, ref_graph, 'output')
        self.assertTrue(flag, resp)

    def _assert_graph_unchanged(self, nodes_attrs, edges, update_attributes=None):
        # Build two identical graphs from the same attributes and check that
        # the transformation leaves the graph untouched.
        graph = build_graph(nodes_attrs=nodes_attrs, edges=edges, update_attributes=update_attributes)
        ref_graph = build_graph(nodes_attrs=nodes_attrs, edges=edges, update_attributes=update_attributes)
        self._apply_and_compare(graph, ref_graph)

    def test_2d_interpolate_sequence_1(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_1,
            edges=edges_for_2d_case_1
        )
        # Reference: first a fused Interpolate over axes [2, 3], then the
        # remaining Interpolate over axis [2].
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 4, 220, 350]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale_1': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700])
                },
                'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_1': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'},
                'scale_2': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320])
                },
                'scale_2_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_2': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate_1', {'in': 0}),
                ('scale_1', 'scale_1_data'),
                ('scale_1_data', 'interpolate_1', {'in': 1}),
                ('interpolate_1', 'interpolate_1_data'),
                ('scale_2', 'scale_2_data'),
                ('interpolate_2', 'interpolate_2_data'),
                ('interpolate_1_data', 'interpolate_2', {'in': 0}),
                ('scale_2_data', 'interpolate_2', {'in': 1}),
                ('interpolate_2_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        self._apply_and_compare(graph, ref_graph)

    def test_2d_interpolate_sequence_1_opset4_case(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_1_opset4_case,
            edges=edges_for_2d_case_1_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=ref_graph_node_attrs_for_2d_case_1_opset4_case,
            edges=ref_edges_for_2d_case_1_opset4_case
        )
        self._apply_and_compare(graph, ref_graph)

    def test_2d_interpolate_sequence_2(self):
        # A single Interpolate: nothing to fuse, graph must stay unchanged.
        self._assert_graph_unchanged(graph_node_attrs_for_2d_case_2, edges_for_2d_case_2)

    def test_2d_interpolate_sequence_3(self):
        # Interpolations with different modes must not be fused.
        self._assert_graph_unchanged(graph_node_attrs_for_2d_case_3, edges_for_2d_case_3)

    def test_2d_interpolate_sequence_4(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_2d_case_4,
            edges=edges_for_2d_case_4
        )
        # Reference: both 'linear' interpolations fused into one Interpolate
        # over axes [2, 3].
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 4, 220, 350]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700])
                },
                'scale_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3]),
                    'mode': 'linear',
                    'align_corners': 0,
                    'antialias': 1,
                    'pads_begin': 5,
                    'pads_end': 3,
                    'version': 'opset1'
                },
                'interpolate_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate', {'in': 0}),
                ('scale', 'scale_data'),
                ('scale_data', 'interpolate', {'in': 1}),
                ('interpolate', 'interpolate_data'),
                ('interpolate_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        self._apply_and_compare(graph, ref_graph)

    def test_2d_interpolate_sequence_4_opset4_case(self):
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_2d_case_4_opset4_case,
            edges=new_edges_for_2d_case_4_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_2d_case_4_opset4_case,
            edges=new_ref_edges_for_2d_case_4_opset4_case
        )
        self._apply_and_compare(graph, ref_graph)

    def test_2d_interpolate_sequence_5(self):
        # Interpolations whose attributes differ must not be fused.
        self._assert_graph_unchanged(
            graph_node_attrs_for_2d_case_4,
            edges_for_2d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0
                }
            }
        )

    def test_2d_interpolate_sequence_5_opset4_case(self):
        # Opset4 interpolations whose attributes differ must not be fused.
        self._assert_graph_unchanged(
            graph_node_attrs_for_2d_case_4_opset4_case,
            edges_for_2d_case_4_opset4_case,
            update_attributes={
                'interpolate_1': {
                    'antialias': 0, 'cube_coeff': -0.1
                }
            }
        )

    def test_2d_interpolate_sequence_6(self):
        # Interpolations with overlapping axes must not be fused.
        self._assert_graph_unchanged(graph_node_attrs_for_2d_case_6, edges_for_2d_case_6)

    def test_3d_interpolate_sequence_1(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_1,
            edges=edges_for_3d_case_1
        )
        # Reference: first a fused Interpolate over axes [2, 3, 4], then the
        # remaining Interpolate over axis [4].
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([1, 5, 1024, 256, 800]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale_1': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400])
                },
                'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate_1': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3, 4]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'},
                'scale_2': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512])
                },
                'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'},
                'interpolate_2': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([4]),
                    'mode': 'nearest',
                    'version': 'opset1'
                },
                'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate_1', {'in': 0}),
                ('scale_1', 'scale_1_data'),
                ('scale_1_data', 'interpolate_1', {'in': 1}),
                ('interpolate_1', 'interpolate_1_data'),
                ('scale_2', 'scale_2_data'),
                ('interpolate_2', 'interpolate_2_data'),
                ('interpolate_1_data', 'interpolate_2', {'in': 0}),
                ('scale_2_data', 'interpolate_2', {'in': 1}),
                ('interpolate_2_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        self._apply_and_compare(graph, ref_graph)

    def test_3d_interpolate_sequence_1_opset4_case(self):
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_3d_case_1_opset4_case,
            edges=new_edges_for_3d_case_1_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_3d_case_1_opset4_case,
            edges=new_ref_edges_for_3d_case_1_opset4_case
        )
        self._apply_and_compare(graph, ref_graph)

    def test_3d_interpolate_sequence_2(self):
        # A single Interpolate: nothing to fuse, graph must stay unchanged.
        self._assert_graph_unchanged(graph_node_attrs_for_3d_case_2, edges_for_3d_case_2)

    def test_3d_interpolate_sequence_3(self):
        # Interpolations with different modes must not be fused.
        self._assert_graph_unchanged(graph_node_attrs_for_3d_case_3, edges_for_3d_case_3)

    def test_3d_interpolate_sequence_4(self):
        graph = build_graph(
            nodes_attrs=graph_node_attrs_for_3d_case_4,
            edges=edges_for_3d_case_4
        )
        # Reference: both 'linear' interpolations fused into one Interpolate
        # over axes [2, 3, 4].
        ref_graph = build_graph(
            nodes_attrs={
                'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
                'placeholder_data': {
                    'value': None,
                    'shape': int64_array([10, 64, 511, 416, 10240]),
                    'kind': 'data',
                    'data_type': None
                },
                'scale': {
                    'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120])
                },
                'scale_data': {'value': None, 'shape': None, 'kind': 'data'},
                'interpolate': {
                    'type': 'Interpolate',
                    'kind': 'op',
                    'op': 'Interpolate',
                    'axes': int64_array([2, 3, 4]),
                    'mode': 'linear',
                    'align_corners': 0,
                    'antialias': 1,
                    'pads_begin': 5,
                    'pads_end': 3,
                    'version': 'opset1'
                },
                'interpolate_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
                'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'},
                'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'},
                'output': {'kind': 'op', 'op': 'Result'},
            },
            edges=[
                ('placeholder', 'placeholder_data'),
                ('placeholder_data', 'interpolate', {'in': 0}),
                ('scale', 'scale_data'),
                ('scale_data', 'interpolate', {'in': 1}),
                ('interpolate', 'interpolate_data'),
                ('interpolate_data', 'abs'),
                ('abs', 'abs_data'),
                ('abs_data', 'output'),
            ]
        )
        self._apply_and_compare(graph, ref_graph)

    def test_3d_interpolate_sequence_4_opset4_case(self):
        graph = build_graph(
            nodes_attrs=new_graph_node_attrs_for_3d_case_4_opset4_case,
            edges=new_edges_for_3d_case_4_opset4_case
        )
        ref_graph = build_graph(
            nodes_attrs=new_ref_graph_node_attrs_for_3d_case_4_opset4_case,
            edges=new_ref_edges_for_3d_case_4_opset4_case
        )
        self._apply_and_compare(graph, ref_graph)

    def test_3d_interpolate_sequence_5(self):
        # Interpolations whose attributes differ must not be fused.
        self._assert_graph_unchanged(
            graph_node_attrs_for_3d_case_4,
            edges_for_3d_case_4,
            update_attributes={
                'interpolate_1': {
                    'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7
                }
            }
        )
| true | true |
f73e8577eb976662de1a03926e2323985b4d4c7a | 1,376 | py | Python | addons/Sprytile-6b68d00/rx/internal/priorityqueue.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | 733 | 2017-08-22T09:47:54.000Z | 2022-03-27T23:56:52.000Z | rx/internal/priorityqueue.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | [
"MIT"
] | 74 | 2017-08-16T09:13:05.000Z | 2022-03-15T02:31:49.000Z | rx/internal/priorityqueue.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | [
"MIT"
] | 77 | 2017-09-14T16:56:11.000Z | 2022-03-27T13:55:16.000Z | import heapq
import rx
class PriorityQueue(object):
    """A lock-protected priority queue used for scheduling.

    Entries are stored in a binary heap as ``(item, sequence)`` pairs; the
    monotonically increasing sequence number breaks ties between items of
    equal priority, so equal items are dequeued in FIFO order.
    """

    def __init__(self, capacity=None):
        self.items = []
        self.count = 0  # monotonically increasing tie-breaker for sort stability
        self.lock = rx.config.get("Lock")()

    def __len__(self):
        """Return the number of queued items."""
        return len(self.items)

    def peek(self):
        """Return the smallest item without removing it."""
        return self.items[0][0]

    def remove_at(self, index):
        """Remove and return the item stored at position ``index``."""
        with self.lock:
            removed = self.items.pop(index)[0]
            # Popping from an arbitrary position breaks the heap invariant.
            heapq.heapify(self.items)
        return removed

    def dequeue(self):
        """Remove and return the item with the lowest priority."""
        with self.lock:
            smallest = heapq.heappop(self.items)[0]
        return smallest

    def enqueue(self, item):
        """Add ``item`` to the queue."""
        with self.lock:
            heapq.heappush(self.items, (item, self.count))
            self.count += 1

    def remove(self, item):
        """Remove the first entry equal to ``item``; return True if one was found."""
        with self.lock:
            for position, entry in enumerate(self.items):
                if entry[0] == item:
                    self.items.pop(position)
                    heapq.heapify(self.items)
                    return True
        return False
| 23.724138 | 70 | 0.551599 | import heapq
import rx
class PriorityQueue(object):
    """A lock-protected priority queue for scheduling; entries are stored as
    (item, sequence) pairs so equal items dequeue in insertion order."""
    def __init__(self, capacity=None):
        self.items = []
        self.count = 0  # monotonically increasing tie-breaker for sort stability
        self.lock = rx.config.get("Lock")()
    def __len__(self):
        """Return the number of queued items."""
        return len(self.items)
    def peek(self):
        """Return the smallest item without removing it."""
        return self.items[0][0]
    def remove_at(self, index):
        """Remove and return the item stored at position `index`."""
        with self.lock:
            item = self.items.pop(index)[0]
            # popping from the middle breaks the heap invariant
            heapq.heapify(self.items)
            return item
    def dequeue(self):
        """Remove and return the item with the lowest priority."""
        with self.lock:
            item = heapq.heappop(self.items)[0]
            return item
    def enqueue(self, item):
        """Add `item` to the queue."""
        with self.lock:
            heapq.heappush(self.items, (item, self.count))
            self.count += 1
    def remove(self, item):
        """Remove the first entry equal to `item`; return True if found."""
        with self.lock:
            for index, _item in enumerate(self.items):
                if _item[0] == item:
                    self.items.pop(index)
                    heapq.heapify(self.items)
                    return True
        return False
| true | true |
f73e85bcadcc1dc382bfacc5c9411305e5bf78a8 | 1,006 | py | Python | entry/migrations/0019_visitordetails.py | Shrinidhi1904/AtlasCopco | 3116d8f7bdff9635952c3db741adc8abe93bfb72 | [
"MIT"
] | 1 | 2021-07-10T12:13:17.000Z | 2021-07-10T12:13:17.000Z | entry/migrations/0019_visitordetails.py | Shrinidhi1904/AtlasCopco | 3116d8f7bdff9635952c3db741adc8abe93bfb72 | [
"MIT"
] | null | null | null | entry/migrations/0019_visitordetails.py | Shrinidhi1904/AtlasCopco | 3116d8f7bdff9635952c3db741adc8abe93bfb72 | [
"MIT"
] | 10 | 2020-09-25T14:04:02.000Z | 2021-11-04T18:41:40.000Z | # Generated by Django 3.1.1 on 2021-01-30 17:49
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('entry', '0018_auto_20210121_1805'),
]
operations = [
migrations.CreateModel(
name='VisitorDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Name')),
('email', models.EmailField(max_length=254, validators=[django.core.validators.EmailValidator()], verbose_name='E-mail')),
('safety_training', models.BooleanField(verbose_name='Is Given Safety Training?')),
('visitor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='entry.visitor', verbose_name='Visitor:')),
],
),
]
| 38.692308 | 139 | 0.625249 |
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('entry', '0018_auto_20210121_1805'),
]
operations = [
migrations.CreateModel(
name='VisitorDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='Name')),
('email', models.EmailField(max_length=254, validators=[django.core.validators.EmailValidator()], verbose_name='E-mail')),
('safety_training', models.BooleanField(verbose_name='Is Given Safety Training?')),
('visitor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='entry.visitor', verbose_name='Visitor:')),
],
),
]
| true | true |
f73e85e463c8099c9e091af90a3c75d377448f2d | 14,449 | py | Python | src/spaceone/inventory/manager/collector_manager/filter_manager.py | jihyungSong/inventory | c8b0e4dff4c43693b773a1b486a757599ac29c8e | [
"Apache-2.0"
] | 1 | 2020-07-26T17:53:18.000Z | 2020-07-26T17:53:18.000Z | src/spaceone/inventory/manager/collector_manager/filter_manager.py | jihyungSong/inventory | c8b0e4dff4c43693b773a1b486a757599ac29c8e | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/collector_manager/filter_manager.py | jihyungSong/inventory | c8b0e4dff4c43693b773a1b486a757599ac29c8e | [
"Apache-2.0"
] | null | null | null | import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core import cache
from spaceone.core.manager import BaseManager
from spaceone.inventory.manager.collector_manager.collecting_manager import RESOURCE_MAP
_LOGGER = logging.getLogger(__name__)
class FilterManager(BaseManager):
"""
Transform filter for collector
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.CONSTANT_FILTER_CACHE_TIMEOUT = 86400 # 24 hours
#####################################################
# TODO: result of _get_collect_filter, and secret_id
""" I want to know the founded result from _get_collect_filter must be related with secret_id
If resource is not collected by this secret_id, I don't want to make collect call
"""
def get_collect_filter(self, filters, plugin_info, secret_id_list=[]):
""" Create new filters for Collect plugin's parameter
filter_format(filters) -> new_filter
Args:
filter_format(list): filter_format from plugin.options.filter_format or None
filters(dict): filters from Client request
Returns:
new_filter: new filters for Plugin(Collector) query
related_secret_id_list : list of secret matched on query
Example:
'filter_format': [{'change_rules': [{'change_key': 'instance_id',
'resource_key': 'data.compute.instance_id'},
{'change_key': 'region_name',
'resource_key': 'data.compute.region'}],
'key': 'project_id',
'name': 'Project ID',
'resource_type': 'SERVER',
'search_key': 'identity.Project.project_id',
'type': 'str'},
{'change_rules': [{'change_key': 'instance_id',
'resource_key': 'data.compute.instance_id'},
{'change_key': 'region_name',
'resource_key': 'data.compute.region'}],
'key': 'collection_info.service_accounts',
'name': 'Service Account ID',
'resource_type': 'SERVER',
'search_key': 'identity.ServiceAccount.service_account_id',
'type': 'str'},
{'change_rules': [{'change_key': 'instance_id',
'resource_key': 'data.compute.instance_id'},
{'change_key': 'region_name',
'resource_key': 'data.compute.region'}],
'key': 'server_id',
'name': 'Server ID',
'resource_type': 'SERVER',
'search_key': 'inventory.Server.server_id',
'type': 'list'},
{'key': 'instance_id',
'name': 'Instance ID',
'resource_type': 'CUSTOM',
'type': 'list'},
{'key': 'region_name',
'name': 'Region',
'resource_type': 'CUSTOM',
'type': 'list'}],
filters:
{
'region_id': 'region-xxxxx',
'zone_id': 'zone-yyyyy',
'instance_id': ['i-zzzz', ...]] # CUSTOM resource type
'instance_type': 'm4.xlarge'
}
new_filter:
{
'instance_id': ['i-123', 'i-2222', ...]
'instance_type': 'm4.xlarge'
}
related_secret_id_list: ['secret-12343', 'secret-254555' ...]
"""
metadata = plugin_info.get('metadata', None)
#############################
# WARNING
# options is old spec.
#############################
options = plugin_info.get('options', {})
if metadata:
options = metadata
filter_format = options.get('filter_format', None)
if filter_format is None:
_LOGGER.warning(f'[_get_collector_filter] No filter_format at plugin_info')
return {}, secret_id_list
if filters == {}:
_LOGGER.debug(f'[_get_collector_filter] No filters, do nothing')
return {}, secret_id_list
filter_format_by_key = {}
# filter_format_by_key['zone_id'] = {'key':'project_id', 'name':'Project ID' ...
for item in filter_format:
filter_format_by_key[item['key']] = item
for filter_key, filter_value in filters.items():
if filter_key not in filter_format_by_key:
_LOGGER.error(f'[_get_collect_filter] unsupported filter_key: {filter_key}')
# Strict error raise, for reducing too heavy requst
raise ERROR_UNSUPPORTED_FILTER_KEY(key=filter_key, value=filter_value)
query_filter, custom_keys = self._prepare_query_filter(filters, filter_format_by_key)
_LOGGER.debug(f'[_get_collect_filter] query_filter: {query_filter}, custom_keys: {custom_keys}')
query_per_resources = self._make_query_per_resources(query_filter, filter_format_by_key)
_LOGGER.debug(f'[_get_collect_filter] query_per_resources: {query_per_resources}')
new_filter, filtered_secret_id_list = self._search_resources(query_per_resources,
filter_format_by_key,
secret_id_list)
_LOGGER.debug(f'[_get_collect_filter] new_filter: {new_filter}')
related_secret_id_list = _intersection(secret_id_list, filtered_secret_id_list)
if len(custom_keys) > 0:
new_filter = self._append_custom_keys(new_filter, filters, custom_keys)
_LOGGER.debug(f'[_get_collect_filter] new_filter_with_custom_keys: {new_filter}')
return new_filter, related_secret_id_list
def cache_filter(self, collector_id, secret_id, data):
"""
FilerManager can save cache of filter for collect plugin
Save the region_name cache
Args:
data (dict): {
'region_name': list of region name,
'cloud_service': list of cloud service (for cloud service plugin)
}
Key: collector-filter:<collector_id>:<secret_id>
Value: region_name: [list of regions]
"""
key = f'collector-filter:{collector_id}:{secret_id}'
_LOGGER.debug(f'[cache_filter] {key} : {data}')
cache.set(key, data, expire=self.CONSTANT_FILTER_CACHE_TIMEOUT)
def _get_filer_cache(self, collector_id, secret_id):
key = f'collector-filter:{collector_id}:{secret_id}'
try:
data = cache.get(key)
_LOGGER.debug(f'[cache_filter] {key} : {data}')
return data
except Exception as e:
# May be no_cache
return None
def _prepare_query_filter(self, filters, filter_format_by_key):
query_filter = {}
"""
'region_id': [{'k': 'region_id', 'v': 'region-xxx', 'o': 'eq'}]
'server_id': [{'k': 'server_id', 'v': 'server-yyyy', 'o': 'eq'} ....]
...
"""
custom_keys = {}
# Foreach filter, we will find matched resource list
for filter_key, filter_value in filters.items():
# filter_key : region_id
filter_element = filter_format_by_key[filter_key]
_LOGGER.debug(f'[_prepare_query_filter] filter_element: {filter_element}')
if filter_element['resource_type'] == 'CUSTOM':
# DO NOT save CUSTOM key at query_filter
custom_keys[filter_key] = filter_element
continue
# list of new_filter[key]
v_list = query_filter.get(filter_key, [])
if filter_element:
# Ask to manager, is there any matched resource
query = self._make_query_for_manager(filter_key, filter_value, filter_element)
if isinstance(query, list) is False:
_LOGGER.error("LOGIC ERROR, _make_query_for_manager does not return list value: {query}")
else:
v_list.extend(query)
query_filter[filter_key] = v_list
return query_filter, custom_keys
def _make_query_per_resources(self, query_filter, filter_format_by_key):
# Make query per Resource
query_per_resources = {}
"""
'SERVER': {
'key': 'zone_id',
'filter': [{'k': 'region_id', 'v': 'region-xxxx', 'o': 'eq'}],
'filter_or': [{'k': 'server_id', 'v': 'server-yyyy', 'o': 'eq'}, ...]
}
"""
for query_key, query in query_filter.items():
res_type = filter_format_by_key[query_key]['resource_type']
query_string = query_per_resources.get(res_type, {'key': query_key, 'filter': [], 'filter_or': []})
if len(query) == 1:
query_string['filter'].extend(query)
elif len(query) > 1:
query_string['filter_or'].extend(query)
else:
_LOGGER.debug(f'[_get_collector_filter] wrong query: {query}')
query_per_resources[res_type] = query_string
return query_per_resources
def _search_resources(self, query_per_resources, filter_format_by_key, secret_id_list):
"""
# Search Resource by Resource's Manager
Returns: tuple of tranformed query, secret_id_list
tranformed_query {
'instance_id': [list of value],
}
related_secret_id_list : [list of secrets]
"""
result = {}
#secret_id_list = []
for res_type, query in query_per_resources.items():
""" Example
query: {'key': 'zone_id',
'filter': [
{'k': 'zone_id', 'v': 'zone-d710c1cb0ea7', 'o': 'eq'},
{'k': 'region_id', 'v': 'region-85445849c20c', 'o': 'eq'},
{'k': 'pool_id', 'v': 'pool-a1f35b107bb4', 'o': 'eq'}],
'filter_or': []}
"""
_LOGGER.debug(f'[_search_resources] query: {query}')
try:
mgr = self.locator.get_manager(RESOURCE_MAP[res_type])
except Exception as e:
_LOGGER.error('########## NOTICE to Developer (bug) ###################################')
_LOGGER.error(f'[_search_resources] Not found manager based on resource_type: {res_type}')
_LOGGER.error(e)
continue
"""
{'change_rules': [{'change_key': 'instance_id',
'resource_key': 'data.compute.instance_id'},
{'change_key': 'region_name',
'resource_key': 'data.compute.region'}],
"""
filter_element = filter_format_by_key[query['key']]
change_rules = filter_element['change_rules']
del query['key']
# Ask to manager
try:
_LOGGER.debug(f'[_search_resources] query: {query}, key={change_rules}')
value_list, filtered_secret_id_list = mgr.query_resources(query, change_rules)
_LOGGER.debug(f'[_search_resources] filered: {value_list}')
result.update(value_list)
secret_id_list = _intersection(secret_id_list, filtered_secret_id_list)
except Exception as e:
_LOGGER.error('########## NOTICE to Developer (bug) ####################################')
_LOGGER.error(f'{res_type} Manager has bug for query_resources functions')
_LOGGER.error(e)
return result, secret_id_list
def _append_custom_keys(self, new_filter, filters, custom_keys):
"""
Args: {'key':'instance_id', 'name':'Instance ID', 'type':'list', 'resource_type': 'CUSTOM'}
Return: updated new_filter
"""
updated_filter = new_filter.copy()
for custom_key, formats in custom_keys.items():
_LOGGER.debug(f'[_append_custom_keys] append custom_key: {custom_key}, {formats}')
values = filters.get(custom_key, None)
if values is None:
continue
value_type = formats['type']
_LOGGER.debug(f'[_append_custom_keys] find values: {values}, type: {value_type}')
if value_type == 'list':
current_value = new_filter.get(custom_key, [])
current_value.extend(values)
updated_filter.update({custom_key: current_value})
elif value_type == 'str':
current_value = new_filter.get(custom_key, None)
if current_value:
_LOGGER.warning(f'[_append_custom_keys] change filter_value: {current_value} -> {values}')
updated_filter.update({custom_key: values})
else:
_LOGGER.error(f'[_append_custom_keys] un-supported type: {formats}, type: {value_type}')
_LOGGER.debug(f'[_append_custom_keys] updated_filter: {updated_filter}')
return updated_filter
def _make_query_for_manager(self, key, value, filter_element):
"""
Args:
key(str): key for query
value: query value of element (str, int, bool, float, list)
filter_element(dict): one element for filter_format
Returns:
query_statement (list, since there are list type)
Example)
value: region-xxxxx
filter_element: {'key':'region_id', 'name':'Region', 'type':'str', 'resource_type': 'SERVER', 'change_key': ['data.compute.instance_id', 'instance_id']}
"""
query_filter = []
f_type = filter_element['type']
if f_type == 'list':
query_filter.append({
'k': key,
'v': value,
'o': 'in'
})
elif f_type == 'str':
query_filter.append({
'k': key,
'v': value,
'o': 'eq'
})
else:
_LOGGER.error(f'Unspported filter_element, {filter_element}, supported type: list | str')
return query_filter
def _intersection(list_a, list_b):
""" Return intersection between list_a and list_b
"""
if len(list_b) == 0:
# May be user send blank list
return list_a
a = set(list_a)
b = set(list_b)
c = a.intersection(b)
_LOGGER.debug(f'[_intersection] a: {list_a}, b: {list_b} -> {c}')
return list(c)
| 42.002907 | 164 | 0.561423 | import logging
from google.protobuf.json_format import MessageToDict
from spaceone.core import cache
from spaceone.core.manager import BaseManager
from spaceone.inventory.manager.collector_manager.collecting_manager import RESOURCE_MAP
_LOGGER = logging.getLogger(__name__)
class FilterManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.CONSTANT_FILTER_CACHE_TIMEOUT = 86400
or_id, secret_id, data):
key = f'collector-filter:{collector_id}:{secret_id}'
_LOGGER.debug(f'[cache_filter] {key} : {data}')
cache.set(key, data, expire=self.CONSTANT_FILTER_CACHE_TIMEOUT)
def _get_filer_cache(self, collector_id, secret_id):
key = f'collector-filter:{collector_id}:{secret_id}'
try:
data = cache.get(key)
_LOGGER.debug(f'[cache_filter] {key} : {data}')
return data
except Exception as e:
return None
def _prepare_query_filter(self, filters, filter_format_by_key):
query_filter = {}
custom_keys = {}
for filter_key, filter_value in filters.items():
filter_element = filter_format_by_key[filter_key]
_LOGGER.debug(f'[_prepare_query_filter] filter_element: {filter_element}')
if filter_element['resource_type'] == 'CUSTOM':
custom_keys[filter_key] = filter_element
continue
v_list = query_filter.get(filter_key, [])
if filter_element:
query = self._make_query_for_manager(filter_key, filter_value, filter_element)
if isinstance(query, list) is False:
_LOGGER.error("LOGIC ERROR, _make_query_for_manager does not return list value: {query}")
else:
v_list.extend(query)
query_filter[filter_key] = v_list
return query_filter, custom_keys
def _make_query_per_resources(self, query_filter, filter_format_by_key):
query_per_resources = {}
for query_key, query in query_filter.items():
res_type = filter_format_by_key[query_key]['resource_type']
query_string = query_per_resources.get(res_type, {'key': query_key, 'filter': [], 'filter_or': []})
if len(query) == 1:
query_string['filter'].extend(query)
elif len(query) > 1:
query_string['filter_or'].extend(query)
else:
_LOGGER.debug(f'[_get_collector_filter] wrong query: {query}')
query_per_resources[res_type] = query_string
return query_per_resources
def _search_resources(self, query_per_resources, filter_format_by_key, secret_id_list):
result = {}
for res_type, query in query_per_resources.items():
_LOGGER.debug(f'[_search_resources] query: {query}')
try:
mgr = self.locator.get_manager(RESOURCE_MAP[res_type])
except Exception as e:
_LOGGER.error('########## NOTICE to Developer (bug) ###################################')
_LOGGER.error(f'[_search_resources] Not found manager based on resource_type: {res_type}')
_LOGGER.error(e)
continue
filter_element = filter_format_by_key[query['key']]
change_rules = filter_element['change_rules']
del query['key']
try:
_LOGGER.debug(f'[_search_resources] query: {query}, key={change_rules}')
value_list, filtered_secret_id_list = mgr.query_resources(query, change_rules)
_LOGGER.debug(f'[_search_resources] filered: {value_list}')
result.update(value_list)
secret_id_list = _intersection(secret_id_list, filtered_secret_id_list)
except Exception as e:
_LOGGER.error('########## NOTICE to Developer (bug) ####################################')
_LOGGER.error(f'{res_type} Manager has bug for query_resources functions')
_LOGGER.error(e)
return result, secret_id_list
def _append_custom_keys(self, new_filter, filters, custom_keys):
updated_filter = new_filter.copy()
for custom_key, formats in custom_keys.items():
_LOGGER.debug(f'[_append_custom_keys] append custom_key: {custom_key}, {formats}')
values = filters.get(custom_key, None)
if values is None:
continue
value_type = formats['type']
_LOGGER.debug(f'[_append_custom_keys] find values: {values}, type: {value_type}')
if value_type == 'list':
current_value = new_filter.get(custom_key, [])
current_value.extend(values)
updated_filter.update({custom_key: current_value})
elif value_type == 'str':
current_value = new_filter.get(custom_key, None)
if current_value:
_LOGGER.warning(f'[_append_custom_keys] change filter_value: {current_value} -> {values}')
updated_filter.update({custom_key: values})
else:
_LOGGER.error(f'[_append_custom_keys] un-supported type: {formats}, type: {value_type}')
_LOGGER.debug(f'[_append_custom_keys] updated_filter: {updated_filter}')
return updated_filter
def _make_query_for_manager(self, key, value, filter_element):
query_filter = []
f_type = filter_element['type']
if f_type == 'list':
query_filter.append({
'k': key,
'v': value,
'o': 'in'
})
elif f_type == 'str':
query_filter.append({
'k': key,
'v': value,
'o': 'eq'
})
else:
_LOGGER.error(f'Unspported filter_element, {filter_element}, supported type: list | str')
return query_filter
def _intersection(list_a, list_b):
if len(list_b) == 0:
return list_a
a = set(list_a)
b = set(list_b)
c = a.intersection(b)
_LOGGER.debug(f'[_intersection] a: {list_a}, b: {list_b} -> {c}')
return list(c)
| true | true |
f73e87693090402e524f96918cc305e8eca95fae | 149 | py | Python | projects/code_combat/4_Backwoods_Forest/114-Backwoods_Standoff_B/stand_b.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | projects/code_combat/4_Backwoods_Forest/114-Backwoods_Standoff_B/stand_b.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | projects/code_combat/4_Backwoods_Forest/114-Backwoods_Standoff_B/stand_b.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | while True:
enemy = hero.findNearestEnemy()
if hero.isReady("cleave"):
hero.cleave(enemy)
else:
hero.attack(enemy)
| 21.285714 | 36 | 0.583893 | while True:
enemy = hero.findNearestEnemy()
if hero.isReady("cleave"):
hero.cleave(enemy)
else:
hero.attack(enemy)
| true | true |
f73e88052ea184704f7d5e3ba1ba61be8643b479 | 437 | py | Python | {{cookiecutter.project_slug}}/backend/app/app/schemas/role.py | gma2th/full-stack | fd43b13ade62c5dd7acb9d711400c702593984d4 | [
"MIT"
] | 516 | 2018-03-06T19:20:47.000Z | 2022-03-30T22:22:11.000Z | {{cookiecutter.project_slug}}/backend/app/app/schemas/role.py | ohld/full-stack | cc2f9753f268a7e1264dd01b888f587c3a45c9a2 | [
"MIT"
] | 23 | 2018-03-21T19:38:40.000Z | 2020-12-27T23:08:09.000Z | {{cookiecutter.project_slug}}/backend/app/app/schemas/role.py | ohld/full-stack | cc2f9753f268a7e1264dd01b888f587c3a45c9a2 | [
"MIT"
] | 85 | 2018-03-29T16:46:40.000Z | 2022-01-27T18:47:39.000Z | # Import standard library packages
# Import installed packages
from marshmallow import fields
# Import app code
from .base import BaseSchema
class RoleSchema(BaseSchema):
# Own properties
id = fields.Int()
created_at = fields.DateTime()
name = fields.Str()
users = fields.Nested(
"UserSchema",
only=["id", "first_name", "last_name", "email", "is_active", "is_superuser"],
many=True,
)
| 21.85 | 85 | 0.659039 |
from marshmallow import fields
from .base import BaseSchema
class RoleSchema(BaseSchema):
id = fields.Int()
created_at = fields.DateTime()
name = fields.Str()
users = fields.Nested(
"UserSchema",
only=["id", "first_name", "last_name", "email", "is_active", "is_superuser"],
many=True,
)
| true | true |
f73e89ae275a921d5d1ba5f77f9c6643a6dc8e51 | 2,245 | py | Python | tests/neptune/new/attributes/test_attribute_base.py | michalsustr/neptune-client | 98858859f26d6b4e3aa59fbf6be63ff97e1abd9a | [
"Apache-2.0"
] | null | null | null | tests/neptune/new/attributes/test_attribute_base.py | michalsustr/neptune-client | 98858859f26d6b4e3aa59fbf6be63ff97e1abd9a | [
"Apache-2.0"
] | null | null | null | tests/neptune/new/attributes/test_attribute_base.py | michalsustr/neptune-client | 98858859f26d6b4e3aa59fbf6be63ff97e1abd9a | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import threading
import time
import unittest
import uuid
from typing import Optional
from mock import MagicMock
from neptune.new.internal.container_type import ContainerType
from neptune.new.internal.id_formats import UniqueId
from neptune.new.internal.operation_processors.operation_processor import (
OperationProcessor,
)
from neptune.new.internal.operation_processors.sync_operation_processor import (
SyncOperationProcessor,
)
from neptune.new.internal.backends.neptune_backend_mock import NeptuneBackendMock
from neptune.new.metadata_containers import Run
_now = time.time()
class TestAttributeBase(unittest.TestCase):
# TODO: test Projects, Model and ModelVersion
@staticmethod
def _create_run(processor: Optional[OperationProcessor] = None):
backend = NeptuneBackendMock()
exp = backend.create_run(UniqueId(str(uuid.uuid4())))
if processor is None:
processor = SyncOperationProcessor(exp.id, ContainerType.RUN, backend)
_run = Run(
id_=exp.id,
backend=backend,
op_processor=processor,
background_job=MagicMock(),
lock=threading.RLock(),
workspace=MagicMock(),
project_id=MagicMock(),
project_name=MagicMock(),
sys_id=MagicMock(),
)
_run.sync()
_run.start()
return _run
@staticmethod
def _random_path():
return ["some", "random", "path", str(uuid.uuid4())]
@staticmethod
def _random_wait():
return bool(random.getrandbits(1))
@staticmethod
def _now():
return _now
| 29.155844 | 82 | 0.702004 |
import random
import threading
import time
import unittest
import uuid
from typing import Optional
from mock import MagicMock
from neptune.new.internal.container_type import ContainerType
from neptune.new.internal.id_formats import UniqueId
from neptune.new.internal.operation_processors.operation_processor import (
OperationProcessor,
)
from neptune.new.internal.operation_processors.sync_operation_processor import (
SyncOperationProcessor,
)
from neptune.new.internal.backends.neptune_backend_mock import NeptuneBackendMock
from neptune.new.metadata_containers import Run
_now = time.time()
class TestAttributeBase(unittest.TestCase):
@staticmethod
def _create_run(processor: Optional[OperationProcessor] = None):
backend = NeptuneBackendMock()
exp = backend.create_run(UniqueId(str(uuid.uuid4())))
if processor is None:
processor = SyncOperationProcessor(exp.id, ContainerType.RUN, backend)
_run = Run(
id_=exp.id,
backend=backend,
op_processor=processor,
background_job=MagicMock(),
lock=threading.RLock(),
workspace=MagicMock(),
project_id=MagicMock(),
project_name=MagicMock(),
sys_id=MagicMock(),
)
_run.sync()
_run.start()
return _run
@staticmethod
def _random_path():
return ["some", "random", "path", str(uuid.uuid4())]
@staticmethod
def _random_wait():
return bool(random.getrandbits(1))
@staticmethod
def _now():
return _now
| true | true |
f73e8a73deb3d8ddce843f603c78ce40a13fdb7f | 2,124 | py | Python | mc2/permissions.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 4 | 2016-03-09T00:51:17.000Z | 2017-10-05T23:54:00.000Z | mc2/permissions.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 131 | 2015-11-19T16:45:23.000Z | 2018-07-24T09:36:08.000Z | mc2/permissions.py | praekeltfoundation/mc2 | 5367a8aed309fade0f17bc72efa099b0afc76aa7 | [
"BSD-2-Clause"
] | 2 | 2016-07-30T15:36:23.000Z | 2017-09-18T12:40:11.000Z | from urlparse import urlparse
from django.db.models import Q
from django.conf import settings
from mc2.controllers.docker.models import DockerController
def get_app_id_from_domain(domain):
index = domain.find(settings.HUB_DOMAIN)
if not index == -1:
return domain[:index - 1]
return None
def org_permissions(user, service):
if user and service:
domain = urlparse(service).hostname
app_id = get_app_id_from_domain(domain)
if app_id:
controller = DockerController.objects.filter(
Q(domain_urls__contains=domain) | Q(slug=app_id)).first()
else:
controller = DockerController.objects.filter(
domain_urls__contains=domain).first()
# super users have universal access
if user.is_superuser:
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': True,
'service_name': service,
'groups': []}
if controller:
# org admins have super user access
if controller.organization.has_admin(user) or \
controller.organization.has_app_admin(user):
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': True,
'service_name': service,
'groups': []}
# normal org users have non-super user access
if controller.organization.users.filter(id=user.id).exists():
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': False,
'service_name': service,
'groups': []}
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': False,
'is_admin': False,
'service_name': service,
'groups': []}
| 32.676923 | 73 | 0.531544 | from urlparse import urlparse
from django.db.models import Q
from django.conf import settings
from mc2.controllers.docker.models import DockerController
def get_app_id_from_domain(domain):
index = domain.find(settings.HUB_DOMAIN)
if not index == -1:
return domain[:index - 1]
return None
def org_permissions(user, service):
if user and service:
domain = urlparse(service).hostname
app_id = get_app_id_from_domain(domain)
if app_id:
controller = DockerController.objects.filter(
Q(domain_urls__contains=domain) | Q(slug=app_id)).first()
else:
controller = DockerController.objects.filter(
domain_urls__contains=domain).first()
if user.is_superuser:
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': True,
'service_name': service,
'groups': []}
if controller:
if controller.organization.has_admin(user) or \
controller.organization.has_app_admin(user):
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': True,
'service_name': service,
'groups': []}
if controller.organization.users.filter(id=user.id).exists():
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': True,
'is_admin': False,
'service_name': service,
'groups': []}
return {
'givenName': user.first_name,
'email': user.email,
'has_perm': False,
'is_admin': False,
'service_name': service,
'groups': []}
| true | true |
f73e8b772c941910d9c1f6d5867bc4002a5822ef | 21,453 | py | Python | scipy/integrate/_ivp/rk.py | pranavrajpal/scipy | 7dcdeffed53483a60b3e054618520e0f28adeba4 | [
"BSD-3-Clause"
] | 1 | 2021-06-11T22:09:38.000Z | 2021-06-11T22:09:38.000Z | scipy/integrate/_ivp/rk.py | pranavrajpal/scipy | 7dcdeffed53483a60b3e054618520e0f28adeba4 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T20:19:36.000Z | 2021-04-03T20:19:36.000Z | scipy/integrate/_ivp/rk.py | YarivLevy81/scipy | 859c1061b3d5aa30c4466824049d69edde5499a2 | [
"BSD-3-Clause"
] | 1 | 2020-06-28T00:46:20.000Z | 2020-06-28T00:46:20.000Z | import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients
# Multiply steps computed from asymptotic behaviour of errors by this.
SAFETY = 0.9
MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
MAX_FACTOR = 10 # Maximum allowed increase in a step size.
def rk_step(fun, t, y, f, h, A, B, C, K):
"""Perform a single Runge-Kutta step.
This function computes a prediction of an explicit Runge-Kutta method and
also estimates the error of a less accurate method.
Notation for Butcher tableau is as in [1]_.
Parameters
----------
fun : callable
Right-hand side of the system.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
f : ndarray, shape (n,)
Current value of the derivative, i.e., ``fun(x, y)``.
h : float
Step to use.
A : ndarray, shape (n_stages, n_stages)
Coefficients for combining previous RK stages to compute the next
stage. For explicit methods the coefficients at and above the main
diagonal are zeros.
B : ndarray, shape (n_stages,)
Coefficients for combining RK stages for computing the final
prediction.
C : ndarray, shape (n_stages,)
Coefficients for incrementing time for consecutive RK stages.
The value for the first stage is always zero.
K : ndarray, shape (n_stages + 1, n)
Storage array for putting RK stages here. Stages are stored in rows.
The last row is a linear combination of the previous rows with
coefficients
Returns
-------
y_new : ndarray, shape (n,)
Solution at t + h computed with a higher accuracy.
f_new : ndarray, shape (n,)
Derivative ``fun(t + h, y_new)``.
References
----------
.. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
Equations I: Nonstiff Problems", Sec. II.4.
"""
K[0] = f
for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):
dy = np.dot(K[:s].T, a[:s]) * h
K[s] = fun(t + c * h, y + dy)
y_new = y + h * np.dot(K[:-1].T, B)
f_new = fun(t + h, y_new)
K[-1] = f_new
return y_new, f_new
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods.

    Subclasses supply the Butcher tableau (`A`, `B`, `C`), the embedded
    error-estimate weights `E`, the dense-output interpolation matrix `P`,
    the method `order`, the `error_estimator_order`, and `n_stages`.
    """

    # Butcher tableau and method metadata, filled in by concrete subclasses.
    C: np.ndarray = NotImplemented           # Stage time fractions (nodes).
    A: np.ndarray = NotImplemented           # Stage coupling coefficients.
    B: np.ndarray = NotImplemented           # Weights for the propagated solution.
    E: np.ndarray = NotImplemented           # Weights for the embedded error estimate.
    P: np.ndarray = NotImplemented           # Interpolation matrix for dense output.
    order: int = NotImplemented              # Order of the propagating formula.
    error_estimator_order: int = NotImplemented  # Order of the error estimator.
    n_stages: int = NotImplemented           # Number of RK stages per step.

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        # Unknown keyword arguments are reported, not treated as errors.
        warn_extraneous(extraneous)
        super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
                                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        # Derivative at the current point; reused as the first stage.
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            # Heuristic initial step based on the local derivative scale.
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        # Stage storage; the extra row holds the derivative at the new point.
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        # Exponent used in the step-size update factor: error_norm ** exponent.
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None

    def _estimate_error(self, K, h):
        # Embedded error estimate: weighted combination of the RK stages.
        return np.dot(K.T, self.E) * h

    def _estimate_error_norm(self, K, h, scale):
        # RMS norm of the error scaled by the per-component tolerance.
        return norm(self._estimate_error(K, h) / scale)

    def _step_impl(self):
        t = self.t
        y = self.y

        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol

        # Smallest step that still changes t in floating-point arithmetic.
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)

        # Clip the proposed step into [min_step, max_step].
        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs

        step_accepted = False
        step_rejected = False

        # Attempt steps, shrinking h until the error estimate is acceptable.
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Do not step past the integration boundary.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            # Mixed absolute/relative tolerance per component.
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)

            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)

                if step_rejected:
                    # After a rejection, do not let the step size grow
                    # within the same step attempt.
                    factor = min(1, factor)

                h_abs *= factor

                step_accepted = True
            else:
                # Step rejected: shrink (bounded below by MIN_FACTOR) and retry.
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True

        self.h_previous = h
        self.y_old = y

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.f = f_new

        return True, None

    def _dense_output_impl(self):
        # Combine stored stages with the interpolation matrix to obtain the
        # polynomial coefficients for dense output on [t_old, t].
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).

    Implements the Bogacki-Shampine embedded pair [1]_: steps are advanced
    with the third-order formula (local extrapolation) while the embedded
    second-order result controls the error.  A cubic Hermite polynomial
    provides the dense output.  Can be applied in the complex domain.

    Constructor parameters (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    public attributes are the same as for every `RungeKutta` solver; see
    `OdeSolver` for their meaning.

    References
    ----------
    .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    # Butcher tableau of the Bogacki-Shampine pair.
    C = np.array([0, 1 / 2, 3 / 4])
    A = np.array([
        [0, 0, 0],
        [1 / 2, 0, 0],
        [0, 3 / 4, 0],
    ])
    B = np.array([2 / 9, 1 / 3, 4 / 9])
    # Weights of the embedded second-order error estimate.
    E = np.array([5 / 72, -1 / 12, -1 / 9, 1 / 8])
    # Cubic Hermite interpolation matrix used by RkDenseOutput.
    P = np.array([
        [1, -4 / 3, 5 / 9],
        [0, 1, -2 / 3],
        [0, 4 / 3, -8 / 9],
        [0, -1, 1],
    ])
class RK45(RungeKutta):
    """Explicit Runge-Kutta method of order 5(4).

    Implements the Dormand-Prince embedded pair [1]_: steps are advanced
    with the fifth-order formula (local extrapolation) while the embedded
    fourth-order result controls the error.  A quartic interpolation
    polynomial is used for the dense output [2]_.  Can be applied in the
    complex domain.

    Constructor parameters (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    public attributes are the same as for every `RungeKutta` solver; see
    `OdeSolver` for their meaning.

    References
    ----------
    .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta
           formulae", Journal of Computational and Applied Mathematics,
           Vol. 6, No. 1, pp. 19-26, 1980.
    .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas",
           Mathematics of Computation, Vol. 46, No. 173, pp. 135-150, 1986.
    """
    order = 5
    error_estimator_order = 4
    n_stages = 6
    # Butcher tableau of the Dormand-Prince pair.
    C = np.array([0, 1 / 5, 3 / 10, 4 / 5, 8 / 9, 1])
    A = np.array([
        [0, 0, 0, 0, 0],
        [1 / 5, 0, 0, 0, 0],
        [3 / 40, 9 / 40, 0, 0, 0],
        [44 / 45, -56 / 15, 32 / 9, 0, 0],
        [19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729, 0],
        [9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
    ])
    B = np.array([35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84])
    # Weights of the embedded fourth-order error estimate.
    E = np.array([-71 / 57600, 0, 71 / 16695, -71 / 1920, 17253 / 339200,
                  -22 / 525, 1 / 40])
    # Dense-output interpolation matrix; corresponds to the optimum value
    # of c_6 from [2]_.
    P = np.array([
        [1, -8048581381 / 2820520608, 8663915743 / 2820520608,
         -12715105075 / 11282082432],
        [0, 0, 0, 0],
        [0, 131558114200 / 32700410799, -68118460800 / 10900136933,
         87487479700 / 32700410799],
        [0, -1754552775 / 470086768, 14199869525 / 1410260304,
         -10690763975 / 1880347072],
        [0, 127303824393 / 49829197408, -318862633887 / 49829197408,
         701980252875 / 199316789632],
        [0, -282668133 / 205662961, 2019193451 / 616988883,
         -1453857185 / 822651844],
        [0, 40617522 / 29380423, -110615467 / 29380423,
         69997945 / 29380423],
    ])
class DOP853(RungeKutta):
    """Explicit Runge-Kutta method of order 8.

    Python port of the classic "DOP853" Fortran code [1]_, [2]_.  It is not
    a literal translation, but the algorithmic core and coefficients are
    the same: error control blends a 5th- and a 3rd-order estimate, and
    dense output is built from additional interpolation stages.  Can be
    applied in the complex domain.

    Constructor parameters (``fun``, ``t0``, ``y0``, ``t_bound``,
    ``first_step``, ``max_step``, ``rtol``, ``atol``, ``vectorized``) and
    public attributes are the same as for every `RungeKutta` solver; see
    `OdeSolver` for their meaning.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.
    .. [2] `Page with original Fortran code of DOP853
           <http://www.unige.ch/~hairer/software.html>`_.
    """
    n_stages = dop853_coefficients.N_STAGES
    order = 8
    error_estimator_order = 7
    # Only the first n_stages rows/columns drive the integration step; the
    # remaining rows of the full tableau are extra dense-output stages.
    A = dop853_coefficients.A[:n_stages, :n_stages]
    B = dop853_coefficients.B
    C = dop853_coefficients.C[:n_stages]
    E3 = dop853_coefficients.E3
    E5 = dop853_coefficients.E5
    D = dop853_coefficients.D
    A_EXTRA = dop853_coefficients.A[n_stages + 1:]
    C_EXTRA = dop853_coefficients.C[n_stages + 1:]

    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        super(DOP853, self).__init__(
            fun, t0, y0, t_bound, max_step, rtol, atol, vectorized,
            first_step, **extraneous)
        # Storage for all stages, including the extra interpolation stages.
        self.K_extended = np.empty(
            (dop853_coefficients.N_STAGES_EXTENDED, self.n),
            dtype=self.y.dtype)
        # The integrator itself works on a view of the first rows.
        self.K = self.K_extended[:self.n_stages + 1]

    def _estimate_error(self, K, h):  # Left for testing purposes.
        e5 = np.dot(K.T, self.E5)
        e3 = np.dot(K.T, self.E3)
        denom = np.hypot(np.abs(e5), 0.1 * np.abs(e3))
        # Damp the 5th-order estimate wherever the 3rd-order one dominates.
        correction = np.ones_like(e5)
        nonzero = denom > 0
        correction[nonzero] = np.abs(e5[nonzero]) / denom[nonzero]
        return h * e5 * correction

    def _estimate_error_norm(self, K, h, scale):
        e5 = np.dot(K.T, self.E5) / scale
        e3 = np.dot(K.T, self.E3) / scale
        sq5 = np.linalg.norm(e5) ** 2
        sq3 = np.linalg.norm(e3) ** 2
        if sq5 == 0 and sq3 == 0:
            return 0.0
        # Blend of the two estimates, as in the original Fortran code.
        denom = sq5 + 0.01 * sq3
        return np.abs(h) * sq5 / np.sqrt(denom * len(scale))

    def _dense_output_impl(self):
        K = self.K_extended
        h = self.h_previous
        # Evaluate the extra stages needed only for interpolation.
        for stage, (a_row, node) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
                                              start=self.n_stages + 1):
            increment = np.dot(K[:stage].T, a_row[:stage]) * h
            K[stage] = self.fun(self.t_old + node * h, self.y_old + increment)
        F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
                     dtype=self.y_old.dtype)
        f_old = K[0]
        delta_y = self.y - self.y_old
        F[0] = delta_y
        F[1] = h * f_old - delta_y
        F[2] = 2 * delta_y - h * (self.f + f_old)
        F[3:] = h * np.dot(self.D, K)
        return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
class RkDenseOutput(DenseOutput):
    """Polynomial interpolant over a single Runge-Kutta step.

    Evaluates ``y_old + h * Q @ [x, x**2, ..., x**(order+1)]`` with
    ``x = (t - t_old) / h``, where ``Q`` combines the stored RK stages.
    """
    def __init__(self, t_old, t, y_old, Q):
        super(RkDenseOutput, self).__init__(t_old, t)
        self.h = t - t_old
        self.Q = Q
        self.order = Q.shape[1] - 1
        self.y_old = y_old
    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        # Powers x, x**2, ..., x**(order+1) via a cumulative product.
        if t.ndim == 0:
            powers = np.cumprod(np.tile(x, self.order + 1))
        else:
            powers = np.cumprod(np.tile(x, (self.order + 1, 1)), axis=0)
        y = self.h * np.dot(self.Q, powers)
        # Shift by the state at the left end of the step.
        y += self.y_old[:, None] if y.ndim == 2 else self.y_old
        return y
class Dop853DenseOutput(DenseOutput):
    """Dense-output interpolant for the DOP853 solver.

    Evaluates the DOP853 interpolation polynomial from the coefficient
    rows ``F`` using a Horner-like scheme whose factors alternate between
    ``x`` and ``1 - x``, where ``x = (t - t_old) / h``.
    """
    def __init__(self, t_old, t, y_old, F):
        super(Dop853DenseOutput, self).__init__(t_old, t)
        self.h = t - t_old
        self.F = F
        self.y_old = y_old
    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        if t.ndim == 0:
            y = np.zeros_like(self.y_old)
        else:
            x = x[:, None]
            y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
        # Horner-style accumulation over the coefficient rows, highest first.
        for i, coeff in enumerate(reversed(self.F)):
            y += coeff
            y *= x if i % 2 == 0 else 1 - x
        y += self.y_old
        return y.T
| 37.180243 | 107 | 0.595861 | import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients
SAFETY = 0.9  # Safety factor applied to the error-based step-size update.
MIN_FACTOR = 0.2  # Minimum allowed decrease of the step size per retry.
MAX_FACTOR = 10  # Maximum allowed increase of the step size per step.
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Perform a single explicit Runge-Kutta step.

    Notation follows the standard Butcher tableau: ``A`` couples the
    stages, ``C`` holds the nodes and ``B`` the output weights.  Stage
    derivatives are written into ``K`` row by row (``K[0]`` is the supplied
    ``f``; the final row receives the derivative at ``t + h``).

    Returns the pair ``(y_new, f_new)``: the solution at ``t + h`` and its
    derivative ``fun(t + h, y_new)``.
    """
    K[0] = f
    for stage, (a_row, node) in enumerate(zip(A[1:], C[1:]), start=1):
        increment = np.dot(K[:stage].T, a_row[:stage]) * h
        K[stage] = fun(t + node * h, y + increment)
    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)
    K[-1] = f_new
    return y_new, f_new
class RungeKutta(OdeSolver):
C: np.ndarray = NotImplemented
A: np.ndarray = NotImplemented
B: np.ndarray = NotImplemented
E: np.ndarray = NotImplemented
P: np.ndarray = NotImplemented
order: int = NotImplemented
error_estimator_order: int = NotImplemented
n_stages: int = NotImplemented
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False,
first_step=None, **extraneous):
warn_extraneous(extraneous)
super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
if first_step is None:
self.h_abs = select_initial_step(
self.fun, self.t, self.y, self.f, self.direction,
self.error_estimator_order, self.rtol, self.atol)
else:
self.h_abs = validate_first_step(first_step, t0, t_bound)
self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
self.error_exponent = -1 / (self.error_estimator_order + 1)
self.h_previous = None
def _estimate_error(self, K, h):
return np.dot(K.T, self.E) * h
def _estimate_error_norm(self, K, h, scale):
return norm(self._estimate_error(K, h) / scale)
def _step_impl(self):
t = self.t
y = self.y
max_step = self.max_step
rtol = self.rtol
atol = self.atol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
elif self.h_abs < min_step:
h_abs = min_step
else:
h_abs = self.h_abs
step_accepted = False
step_rejected = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
self.B, self.C, self.K)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = self._estimate_error_norm(self.K, h, scale)
if error_norm < 1:
if error_norm == 0:
factor = MAX_FACTOR
else:
factor = min(MAX_FACTOR,
SAFETY * error_norm ** self.error_exponent)
if step_rejected:
factor = min(1, factor)
h_abs *= factor
step_accepted = True
else:
h_abs *= max(MIN_FACTOR,
SAFETY * error_norm ** self.error_exponent)
step_rejected = True
self.h_previous = h
self.y_old = y
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.f = f_new
return True, None
def _dense_output_impl(self):
Q = self.K.T.dot(self.P)
return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
order = 3
error_estimator_order = 2
n_stages = 3
C = np.array([0, 1/2, 3/4])
A = np.array([
[0, 0, 0],
[1/2, 0, 0],
[0, 3/4, 0]
])
B = np.array([2/9, 1/3, 4/9])
E = np.array([5/72, -1/12, -1/9, 1/8])
P = np.array([[1, -4 / 3, 5 / 9],
[0, 1, -2/3],
[0, 4/3, -8/9],
[0, -1, 1]])
class RK45(RungeKutta):
order = 5
error_estimator_order = 4
n_stages = 6
C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])
A = np.array([
[0, 0, 0, 0, 0],
[1/5, 0, 0, 0, 0],
[3/40, 9/40, 0, 0, 0],
[44/45, -56/15, 32/9, 0, 0],
[19372/6561, -25360/2187, 64448/6561, -212/729, 0],
[9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
])
B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])
E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525,
1/40])
P = np.array([
[1, -8048581381/2820520608, 8663915743/2820520608,
-12715105075/11282082432],
[0, 0, 0, 0],
[0, 131558114200/32700410799, -68118460800/10900136933,
87487479700/32700410799],
[0, -1754552775/470086768, 14199869525/1410260304,
-10690763975/1880347072],
[0, 127303824393/49829197408, -318862633887/49829197408,
701980252875 / 199316789632],
[0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844],
[0, 40617522/29380423, -110615467/29380423, 69997945/29380423]])
class DOP853(RungeKutta):
n_stages = dop853_coefficients.N_STAGES
order = 8
error_estimator_order = 7
A = dop853_coefficients.A[:n_stages, :n_stages]
B = dop853_coefficients.B
C = dop853_coefficients.C[:n_stages]
E3 = dop853_coefficients.E3
E5 = dop853_coefficients.E5
D = dop853_coefficients.D
A_EXTRA = dop853_coefficients.A[n_stages + 1:]
C_EXTRA = dop853_coefficients.C[n_stages + 1:]
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, vectorized=False,
first_step=None, **extraneous):
super(DOP853, self).__init__(fun, t0, y0, t_bound, max_step,
rtol, atol, vectorized, first_step,
**extraneous)
self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED,
self.n), dtype=self.y.dtype)
self.K = self.K_extended[:self.n_stages + 1]
def _estimate_error(self, K, h):
err5 = np.dot(K.T, self.E5)
err3 = np.dot(K.T, self.E3)
denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3))
correction_factor = np.ones_like(err5)
mask = denom > 0
correction_factor[mask] = np.abs(err5[mask]) / denom[mask]
return h * err5 * correction_factor
def _estimate_error_norm(self, K, h, scale):
err5 = np.dot(K.T, self.E5) / scale
err3 = np.dot(K.T, self.E3) / scale
err5_norm_2 = np.linalg.norm(err5)**2
err3_norm_2 = np.linalg.norm(err3)**2
if err5_norm_2 == 0 and err3_norm_2 == 0:
return 0.0
denom = err5_norm_2 + 0.01 * err3_norm_2
return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale))
def _dense_output_impl(self):
K = self.K_extended
h = self.h_previous
for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA),
start=self.n_stages + 1):
dy = np.dot(K[:s].T, a[:s]) * h
K[s] = self.fun(self.t_old + c * h, self.y_old + dy)
F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n),
dtype=self.y_old.dtype)
f_old = K[0]
delta_y = self.y - self.y_old
F[0] = delta_y
F[1] = h * f_old - delta_y
F[2] = 2 * delta_y - h * (self.f + f_old)
F[3:] = h * np.dot(self.D, K)
return Dop853DenseOutput(self.t_old, self.t, self.y_old, F)
class RkDenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, Q):
super(RkDenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.Q = Q
self.order = Q.shape[1] - 1
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
p = np.tile(x, self.order + 1)
p = np.cumprod(p)
else:
p = np.tile(x, (self.order + 1, 1))
p = np.cumprod(p, axis=0)
y = self.h * np.dot(self.Q, p)
if y.ndim == 2:
y += self.y_old[:, None]
else:
y += self.y_old
return y
class Dop853DenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, F):
super(Dop853DenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.F = F
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
y = np.zeros_like(self.y_old)
else:
x = x[:, None]
y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
for i, f in enumerate(reversed(self.F)):
y += f
if i % 2 == 0:
y *= x
else:
y *= 1 - x
y += self.y_old
return y.T
| true | true |
f73e8c70efe2c1e679832da03af75ed96eba0cb0 | 8,692 | py | Python | sdk/python/feast/entity.py | Mwad22/feast | 6a09d49e2e7bc105c86f1789c765d89e452af0b0 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/entity.py | Mwad22/feast | 6a09d49e2e7bc105c86f1789c765d89e452af0b0 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/entity.py | Mwad22/feast | 6a09d49e2e7bc105c86f1789c765d89e452af0b0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, MutableMapping, Optional
import yaml
from google.protobuf import json_format
from google.protobuf.json_format import MessageToDict, MessageToJson
from google.protobuf.timestamp_pb2 import Timestamp
from feast.loaders import yaml as feast_yaml
from feast.protos.feast.core.Entity_pb2 import Entity as EntityV2Proto
from feast.protos.feast.core.Entity_pb2 import EntityMeta as EntityMetaProto
from feast.protos.feast.core.Entity_pb2 import EntitySpecV2 as EntitySpecProto
from feast.usage import log_exceptions
from feast.value_type import ValueType
class Entity:
    """
    Represents a collection of entities and associated metadata.
    """

    @log_exceptions
    def __init__(
        self,
        name: str,
        value_type: ValueType = ValueType.UNKNOWN,
        description: str = "",
        join_key: Optional[str] = None,
        labels: Optional[MutableMapping[str, str]] = None,
    ):
        self._name = name
        self._description = description
        self._value_type = value_type
        # The join key defaults to the entity name when not provided.
        self._join_key = join_key if join_key else name
        self._labels: MutableMapping[str, str] = dict() if labels is None else labels
        self._created_timestamp: Optional[Timestamp] = None
        self._last_updated_timestamp: Optional[Timestamp] = None

    def __eq__(self, other):
        if not isinstance(other, Entity):
            raise TypeError("Comparisons should only involve Entity class objects.")
        if (
            self.labels != other.labels
            or self.name != other.name
            or self.description != other.description
            or self.value_type != other.value_type
            or self.join_key != other.join_key
        ):
            return False
        return True

    def __str__(self):
        return str(MessageToJson(self.to_proto()))

    @property
    def name(self):
        """
        Returns the name of this entity
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this entity
        """
        self._name = name

    @property
    def description(self):
        """
        Returns the description of this entity
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this entity
        """
        self._description = description

    @property
    def join_key(self):
        """
        Returns the join key of this entity
        """
        return self._join_key

    @join_key.setter
    def join_key(self, join_key):
        """
        Sets the join key of this entity
        """
        self._join_key = join_key

    @property
    def value_type(self) -> ValueType:
        """
        Returns the type of this entity
        """
        return self._value_type

    @value_type.setter
    def value_type(self, value_type: ValueType):
        """
        Set the type for this entity
        """
        self._value_type = value_type

    @property
    def labels(self):
        """
        Returns the labels of this entity. This is the user defined metadata
        defined as a dictionary.
        """
        return self._labels

    @labels.setter
    def labels(self, labels: MutableMapping[str, str]):
        """
        Set the labels for this entity
        """
        self._labels = labels

    @property
    def created_timestamp(self):
        """
        Returns the created_timestamp of this entity
        """
        return self._created_timestamp

    @property
    def last_updated_timestamp(self):
        """
        Returns the last_updated_timestamp of this entity
        """
        return self._last_updated_timestamp

    def is_valid(self):
        """
        Validates the state of an entity locally. Raises a ValueError
        if the entity is invalid.
        """
        if not self.name:
            raise ValueError("No name found in entity.")
        if not self.value_type:
            # Fix: the message previously lacked the f-prefix, so the literal
            # text "{self.value_type}" was raised instead of the value.
            raise ValueError(f"No type found in entity {self.value_type}")

    @classmethod
    def from_yaml(cls, yml: str):
        """
        Creates an entity from a YAML string body or a file path

        Args:
            yml: Either a file path containing a yaml file or a YAML string

        Returns:
            Returns a EntityV2 object based on the YAML file
        """
        return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))

    @classmethod
    def from_dict(cls, entity_dict):
        """
        Creates an entity from a dict

        Args:
            entity_dict: A dict representation of an entity

        Returns:
            Returns a EntityV2 object based on the entity dict
        """
        entity_proto = json_format.ParseDict(
            entity_dict, EntityV2Proto(), ignore_unknown_fields=True
        )
        return cls.from_proto(entity_proto)

    @classmethod
    def from_proto(cls, entity_proto: EntityV2Proto):
        """
        Creates an entity from a protobuf representation of an entity

        Args:
            entity_proto: A protobuf representation of an entity

        Returns:
            Returns a EntityV2 object based on the entity protobuf
        """
        entity = cls(
            name=entity_proto.spec.name,
            description=entity_proto.spec.description,
            value_type=ValueType(entity_proto.spec.value_type),
            labels=entity_proto.spec.labels,
            join_key=entity_proto.spec.join_key,
        )
        entity._created_timestamp = entity_proto.meta.created_timestamp
        entity._last_updated_timestamp = entity_proto.meta.last_updated_timestamp
        return entity

    def to_proto(self) -> EntityV2Proto:
        """
        Converts an entity object to its protobuf representation

        Returns:
            EntityV2Proto protobuf
        """
        meta = EntityMetaProto(
            created_timestamp=self.created_timestamp,
            last_updated_timestamp=self.last_updated_timestamp,
        )
        spec = EntitySpecProto(
            name=self.name,
            description=self.description,
            value_type=self.value_type.value,
            labels=self.labels,
            join_key=self.join_key,
        )
        return EntityV2Proto(spec=spec, meta=meta)

    def to_dict(self) -> Dict:
        """
        Converts entity to dict

        Returns:
            Dictionary object representation of entity
        """
        entity_dict = MessageToDict(self.to_proto())
        # Remove meta when empty for more readable exports. Use .get() because
        # MessageToDict omits empty submessages entirely, so indexing "meta"
        # directly could raise a KeyError.
        if not entity_dict.get("meta"):
            entity_dict.pop("meta", None)
        return entity_dict

    def to_yaml(self):
        """
        Converts a entity to a YAML string.

        Returns:
            Entity string returned in YAML format
        """
        entity_dict = self.to_dict()
        return yaml.dump(entity_dict, allow_unicode=True, sort_keys=False)

    def to_spec_proto(self) -> EntitySpecProto:
        """
        Converts an EntityV2 object to its protobuf representation.
        Used when passing EntitySpecV2 object to Feast request.

        Returns:
            EntitySpecV2 protobuf
        """
        spec = EntitySpecProto(
            name=self.name,
            description=self.description,
            value_type=self.value_type.value,
            labels=self.labels,
            join_key=self.join_key,
        )
        return spec

    def _update_from_entity(self, entity):
        """
        Deep replaces one entity with another

        Args:
            entity: Entity to use as a source of configuration
        """
        self.name = entity.name
        self.description = entity.description
        self.value_type = entity.value_type
        self.labels = entity.labels
        self.join_key = entity.join_key
        self._created_timestamp = entity.created_timestamp
        self._last_updated_timestamp = entity.last_updated_timestamp
| 27.681529 | 84 | 0.61919 |
from typing import Dict, MutableMapping, Optional
import yaml
from google.protobuf import json_format
from google.protobuf.json_format import MessageToDict, MessageToJson
from google.protobuf.timestamp_pb2 import Timestamp
from feast.loaders import yaml as feast_yaml
from feast.protos.feast.core.Entity_pb2 import Entity as EntityV2Proto
from feast.protos.feast.core.Entity_pb2 import EntityMeta as EntityMetaProto
from feast.protos.feast.core.Entity_pb2 import EntitySpecV2 as EntitySpecProto
from feast.usage import log_exceptions
from feast.value_type import ValueType
class Entity:
@log_exceptions
def __init__(
self,
name: str,
value_type: ValueType = ValueType.UNKNOWN,
description: str = "",
join_key: Optional[str] = None,
labels: Optional[MutableMapping[str, str]] = None,
):
self._name = name
self._description = description
self._value_type = value_type
if join_key:
self._join_key = join_key
else:
self._join_key = name
if labels is None:
self._labels = dict()
else:
self._labels = labels
self._created_timestamp: Optional[Timestamp] = None
self._last_updated_timestamp: Optional[Timestamp] = None
def __eq__(self, other):
if not isinstance(other, Entity):
raise TypeError("Comparisons should only involve Entity class objects.")
if (
self.labels != other.labels
or self.name != other.name
or self.description != other.description
or self.value_type != other.value_type
or self.join_key != other.join_key
):
return False
return True
def __str__(self):
return str(MessageToJson(self.to_proto()))
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def join_key(self):
return self._join_key
@join_key.setter
def join_key(self, join_key):
self._join_key = join_key
@property
def value_type(self) -> ValueType:
return self._value_type
@value_type.setter
def value_type(self, value_type: ValueType):
self._value_type = value_type
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, labels: MutableMapping[str, str]):
self._labels = labels
@property
def created_timestamp(self):
return self._created_timestamp
@property
def last_updated_timestamp(self):
return self._last_updated_timestamp
def is_valid(self):
if not self.name:
raise ValueError("No name found in entity.")
if not self.value_type:
raise ValueError("No type found in entity {self.value_type}")
@classmethod
def from_yaml(cls, yml: str):
return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))
@classmethod
def from_dict(cls, entity_dict):
entity_proto = json_format.ParseDict(
entity_dict, EntityV2Proto(), ignore_unknown_fields=True
)
return cls.from_proto(entity_proto)
@classmethod
def from_proto(cls, entity_proto: EntityV2Proto):
entity = cls(
name=entity_proto.spec.name,
description=entity_proto.spec.description,
value_type=ValueType(entity_proto.spec.value_type),
labels=entity_proto.spec.labels,
join_key=entity_proto.spec.join_key,
)
entity._created_timestamp = entity_proto.meta.created_timestamp
entity._last_updated_timestamp = entity_proto.meta.last_updated_timestamp
return entity
def to_proto(self) -> EntityV2Proto:
meta = EntityMetaProto(
created_timestamp=self.created_timestamp,
last_updated_timestamp=self.last_updated_timestamp,
)
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return EntityV2Proto(spec=spec, meta=meta)
def to_dict(self) -> Dict:
entity_dict = MessageToDict(self.to_proto())
if entity_dict["meta"] == {}:
del entity_dict["meta"]
return entity_dict
def to_yaml(self):
entity_dict = self.to_dict()
return yaml.dump(entity_dict, allow_unicode=True, sort_keys=False)
def to_spec_proto(self) -> EntitySpecProto:
spec = EntitySpecProto(
name=self.name,
description=self.description,
value_type=self.value_type.value,
labels=self.labels,
join_key=self.join_key,
)
return spec
    def _update_from_entity(self, entity):
        """Copy every field, including the private timestamps, from another entity."""
        self.name = entity.name
        self.description = entity.description
        self.value_type = entity.value_type
        self.labels = entity.labels
        self.join_key = entity.join_key
        self._created_timestamp = entity.created_timestamp
        self._last_updated_timestamp = entity.last_updated_timestamp
| true | true |
f73e8cf1b72008c3a80bff339a8965c686f16715 | 14,933 | py | Python | hail/python/hail/ir/matrix_reader.py | iitalics/hail | eebdd30a6a1e0c04d6291abb4b8c834da590fc45 | [
"MIT"
] | 1 | 2022-01-03T13:46:08.000Z | 2022-01-03T13:46:08.000Z | hail/python/hail/ir/matrix_reader.py | iitalics/hail | eebdd30a6a1e0c04d6291abb4b8c834da590fc45 | [
"MIT"
] | 2 | 2016-08-12T18:38:24.000Z | 2018-09-05T15:26:35.000Z | hail/python/hail/ir/matrix_reader.py | iitalics/hail | eebdd30a6a1e0c04d6291abb4b8c834da590fc45 | [
"MIT"
] | null | null | null | import abc
import json
import hail as hl
from .utils import make_filter_and_replace
from ..expr.types import tfloat32, tfloat64, hail_type, tint32, tint64, tstr
from ..genetics.reference_genome import reference_genome_type
from ..typecheck import *
from ..utils import wrap_to_list
from ..utils.misc import escape_str
class MatrixReader(object):
    """Abstract base for objects that describe how to read a MatrixTable."""
    @abc.abstractmethod
    def render(self, r):
        # Subclasses return an escaped JSON string describing the reader,
        # consumed by the backend's IR parser.
        pass
    @abc.abstractmethod
    def __eq__(self, other):
        pass
class MatrixNativeReader(MatrixReader):
    """Reads a MatrixTable stored in Hail's native on-disk format.

    Optionally restricts the read to a list of row-key intervals; scalar
    interval points are wrapped into single-field structs so the backend
    always receives struct-typed points.
    """
    @typecheck_method(path=str,
                      intervals=nullable(sequenceof(anytype)),
                      filter_intervals=bool)
    def __init__(self, path, intervals, filter_intervals):
        if intervals is not None:
            t = hl.expr.impute_type(intervals)
            # BUG FIX: the original used `and`, which allowed a non-array
            # value past the check (and could raise AttributeError on
            # t.element_type for non-array types). The argument must be an
            # array whose elements are intervals.
            if not isinstance(t, hl.tarray) or not isinstance(t.element_type, hl.tinterval):
                raise TypeError("'intervals' must be an array of tintervals")
            pt = t.element_type.point_type
            if isinstance(pt, hl.tstruct):
                self._interval_type = t
            else:
                # Wrap scalar points in a one-field struct.
                self._interval_type = hl.tarray(hl.tinterval(hl.tstruct(__point=pt)))
        self.path = path
        self.filter_intervals = filter_intervals
        if intervals is not None and t != self._interval_type:
            # Re-wrap each interval's endpoints to match the struct point type.
            self.intervals = [hl.Interval(hl.Struct(__point=i.start),
                                          hl.Struct(__point=i.end),
                                          i.includes_start,
                                          i.includes_end) for i in intervals]
        else:
            self.intervals = intervals

    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        reader = {'name': 'MatrixNativeReader',
                  'path': self.path}
        if self.intervals is not None:
            assert self._interval_type is not None
            reader['options'] = {
                'name': 'NativeReaderOptions',
                'intervals': self._interval_type._convert_to_json(self.intervals),
                'intervalPointType': self._interval_type.element_type.point_type._parsable_string(),
                'filterIntervals': self.filter_intervals,
            }
        return escape_str(json.dumps(reader))

    def __eq__(self, other):
        return isinstance(other, MatrixNativeReader) and \
            other.path == self.path and \
            other.intervals == self.intervals and \
            other.filter_intervals == self.filter_intervals
class MatrixRangeReader(MatrixReader):
    """Synthesizes a MatrixTable of the given dimensions (no on-disk source)."""
    @typecheck_method(n_rows=int,
                      n_cols=int,
                      n_partitions=nullable(int))
    def __init__(self, n_rows, n_cols, n_partitions):
        self.n_rows = n_rows
        self.n_cols = n_cols
        self.n_partitions = n_partitions

    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        spec = {
            'name': 'MatrixRangeReader',
            'nRows': self.n_rows,
            'nCols': self.n_cols,
            'nPartitions': self.n_partitions,
        }
        return escape_str(json.dumps(spec))

    def __eq__(self, other):
        if not isinstance(other, MatrixRangeReader):
            return False
        return (self.n_rows == other.n_rows
                and self.n_cols == other.n_cols
                and self.n_partitions == other.n_partitions)
class MatrixVCFReader(MatrixReader):
    """Reads one or more VCF files into a MatrixTable.

    Stores normalized import options: paths and call fields are coerced to
    lists, and the entry float type is pre-rendered to its parsable string.
    """
    @typecheck_method(path=oneof(str, sequenceof(str)),
                      call_fields=oneof(str, sequenceof(str)),
                      entry_float_type=enumeration(tfloat32, tfloat64),
                      header_file=nullable(str),
                      min_partitions=nullable(int),
                      reference_genome=nullable(reference_genome_type),
                      contig_recoding=nullable(dictof(str, str)),
                      array_elements_required=bool,
                      skip_invalid_loci=bool,
                      force_bgz=bool,
                      force_gz=bool,
                      filter=nullable(str),
                      find_replace=nullable(sized_tupleof(str, str)),
                      _partitions_json=nullable(str))
    def __init__(self,
                 path,
                 call_fields,
                 entry_float_type,
                 header_file,
                 min_partitions,
                 reference_genome,
                 contig_recoding,
                 array_elements_required,
                 skip_invalid_loci,
                 force_bgz,
                 force_gz,
                 filter,
                 find_replace,
                 _partitions_json):
        self.path = wrap_to_list(path)
        self.header_file = header_file
        self.min_partitions = min_partitions
        self.call_fields = wrap_to_list(call_fields)
        self.entry_float_type = entry_float_type._parsable_string()
        self.reference_genome = reference_genome
        self.contig_recoding = contig_recoding
        self.array_elements_required = array_elements_required
        self.skip_invalid_loci = skip_invalid_loci
        self.force_gz = force_gz
        self.force_bgz = force_bgz
        self.filter = filter
        self.find_replace = find_replace
        self._partitions_json = _partitions_json
    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        # Keys must match the field names expected by the backend's JSON reader.
        reader = {'name': 'MatrixVCFReader',
                  'files': self.path,
                  'callFields': self.call_fields,
                  'entryFloatTypeName': self.entry_float_type,
                  'headerFile': self.header_file,
                  'minPartitions': self.min_partitions,
                  'rg': self.reference_genome.name if self.reference_genome else None,
                  'contigRecoding': self.contig_recoding if self.contig_recoding else {},
                  'arrayElementsRequired': self.array_elements_required,
                  'skipInvalidLoci': self.skip_invalid_loci,
                  'gzAsBGZ': self.force_bgz,
                  'forceGZ': self.force_gz,
                  'filterAndReplace': make_filter_and_replace(self.filter, self.find_replace),
                  'partitionsJSON': self._partitions_json}
        return escape_str(json.dumps(reader))
    def __eq__(self, other):
        # Field-by-field comparison over every stored option.
        return isinstance(other, MatrixVCFReader) and \
            other.path == self.path and \
            other.call_fields == self.call_fields and \
            other.entry_float_type == self.entry_float_type and \
            other.header_file == self.header_file and \
            other.min_partitions == self.min_partitions and \
            other.reference_genome == self.reference_genome and \
            other.contig_recoding == self.contig_recoding and \
            other.array_elements_required == self.array_elements_required and \
            other.skip_invalid_loci == self.skip_invalid_loci and \
            other.force_bgz == self.force_bgz and \
            other.force_gz == self.force_gz and \
            other.filter == self.filter and \
            other.find_replace == self.find_replace and \
            other._partitions_json == self._partitions_json
class MatrixBGENReader(MatrixReader):
    """Reads BGEN file(s), optionally restricted to a table of included variants."""
    @typecheck_method(path=oneof(str, sequenceof(str)),
                      sample_file=nullable(str),
                      index_file_map=nullable(dictof(str, str)),
                      n_partitions=nullable(int),
                      block_size=nullable(int),
                      included_variants=nullable(anytype))
    def __init__(self, path, sample_file, index_file_map, n_partitions, block_size, included_variants):
        self.path = wrap_to_list(path)
        self.sample_file = sample_file
        # Normalize a missing map to {} so render() always emits an object.
        self.index_file_map = index_file_map if index_file_map else {}
        self.n_partitions = n_partitions
        self.block_size = block_size
        from hail.table import Table
        if included_variants is not None:
            assert (isinstance(included_variants, Table))
        self.included_variants = included_variants

    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        reader = {'name': 'MatrixBGENReader',
                  'files': self.path,
                  'sampleFile': self.sample_file,
                  'indexFileMap': self.index_file_map,
                  'nPartitions': self.n_partitions,
                  'blockSizeInMB': self.block_size,
                  # FIXME: This has to be wrong. The included_variants IR is not included as a child
                  'includedVariants': r(self.included_variants._tir) if self.included_variants else None
                  }
        return escape_str(json.dumps(reader))

    def __eq__(self, other):
        # BUG FIX: n_partitions was previously omitted from the comparison,
        # so readers differing only in partitioning compared equal; every
        # other reader in this file compares all of its stored fields.
        return isinstance(other, MatrixBGENReader) and \
            other.path == self.path and \
            other.sample_file == self.sample_file and \
            other.index_file_map == self.index_file_map and \
            other.n_partitions == self.n_partitions and \
            other.block_size == self.block_size and \
            other.included_variants == self.included_variants
class TextMatrixReader(MatrixReader):
    """Reads a delimited text matrix (row fields plus a homogeneous entry field)."""
    @typecheck_method(paths=oneof(str, sequenceof(str)),
                      n_partitions=nullable(int),
                      row_fields=dictof(str, hail_type),
                      entry_type=enumeration(tint32, tint64, tfloat32, tfloat64, tstr),
                      missing_value=str,
                      has_header=bool,
                      separator=str,
                      gzip_as_bgzip=bool,
                      add_row_id=bool,
                      comment=sequenceof(str))
    def __init__(self,
                 paths,
                 n_partitions,
                 row_fields,
                 entry_type,
                 missing_value,
                 has_header,
                 separator,
                 gzip_as_bgzip,
                 add_row_id,
                 comment):
        self.paths = wrap_to_list(paths)
        self.n_partitions = n_partitions
        self.row_fields = row_fields
        self.entry_type = entry_type
        self.missing_value = missing_value
        self.has_header = has_header
        self.separator = separator
        self.gzip_as_bgzip = gzip_as_bgzip
        self.add_row_id = add_row_id
        self.comment = comment
    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        # Types are rendered as parsable strings for the backend.
        reader = {'name': 'TextMatrixReader',
                  'paths': self.paths,
                  'nPartitions': self.n_partitions,
                  'rowFieldsStr': {k: v._parsable_string()
                                   for k, v in self.row_fields.items()},
                  'entryTypeStr': self.entry_type._parsable_string(),
                  'missingValue': self.missing_value,
                  'hasHeader': self.has_header,
                  'separatorStr': self.separator,
                  'gzipAsBGZip': self.gzip_as_bgzip,
                  'addRowId': self.add_row_id,
                  'comment': self.comment}
        return escape_str(json.dumps(reader))
    def __eq__(self, other):
        # Field-by-field comparison over every stored option.
        return isinstance(other, TextMatrixReader) and \
            self.paths == other.paths and \
            self.n_partitions == other.n_partitions and \
            self.row_fields == other.row_fields and \
            self.entry_type == other.entry_type and \
            self.missing_value == other.missing_value and \
            self.has_header == other.has_header and \
            self.separator == other.separator and \
            self.gzip_as_bgzip == other.gzip_as_bgzip and \
            self.add_row_id == other.add_row_id and \
            self.comment == other.comment
class MatrixPLINKReader(MatrixReader):
    """Reads a PLINK .bed/.bim/.fam fileset into a MatrixTable."""
    @typecheck_method(bed=str, bim=str, fam=str, min_partitions=nullable(int),
                      missing=str, delimiter=str, quant_pheno=bool,
                      a2_reference=bool, reference_genome=nullable(reference_genome_type),
                      contig_recoding=nullable(dictof(str, str)), skip_invalid_loci=bool)
    def __init__(self, bed, bim, fam, min_partitions, missing, delimiter, quant_pheno, a2_reference,
                 reference_genome, contig_recoding, skip_invalid_loci):
        self.bed = bed
        self.bim = bim
        self.fam = fam
        self.min_partitions = min_partitions
        self.missing = missing
        self.delimiter = delimiter
        self.quant_pheno = quant_pheno
        self.a2_reference = a2_reference
        self.reference_genome = reference_genome
        self.contig_recoding = contig_recoding
        self.skip_invalid_loci = skip_invalid_loci
    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        reader = {'name': 'MatrixPLINKReader',
                  'bed': self.bed,
                  'bim': self.bim,
                  'fam': self.fam,
                  'nPartitions': self.min_partitions,
                  'missing': self.missing,
                  'delimiter': self.delimiter,
                  'quantPheno': self.quant_pheno,
                  'a2Reference': self.a2_reference,
                  'rg': self.reference_genome.name if self.reference_genome else None,
                  'contigRecoding': self.contig_recoding if self.contig_recoding else {},
                  'skipInvalidLoci': self.skip_invalid_loci}
        return escape_str(json.dumps(reader))
    def __eq__(self, other):
        # Field-by-field comparison over every stored option.
        return isinstance(other, MatrixPLINKReader) and \
            other.bed == self.bed and \
            other.bim == self.bim and \
            other.fam == self.fam and \
            other.min_partitions == self.min_partitions and \
            other.missing == self.missing and \
            other.delimiter == self.delimiter and \
            other.quant_pheno == self.quant_pheno and \
            other.a2_reference == self.a2_reference and \
            other.reference_genome == self.reference_genome and \
            other.contig_recoding == self.contig_recoding and \
            other.skip_invalid_loci == self.skip_invalid_loci
class MatrixGENReader(MatrixReader):
    """Reads GEN-format genotype files (with a sample file) into a MatrixTable."""
    @typecheck_method(files=sequenceof(str), sample_file=str, chromosome=nullable(str),
                      min_partitions=nullable(int), tolerance=float, rg=nullable(str),
                      contig_recoding=dictof(str, str), skip_invalid_loci=bool)
    def __init__(self, files, sample_file, chromosome, min_partitions, tolerance,
                 rg, contig_recoding, skip_invalid_loci):
        # Unlike the other readers, all options are stored pre-rendered in a
        # single JSON-ready dict; both render() and __eq__ use it directly.
        self.config = {
            'name': 'MatrixGENReader',
            'files': files,
            'sampleFile': sample_file,
            'chromosome': chromosome,
            'nPartitions': min_partitions,
            'tolerance': tolerance,
            'rg': rg,
            'contigRecoding': contig_recoding if contig_recoding else {},
            'skipInvalidLoci': skip_invalid_loci
        }
    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        return escape_str(json.dumps(self.config))
    def __eq__(self, other):
        return isinstance(other, MatrixGENReader) and \
            self.config == other.config
| 43.034582 | 104 | 0.583205 | import abc
import json
import hail as hl
from .utils import make_filter_and_replace
from ..expr.types import tfloat32, tfloat64, hail_type, tint32, tint64, tstr
from ..genetics.reference_genome import reference_genome_type
from ..typecheck import *
from ..utils import wrap_to_list
from ..utils.misc import escape_str
class MatrixReader(object):
@abc.abstractmethod
def render(self, r):
pass
@abc.abstractmethod
def __eq__(self, other):
pass
class MatrixNativeReader(MatrixReader):
    """Reads a MatrixTable stored in Hail's native on-disk format.

    Optionally restricts the read to a list of row-key intervals; scalar
    interval points are wrapped into single-field structs so the backend
    always receives struct-typed points.
    """
    @typecheck_method(path=str,
                      intervals=nullable(sequenceof(anytype)),
                      filter_intervals=bool)
    def __init__(self, path, intervals, filter_intervals):
        if intervals is not None:
            t = hl.expr.impute_type(intervals)
            # BUG FIX: the original used `and`, which allowed a non-array
            # value past the check (and could raise AttributeError on
            # t.element_type for non-array types). The argument must be an
            # array whose elements are intervals.
            if not isinstance(t, hl.tarray) or not isinstance(t.element_type, hl.tinterval):
                raise TypeError("'intervals' must be an array of tintervals")
            pt = t.element_type.point_type
            if isinstance(pt, hl.tstruct):
                self._interval_type = t
            else:
                # Wrap scalar points in a one-field struct.
                self._interval_type = hl.tarray(hl.tinterval(hl.tstruct(__point=pt)))
        self.path = path
        self.filter_intervals = filter_intervals
        if intervals is not None and t != self._interval_type:
            # Re-wrap each interval's endpoints to match the struct point type.
            self.intervals = [hl.Interval(hl.Struct(__point=i.start),
                                          hl.Struct(__point=i.end),
                                          i.includes_start,
                                          i.includes_end) for i in intervals]
        else:
            self.intervals = intervals

    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        reader = {'name': 'MatrixNativeReader',
                  'path': self.path}
        if self.intervals is not None:
            assert self._interval_type is not None
            reader['options'] = {
                'name': 'NativeReaderOptions',
                'intervals': self._interval_type._convert_to_json(self.intervals),
                'intervalPointType': self._interval_type.element_type.point_type._parsable_string(),
                'filterIntervals': self.filter_intervals,
            }
        return escape_str(json.dumps(reader))

    def __eq__(self, other):
        return isinstance(other, MatrixNativeReader) and \
            other.path == self.path and \
            other.intervals == self.intervals and \
            other.filter_intervals == self.filter_intervals
class MatrixRangeReader(MatrixReader):
@typecheck_method(n_rows=int,
n_cols=int,
n_partitions=nullable(int))
def __init__(self, n_rows, n_cols, n_partitions):
self.n_rows = n_rows
self.n_cols = n_cols
self.n_partitions = n_partitions
def render(self, r):
reader = {'name': 'MatrixRangeReader',
'nRows': self.n_rows,
'nCols': self.n_cols,
'nPartitions': self.n_partitions}
return escape_str(json.dumps(reader))
def __eq__(self, other):
return isinstance(other, MatrixRangeReader) and \
other.n_rows == self.n_rows and \
other.n_cols == self.n_cols and \
other.n_partitions == self.n_partitions
class MatrixVCFReader(MatrixReader):
@typecheck_method(path=oneof(str, sequenceof(str)),
call_fields=oneof(str, sequenceof(str)),
entry_float_type=enumeration(tfloat32, tfloat64),
header_file=nullable(str),
min_partitions=nullable(int),
reference_genome=nullable(reference_genome_type),
contig_recoding=nullable(dictof(str, str)),
array_elements_required=bool,
skip_invalid_loci=bool,
force_bgz=bool,
force_gz=bool,
filter=nullable(str),
find_replace=nullable(sized_tupleof(str, str)),
_partitions_json=nullable(str))
def __init__(self,
path,
call_fields,
entry_float_type,
header_file,
min_partitions,
reference_genome,
contig_recoding,
array_elements_required,
skip_invalid_loci,
force_bgz,
force_gz,
filter,
find_replace,
_partitions_json):
self.path = wrap_to_list(path)
self.header_file = header_file
self.min_partitions = min_partitions
self.call_fields = wrap_to_list(call_fields)
self.entry_float_type = entry_float_type._parsable_string()
self.reference_genome = reference_genome
self.contig_recoding = contig_recoding
self.array_elements_required = array_elements_required
self.skip_invalid_loci = skip_invalid_loci
self.force_gz = force_gz
self.force_bgz = force_bgz
self.filter = filter
self.find_replace = find_replace
self._partitions_json = _partitions_json
def render(self, r):
reader = {'name': 'MatrixVCFReader',
'files': self.path,
'callFields': self.call_fields,
'entryFloatTypeName': self.entry_float_type,
'headerFile': self.header_file,
'minPartitions': self.min_partitions,
'rg': self.reference_genome.name if self.reference_genome else None,
'contigRecoding': self.contig_recoding if self.contig_recoding else {},
'arrayElementsRequired': self.array_elements_required,
'skipInvalidLoci': self.skip_invalid_loci,
'gzAsBGZ': self.force_bgz,
'forceGZ': self.force_gz,
'filterAndReplace': make_filter_and_replace(self.filter, self.find_replace),
'partitionsJSON': self._partitions_json}
return escape_str(json.dumps(reader))
def __eq__(self, other):
return isinstance(other, MatrixVCFReader) and \
other.path == self.path and \
other.call_fields == self.call_fields and \
other.entry_float_type == self.entry_float_type and \
other.header_file == self.header_file and \
other.min_partitions == self.min_partitions and \
other.reference_genome == self.reference_genome and \
other.contig_recoding == self.contig_recoding and \
other.array_elements_required == self.array_elements_required and \
other.skip_invalid_loci == self.skip_invalid_loci and \
other.force_bgz == self.force_bgz and \
other.force_gz == self.force_gz and \
other.filter == self.filter and \
other.find_replace == self.find_replace and \
other._partitions_json == self._partitions_json
class MatrixBGENReader(MatrixReader):
    """Reads BGEN file(s), optionally restricted to a table of included variants."""
    @typecheck_method(path=oneof(str, sequenceof(str)),
                      sample_file=nullable(str),
                      index_file_map=nullable(dictof(str, str)),
                      n_partitions=nullable(int),
                      block_size=nullable(int),
                      included_variants=nullable(anytype))
    def __init__(self, path, sample_file, index_file_map, n_partitions, block_size, included_variants):
        self.path = wrap_to_list(path)
        self.sample_file = sample_file
        # Normalize a missing map to {} so render() always emits an object.
        self.index_file_map = index_file_map if index_file_map else {}
        self.n_partitions = n_partitions
        self.block_size = block_size
        from hail.table import Table
        if included_variants is not None:
            assert (isinstance(included_variants, Table))
        self.included_variants = included_variants

    def render(self, r):
        """Return the escaped JSON reader spec for the backend parser."""
        reader = {'name': 'MatrixBGENReader',
                  'files': self.path,
                  'sampleFile': self.sample_file,
                  'indexFileMap': self.index_file_map,
                  'nPartitions': self.n_partitions,
                  'blockSizeInMB': self.block_size,
                  'includedVariants': r(self.included_variants._tir) if self.included_variants else None
                  }
        return escape_str(json.dumps(reader))

    def __eq__(self, other):
        # BUG FIX: n_partitions was previously omitted from the comparison,
        # so readers differing only in partitioning compared equal; every
        # other reader in this file compares all of its stored fields.
        return isinstance(other, MatrixBGENReader) and \
            other.path == self.path and \
            other.sample_file == self.sample_file and \
            other.index_file_map == self.index_file_map and \
            other.n_partitions == self.n_partitions and \
            other.block_size == self.block_size and \
            other.included_variants == self.included_variants
class TextMatrixReader(MatrixReader):
@typecheck_method(paths=oneof(str, sequenceof(str)),
n_partitions=nullable(int),
row_fields=dictof(str, hail_type),
entry_type=enumeration(tint32, tint64, tfloat32, tfloat64, tstr),
missing_value=str,
has_header=bool,
separator=str,
gzip_as_bgzip=bool,
add_row_id=bool,
comment=sequenceof(str))
def __init__(self,
paths,
n_partitions,
row_fields,
entry_type,
missing_value,
has_header,
separator,
gzip_as_bgzip,
add_row_id,
comment):
self.paths = wrap_to_list(paths)
self.n_partitions = n_partitions
self.row_fields = row_fields
self.entry_type = entry_type
self.missing_value = missing_value
self.has_header = has_header
self.separator = separator
self.gzip_as_bgzip = gzip_as_bgzip
self.add_row_id = add_row_id
self.comment = comment
def render(self, r):
reader = {'name': 'TextMatrixReader',
'paths': self.paths,
'nPartitions': self.n_partitions,
'rowFieldsStr': {k: v._parsable_string()
for k, v in self.row_fields.items()},
'entryTypeStr': self.entry_type._parsable_string(),
'missingValue': self.missing_value,
'hasHeader': self.has_header,
'separatorStr': self.separator,
'gzipAsBGZip': self.gzip_as_bgzip,
'addRowId': self.add_row_id,
'comment': self.comment}
return escape_str(json.dumps(reader))
def __eq__(self, other):
return isinstance(other, TextMatrixReader) and \
self.paths == other.paths and \
self.n_partitions == other.n_partitions and \
self.row_fields == other.row_fields and \
self.entry_type == other.entry_type and \
self.missing_value == other.missing_value and \
self.has_header == other.has_header and \
self.separator == other.separator and \
self.gzip_as_bgzip == other.gzip_as_bgzip and \
self.add_row_id == other.add_row_id and \
self.comment == other.comment
class MatrixPLINKReader(MatrixReader):
@typecheck_method(bed=str, bim=str, fam=str, min_partitions=nullable(int),
missing=str, delimiter=str, quant_pheno=bool,
a2_reference=bool, reference_genome=nullable(reference_genome_type),
contig_recoding=nullable(dictof(str, str)), skip_invalid_loci=bool)
def __init__(self, bed, bim, fam, min_partitions, missing, delimiter, quant_pheno, a2_reference,
reference_genome, contig_recoding, skip_invalid_loci):
self.bed = bed
self.bim = bim
self.fam = fam
self.min_partitions = min_partitions
self.missing = missing
self.delimiter = delimiter
self.quant_pheno = quant_pheno
self.a2_reference = a2_reference
self.reference_genome = reference_genome
self.contig_recoding = contig_recoding
self.skip_invalid_loci = skip_invalid_loci
def render(self, r):
reader = {'name': 'MatrixPLINKReader',
'bed': self.bed,
'bim': self.bim,
'fam': self.fam,
'nPartitions': self.min_partitions,
'missing': self.missing,
'delimiter': self.delimiter,
'quantPheno': self.quant_pheno,
'a2Reference': self.a2_reference,
'rg': self.reference_genome.name if self.reference_genome else None,
'contigRecoding': self.contig_recoding if self.contig_recoding else {},
'skipInvalidLoci': self.skip_invalid_loci}
return escape_str(json.dumps(reader))
def __eq__(self, other):
return isinstance(other, MatrixPLINKReader) and \
other.bed == self.bed and \
other.bim == self.bim and \
other.fam == self.fam and \
other.min_partitions == self.min_partitions and \
other.missing == self.missing and \
other.delimiter == self.delimiter and \
other.quant_pheno == self.quant_pheno and \
other.a2_reference == self.a2_reference and \
other.reference_genome == self.reference_genome and \
other.contig_recoding == self.contig_recoding and \
other.skip_invalid_loci == self.skip_invalid_loci
class MatrixGENReader(MatrixReader):
@typecheck_method(files=sequenceof(str), sample_file=str, chromosome=nullable(str),
min_partitions=nullable(int), tolerance=float, rg=nullable(str),
contig_recoding=dictof(str, str), skip_invalid_loci=bool)
def __init__(self, files, sample_file, chromosome, min_partitions, tolerance,
rg, contig_recoding, skip_invalid_loci):
self.config = {
'name': 'MatrixGENReader',
'files': files,
'sampleFile': sample_file,
'chromosome': chromosome,
'nPartitions': min_partitions,
'tolerance': tolerance,
'rg': rg,
'contigRecoding': contig_recoding if contig_recoding else {},
'skipInvalidLoci': skip_invalid_loci
}
def render(self, r):
return escape_str(json.dumps(self.config))
def __eq__(self, other):
return isinstance(other, MatrixGENReader) and \
self.config == other.config
| true | true |
f73e8d67e6a1affd983994564338e98edbfa53ef | 1,356 | py | Python | examples/unwanted/example.py | balakrishnan273818/AdvancedLaneDetection | c0993aa9422654258a41fe9616ab4e24b29e6a7a | [
"MIT"
] | null | null | null | examples/unwanted/example.py | balakrishnan273818/AdvancedLaneDetection | c0993aa9422654258a41fe9616ab4e24b29e6a7a | [
"MIT"
] | null | null | null | examples/unwanted/example.py | balakrishnan273818/AdvancedLaneDetection | c0993aa9422654258a41fe9616ab4e24b29e6a7a | [
"MIT"
] | null | null | null | '''
def warper(img, src, dst):
# Compute and apply perpective transform
img_size = (img.shape[1], img.shape[0])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_NEAREST) # keep same size as input image
return warped
'''
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
#%matplotlib qt
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# Prepare the chessboard's 3D object points: (0,0,0), (1,0,0), ..., (8,5,0)
# for a 9x6 inner-corner pattern (z is always 0 — the board is planar).
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane
# Make a list of calibration images
images = glob.glob('../camera_cal/*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6), None)
    # If found, add object points and image points.
    # (idiom fix: `if ret == True` -> `if ret`)
    if ret:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the detected corners briefly
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        cv2.imshow('img', img)
        cv2.waitKey(30)
cv2.destroyAllWindows()
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
objpoints = []
imgpoints = []
images = glob.glob('../camera_cal/*.jpg')
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
cv2.imshow('img',img)
cv2.waitKey(30)
cv2.destroyAllWindows() | true | true |
f73e8e214355ee7241a25ffc71cc6cf79a1e0092 | 830 | py | Python | py_sandbox/moduleInfo_configparser.py | kjgonzalez/codefiles | b86f25182d1b5553a331f8721dd06b51fa157c3e | [
"MIT"
] | null | null | null | py_sandbox/moduleInfo_configparser.py | kjgonzalez/codefiles | b86f25182d1b5553a331f8721dd06b51fa157c3e | [
"MIT"
] | 10 | 2019-10-01T20:48:15.000Z | 2020-04-14T18:21:09.000Z | py_sandbox/moduleInfo_configparser.py | kjgonzalez/codefiles | b86f25182d1b5553a331f8721dd06b51fa157c3e | [
"MIT"
] | null | null | null | '''
date: 200429
objective: check out configparser module, which allows one to read from an ini file
src1: https://docs.python.org/3/library/configparser.html
src2: https://docs.python.org/3/library/configparser.html#mapping-protocol-access
KJG200430: ini files aren't really that appealing to use. in fact, it might be best to use other
filetypes to store your data
'''
import configparser
from pprint import pprint
dat = configparser.ConfigParser()
dat.read('../data/simpsons.ini')
pprint(dat)
# print out structure & information listed
for isec in dat:
print('SEC:',isec)
for ival in dat[isec]:
print(' {}: {}'.format( ival,dat[isec][ival] ))
# can get values in a specific type
x = dat[isec]
y = x[ival]
y2 = x.getfloat(ival)
print('{}: {}'.format( y,type(y) ))
print('{}: {}'.format( y2, type(y2) )) | 28.62069 | 96 | 0.698795 |
import configparser
from pprint import pprint
dat = configparser.ConfigParser()
dat.read('../data/simpsons.ini')
pprint(dat)
for isec in dat:
print('SEC:',isec)
for ival in dat[isec]:
print(' {}: {}'.format( ival,dat[isec][ival] ))
x = dat[isec]
y = x[ival]
y2 = x.getfloat(ival)
print('{}: {}'.format( y,type(y) ))
print('{}: {}'.format( y2, type(y2) )) | true | true |
f73e8ecdd139363c9f8d164bdef6fec9949455c7 | 30,130 | py | Python | src/test/rnn_test.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | src/test/rnn_test.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | src/test/rnn_test.py | DanielSun94/kgenlu | bbf377c6740040cb1a8b656785e7c5bfdb8371d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
NLP From Scratch: Translation with a Sequence to Sequence Network and Attention
*******************************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
This is the third and final tutorial on doing "NLP From Scratch", where we
write our own classes and functions to preprocess the data to do our NLP
modeling tasks. We hope after you complete this tutorial that you'll proceed to
learn how `torchtext` can handle much of this preprocessing for you in the
three tutorials immediately following this one.
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <https://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- https://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <https://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
**Requirements**
"""
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <https://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__
# pointed me to the open translation site https://tatoeba.org/ which has
# downloads available at https://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: https://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. J'ai froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` which will be used to replace rare words later.
#
SOS_token = 0
EOS_token = 1
class Lang:
    """Vocabulary for one language: maps words to indices and back.

    Indices 0 and 1 are reserved for the SOS/EOS tokens; ``word2count``
    tracks how often each word was seen so rare words could be pruned later.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # SOS and EOS are pre-registered

    def addSentence(self, sentence):
        """Register every whitespace-separated token of *sentence*."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its occurrence count."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            index = self.n_words
            self.word2index[word] = index
            self.word2count[word] = 1
            self.index2word[index] = word
            self.n_words = index + 1
######################################################################
# The files are all in Unicode, to simplify we will turn Unicode
# characters to ASCII, make everything lowercase, and trim most
# punctuation.
#
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents: NFD-decompose *s* and drop combining marks (category Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')


def normalizeString(s):
    """Lowercase and trim *s*, pad .!? with a space, squash other runs to spaces."""
    cleaned = unicodeToAscii(s.lower().strip())
    cleaned = re.sub(r"([.!?])", r" \1", cleaned)
    return re.sub(r"[^a-zA-Z.!?]+", r" ", cleaned)
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
    """Load ``data/<lang1>-<lang2>.txt`` and build normalized sentence pairs.

    Each line is a tab-separated translation pair. With ``reverse=True`` the
    pairs are flipped (and the Lang roles swapped) so we translate
    lang2 -> lang1. Returns (input_lang, output_lang, pairs).
    """
    print("Reading lines...")

    # One translation pair per line, fields separated by a tab.
    raw = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').read()
    lines = raw.strip().split('\n')

    pairs = [[normalizeString(part) for part in line.split('\t')]
             for line in lines]

    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang, output_lang = Lang(lang2), Lang(lang1)
    else:
        input_lang, output_lang = Lang(lang1), Lang(lang2)

    return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10  # maximum sentence length kept (in words, incl. punctuation)

# Keep only English sentences starting with these simple subject+to-be
# prefixes (apostrophes were stripped by normalizeString, e.g. "i'm" -> "i m").
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)


def filterPair(p):
    """True if both sides are shorter than MAX_LENGTH words and p[1] has a kept prefix."""
    short_enough = (len(p[0].split(' ')) < MAX_LENGTH
                    and len(p[1].split(' ')) < MAX_LENGTH)
    return short_enough and p[1].startswith(eng_prefixes)


def filterPairs(pairs):
    """Drop every pair rejected by filterPair."""
    return [p for p in pairs if filterPair(p)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
    """Read the lang1-lang2 corpus, filter it, and build both vocabularies.

    Returns (input_lang, output_lang, pairs); with ``reverse=True`` the pair
    order (and the Lang roles) are swapped so we translate lang2 -> lang1.
    """
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))
    # Keep only short "subject + to-be" sentences (see filterPair).
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    # Populate the word <-> index tables from the surviving pairs only.
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs
# Build the fra -> eng dataset once at import time (reverse=True swaps pairs).
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))  # sanity check: show one random training pair
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <https://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class EncoderRNN(nn.Module):
    """Single-layer GRU encoder: consumes one token index per call.

    The embedding dimension equals the GRU hidden size by construction.
    """

    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        """Encode one token; returns (output, new_hidden), both (1, 1, H)."""
        embedded = self.embedding(input).view(1, 1, -1)  # (seq=1, batch=1, H)
        return self.gru(embedded, hidden)

    def initHidden(self):
        """Zero initial hidden state of shape (num_layers=1, batch=1, H)."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Simple Decoder
# ^^^^^^^^^^^^^^
#
# In the simplest seq2seq decoder we use only last output of the encoder.
# This last output is sometimes called the *context vector* as it encodes
# context from the entire sequence. This context vector is used as the
# initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and
# hidden state. The initial input token is the start-of-string ``<SOS>``
# token, and the first hidden state is the context vector (the encoder's
# last hidden state).
#
# .. figure:: /_static/img/seq-seq-images/decoder-network.png
# :alt:
#
#
class DecoderRNN(nn.Module):
    """Plain (attention-free) GRU decoder: previous token in, log-probs out."""

    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """One decoding step; returns (log_probs of shape (1, V), new_hidden)."""
        step = F.relu(self.embedding(input).view(1, 1, -1))
        step, hidden = self.gru(step, hidden)
        log_probs = self.softmax(self.out(step[0]))
        return log_probs, hidden

    def initHidden(self):
        """Zero initial hidden state of shape (num_layers=1, batch=1, H)."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# I encourage you to train and observe the results of this model, but to
# save space we'll be going straight for the gold and introducing the
# Attention Mechanism.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
    """GRU decoder with attention over the (fixed-length) encoder outputs.

    The attention weights are produced by a linear layer over the current
    input embedding concatenated with the hidden state, so the weight vector
    has length ``max_length``; shorter inputs simply attend over zero-padded
    encoder outputs.
    """

    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # Scores one attention weight per encoder position.
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        # Fuses the embedded input with the attention-weighted context.
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        """One decoding step; returns (log_probs, new_hidden, attn_weights)."""
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # Attention weights from the current embedding and hidden state.
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # Weighted sum of encoder outputs (batched matrix multiply).
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        """Zero initial hidden state of shape (num_layers=1, batch=1, H)."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
    """Map each whitespace-separated token of *sentence* to its index in *lang*."""
    return [lang.word2index[token] for token in sentence.split(' ')]


def tensorFromSentence(lang, sentence):
    """Sentence -> column tensor (N+1, 1) of word indices, terminated by EOS."""
    indexes = indexesFromSentence(lang, sentence) + [EOS_token]
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
    """Turn a (source, target) sentence pair into (input, target) tensors.

    Uses the module-level ``input_lang``/``output_lang`` vocabularies.
    """
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.378.4095&rep=rep1&type=pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
# Probability of feeding the ground-truth token (rather than the model's own
# prediction) as the next decoder input during training.
teacher_forcing_ratio = 0.5


def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """Run one optimization step on a single (input, target) tensor pair.

    Encodes the input token by token, then decodes with or without teacher
    forcing (chosen at random). Returns the loss averaged over the number of
    target tokens (a plain float).
    """
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    # Fixed-size buffer of encoder outputs; the unused tail stays zero.
    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]], device=device)

    # The decoder starts from the encoder's final hidden state (the "context").
    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            # Stop early once the model emits end-of-sentence.
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def asMinutes(s):
    """Format a duration in seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)


def timeSince(since, percent):
    """Return 'elapsed (- estimated remaining)' given progress *percent* (0..1]."""
    elapsed = time.time() - since
    # Linear extrapolation: total ~= elapsed / fraction_done.
    remaining = elapsed / percent - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
    """Run *n_iters* SGD training steps on randomly sampled sentence pairs.

    Prints progress (elapsed/remaining time, average loss) every
    ``print_every`` iterations, records an averaged loss point every
    ``plot_every`` iterations, and plots the loss curve at the end.
    """
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    # Pre-sample all training pairs (with replacement) up front.
    training_pairs = [tensorsFromPair(random.choice(pairs))
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
    """Plot the running-average losses collected during training.

    Fix: the original called ``plt.figure()`` immediately before
    ``plt.subplots()``, leaking one extra empty figure per call; only the
    figure created by ``subplots`` is actually drawn on.
    """
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    """Greedy-decode *sentence* with the trained encoder/decoder.

    Returns (decoded_words, attentions) where ``attentions`` is the
    (steps x max_length) matrix of the attention weights actually used.
    Decoding stops at EOS or after ``max_length`` steps.
    """
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        # Fixed-size buffer of encoder outputs; the unused tail stays zero.
        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            # Greedy choice: take the single most likely token.
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])

            # Feed the prediction back in as the next decoder input.
            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluateRandomly(encoder, decoder, n=10):
    """Translate *n* random training pairs and print source (>), target (=), output (<)."""
    for _ in range(n):
        source, target = random.choice(pairs)
        print('>', source)
        print('=', target)
        output_words, attentions = evaluate(encoder, decoder, source)
        translation = ' '.join(output_words)
        print('<', translation)
        print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes it easier to run multiple experiments) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
# Small network: 256 hidden units, a single GRU layer on each side.
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)

# Train on 75k sampled pairs, reporting the average loss every 5k steps.
trainIters(encoder1, attn_decoder1, 75000, print_every=5000)

######################################################################
# Qualitative check on random training pairs.
evaluateRandomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
# Quick look at the raw attention matrix for one sentence.
output_words, attentions = evaluate(
    encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())  # rows: output steps, columns: input steps
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix with source words as column labels and
    produced words as row labels.

    Fix: tick positions are now pinned explicitly with set_xticks/set_yticks
    before assigning the labels. The original called set_*ticklabels on the
    default (auto) locator, which mislabels the axes and warns
    ("FixedFormatter should only be used together with FixedLocator") on
    matplotlib >= 3.3.
    """
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # One tick per matrix cell, labelled with the corresponding word.
    x_labels = input_sentence.split(' ') + ['<EOS>']
    y_labels = output_words
    ax.set_xticks(range(len(x_labels)))
    ax.set_xticklabels(x_labels, rotation=90)
    ax.set_yticks(range(len(y_labels)))
    ax.set_yticklabels(y_labels)

    plt.show()
def evaluateAndShowAttention(input_sentence):
    """Translate *input_sentence*, print both sides, and plot the attention map."""
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    translation = ' '.join(output_words)
    print('input =', input_sentence)
    print('output =', translation)
    showAttention(input_sentence, output_words, attentions)
# A few qualitative examples; each also pops up its attention heat map.
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embeddings with pre-trained word embeddings such as word2vec or
# GloVe
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
# | 35.11655 | 129 | 0.651012 |
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
| true | true |
f73e8fc8b7fc5735d30e5588477a6ef3cdb95e4d | 2,059 | py | Python | learning_text_transformer/config.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | 19 | 2015-08-28T14:41:16.000Z | 2021-03-05T17:26:42.000Z | learning_text_transformer/config.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | null | null | null | learning_text_transformer/config.py | ianozsvald/learning_text_transformer | 23fa318a7c8ed0e2dbd1fc8e68e0cb7d1f15731d | [
"MIT"
] | null | null | null | """"""
import os
import logging
# Simple logging configuration, an example output might be:
# 2013-06-03 15:07:55.740 p7470 {start_here.py:31} INFO - This is an example log message
# Log file written in the process working directory.
LOG_FILE_NAME = "log.log"
# The date format is ISO 8601; the format uses a decimal separator for
# milliseconds (not the default comma) as dateutil.parser cannot read the
# comma but it can read the decimal separator (both are allowed in ISO 8601)
fmt = '%(asctime)s.%(msecs)d p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_FILE_NAME,
                    level=logging.DEBUG,
                    format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
# NOTE: ConcurrentLogHandler or RotatingFileHandler would bound the file's
# growth (either requires some more setup).
# Environment variable consulted by get() to select a configuration.
CONFIG_ENV_VAR = "CONFIG"
class ConfDev(object):
    """Configuration for the local development machine."""
    name = "dev"

    def __init__(self):
        self.log_filename = "/home/ian/workspace/projects/nlp_tools/learning_text_transformer/logs/stuff.log"


class ConfDeploy(object):
    """Deployment config on WebFaction"""
    name = "deploy"

    def __init__(self):
        self.log_filename = "/home/ianozsvald/webapps/api_annotate_io/learning_text_transformer/logs/stuff.log"


# Registry of selectable configurations (matched by their ``name``).
configurations = [ConfDev, ConfDeploy]


def get(configuration=None):
    """Return a configuration instance chosen by name, env var, or $USER.

    Resolution order: the explicit *configuration* argument, then the
    CONFIG environment variable, then a guess based on the login name.
    Raises AssertionError if nothing matches.
    """
    if configuration is None:
        configuration = os.getenv(CONFIG_ENV_VAR)
    if configuration is None:
        # Fall back to guessing from the login name.
        if os.getenv('USER') == "ianozsvald":
            configuration = "deploy"
        elif os.getenv('USER') == "ian":
            configuration = "dev"
    # Find the matching configuration class and instantiate it.
    for conf_class in configurations:
        if conf_class.name == configuration:
            return conf_class()
    # Fix: the original used ``assert False`` here, which is silently
    # stripped under ``python -O``; raise the same exception type explicitly
    # so the failure survives optimized mode.
    raise AssertionError("You must choose a configuration")
| 34.316667 | 111 | 0.684798 | import os
import logging
LOG_FILE_NAME = "log.log"
fmt = '%(asctime)s.%(msecs)d p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_FILE_NAME,
level=logging.DEBUG,
format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
CONFIG_ENV_VAR = "CONFIG"
class ConfDev(object):
    # NOTE(review): duplicate of the ConfDev class above — this region is the
    # dataset's comment-stripped column; kept byte-identical apart from comments.
    name = "dev"
    def __init__(self):
        self.log_filename = "/home/ian/workspace/projects/nlp_tools/learning_text_transformer/logs/stuff.log"
class ConfDeploy(object):
    # NOTE(review): duplicate of the ConfDeploy class above (comment-stripped
    # dataset column); kept byte-identical apart from comments.
    name = "deploy"
    def __init__(self):
        self.log_filename = "/home/ianozsvald/webapps/api_annotate_io/learning_text_transformer/logs/stuff.log"
# Registry of selectable configurations (matched by their ``name``).
configurations = [ConfDev, ConfDeploy]
def get(configuration=None):
    # NOTE(review): comment-stripped duplicate of get() above (dataset
    # "content_no_comment" column); logic unchanged. The trailing ``assert``
    # shares the -O stripping caveat noted on the original.
    if configuration is None:
        configuration = os.getenv(CONFIG_ENV_VAR)
    if configuration is None:
        if os.getenv('USER') == "ianozsvald":
            configuration = "deploy"
        elif os.getenv('USER') == "ian":
            configuration = "dev"
    for c in configurations:
        if c.name == configuration:
            conf = c()
            return conf
    assert False, "You must choose a configuration"
| true | true |
f73e9005ae34402980d7c1688fb74804bf4165ba | 5,952 | py | Python | Project_2.py | theKasra/14-puzzle-problem-bidirectionalsearch | f6fe4e0d8a1db1b1675933d8b2461981ac08686b | [
"MIT"
] | null | null | null | Project_2.py | theKasra/14-puzzle-problem-bidirectionalsearch | f6fe4e0d8a1db1b1675933d8b2461981ac08686b | [
"MIT"
] | null | null | null | Project_2.py | theKasra/14-puzzle-problem-bidirectionalsearch | f6fe4e0d8a1db1b1675933d8b2461981ac08686b | [
"MIT"
] | null | null | null | from copy import deepcopy
from collections import deque
import time
import numpy as np
class Node:
    """Search-tree node: a 4x4 puzzle grid plus a link to its parent node."""
    def __init__(self, parent, grid):
        self.parent = parent  # parent Node (None for the two search roots)
        self.grid = grid  # 4x4 list of lists; 0 marks a blank cell
def print_answer(p1, p2):
    """Print the full solution path: initial state -> meeting point -> goal.

    *p1* is the meeting node reached from the initial state (its parent
    chain leads back to the start), *p2* the matching node reached backwards
    from the goal (its parent chain leads to the goal).
    """
    forward_path = []
    node = p1
    while node:
        forward_path.insert(0, node.grid)
        node = node.parent
    print("\nStep by step solution:\n")
    for state in forward_path:
        print(np.matrix(state), "\n")
    print("-----------middle--------------", "\n")
    node = p2
    while node:
        print(np.matrix(node.grid), "\n")
        node = node.parent
def search(node, frontier):
    """Return the frontier entry whose grid equals *node*'s grid, else None."""
    for candidate in frontier:
        if candidate.grid == node.grid:
            return candidate
    return None
def check_grid(grid, frontier, explored):
    """Return True when *grid* is brand new (in neither explored nor frontier).

    Fix: the original fell through to ``return True`` for an
    already-explored grid whenever the frontier happened to be empty,
    allowing duplicate states to re-enter the search.
    """
    if grid in explored:
        return False
    # The frontier stores Node objects, while explored stores raw grids.
    for node in frontier:
        if node.grid == grid:
            return False
    return True
def expand(node, frontier, explored):
    """Generate every legal successor of *node* and enqueue the unseen ones.

    A 14-puzzle grid contains exactly two blanks (zeros) — assumed, as in the
    original. Both blanks are tried against all four slide directions,
    preserving the original left/right/up/down, first-blank-then-second-blank
    ordering so the BFS frontier order is unchanged.
    """
    # The two blank positions in row-major order (same order as the
    # original's manual scan).
    blanks = [[i, j]
              for i in range(4)
              for j in range(4)
              if node.grid[i][j] == 0][:2]
    first_blank, second_blank = blanks

    for move in (move_left, move_right, move_up, move_down):
        move(node, first_blank, frontier, explored)
        move(node, second_blank, frontier, explored)
def add_to_frontier(node, child_grid, frontier):
    """Wrap *child_grid* in a Node whose parent is *node* and enqueue it."""
    frontier.append(Node(node, child_grid))
def move_left(node, coordinate, frontier, explored):
    """Swap the blank at *coordinate* with its left neighbour, if legal.

    Illegal when the blank is in column 0 or the left neighbour is the other
    blank. The resulting grid is enqueued only when check_grid reports it as
    unseen.
    """
    row, col = coordinate
    if col != 0 and node.grid[row][col - 1] != 0:
        child_grid = deepcopy(node.grid)
        child_grid[row][col - 1], child_grid[row][col] = \
            child_grid[row][col], child_grid[row][col - 1]
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def move_right(node, coordinate, frontier, explored):
    """Swap the blank at *coordinate* with its right neighbour, if legal.

    Illegal when the blank is in column 3 or the right neighbour is the
    other blank. The resulting grid is enqueued only when check_grid reports
    it as unseen.
    """
    row, col = coordinate
    if col != 3 and node.grid[row][col + 1] != 0:
        child_grid = deepcopy(node.grid)
        child_grid[row][col + 1], child_grid[row][col] = \
            child_grid[row][col], child_grid[row][col + 1]
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def move_up(node, coordinate, frontier, explored):
    """Swap the blank at *coordinate* with the cell above it, if legal.

    Illegal when the blank is in row 0 or the cell above is the other blank.
    The resulting grid is enqueued only when check_grid reports it as unseen.
    """
    row, col = coordinate
    if row != 0 and node.grid[row - 1][col] != 0:
        child_grid = deepcopy(node.grid)
        child_grid[row - 1][col], child_grid[row][col] = \
            child_grid[row][col], child_grid[row - 1][col]
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def move_down(node, coordinate, frontier, explored):
    """Swap the blank at *coordinate* with the cell below it, if legal.

    Illegal when the blank is in row 3 or the cell below is the other blank.
    The resulting grid is enqueued only when check_grid reports it as unseen.
    """
    row, col = coordinate
    if row != 3 and node.grid[row + 1][col] != 0:
        child_grid = deepcopy(node.grid)
        child_grid[row + 1][col], child_grid[row][col] = \
            child_grid[row][col], child_grid[row + 1][col]
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def bidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal):
    """Alternate one BFS step from each end until the two frontiers meet.

    Args:
        frontier_initial: deque of Nodes reachable from the start state.
        explored_initial: list of grids already expanded on the start side.
        frontier_goal: deque of Nodes reachable from the goal state.
        explored_goal: list of grids already expanded on the goal side.

    Prints the joined path via print_answer().  Fixes of the original:
    frontiers are popped with the bound ``popleft`` method instead of the
    unbound ``deque.popleft(...)`` call, and a drained frontier no longer
    raises NameError on unbound ``p1``/``p2``.
    """
    p1 = p2 = None
    while frontier_initial and frontier_goal:
        # One expansion step from the start side.
        node_initial = frontier_initial.popleft()
        result_initial = search(node_initial, frontier_goal)
        if result_initial:
            # Frontiers met: same state seen from both directions.
            p1, p2 = node_initial, result_initial
            break
        explored_initial.append(node_initial.grid)
        expand(node_initial, frontier_initial, explored_initial)
        # One expansion step from the goal side.
        node_goal = frontier_goal.popleft()
        result_goal = search(node_goal, frontier_initial)
        if result_goal:
            p1, p2 = result_goal, node_goal
            break
        explored_goal.append(node_goal.grid)
        expand(node_goal, frontier_goal, explored_goal)
    if p1 is None:
        # Both frontiers exhausted without meeting: no solution exists.
        print("No solution found")
        return
    print_answer(p1, p2)
def read_input_file(filename, grid):
    """Fill the 4x4 *grid* in row-major order from the first line of *filename*.

    The line must contain 16 whitespace-separated integers.  The file is
    now opened with a ``with`` block so the handle is closed even when
    parsing raises (the original leaked it on exception), and ``split()``
    tolerates repeated whitespace and the trailing newline.

    Returns the same *grid* object, mutated in place.
    """
    with open(filename, "r") as f:
        numbers = f.readline().split()
    for i in range(4):
        for j in range(4):
            grid[i][j] = int(numbers[4 * i + j])
    return grid
# --- driver: read the start state, then search from both ends at once ---
grid = [[None for _ in range(4)] for _ in range(4)]
grid = read_input_file("input.txt", grid)
initial = Node(None, grid)
# Frontier/explored bookkeeping for the forward (initial-state) search.
frontier_initial = deque()
frontier_initial.append(initial)
explored_initial = []
# Goal state: tiles 1-14 in order, with the two blanks in the last cells.
goal_grid = [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12],
             [13, 14, 0, 0]]
goal = Node(None, goal_grid)
frontier_goal = deque()
frontier_goal.append(goal)
explored_goal = []
start_time = time.time()
bidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal)
# Summary statistics for each search direction.
print("Initial side")
print("frontier: ", len(frontier_initial))
print("explored: ", len(explored_initial), "\n")
print("Goal side")
print("frontier: ", len(frontier_goal))
print("explored: ", len(explored_goal))
print("--- %s seconds ---" % (time.time() - start_time))
| 32 | 92 | 0.585853 | from copy import deepcopy
from collections import deque
import time
import numpy as np
class Node:
    """Search-tree node: one puzzle state plus a link to its parent.

    parent is the Node this state was expanded from (None for a root);
    grid is the 4x4 list-of-lists board, with 0 marking a blank cell.
    """
    def __init__(self, parent, grid):
        self.parent = parent
        self.grid = grid
def print_answer(p1, p2):
    """Print the solution path: root..p1, a separator, then p2..its root."""
    forward_grids = []
    node = p1
    while node:
        forward_grids.append(node.grid)
        node = node.parent
    print("\nStep by step solution:\n")
    # Collected child-to-root; print root-to-child by reversing.
    for state in reversed(forward_grids):
        print(np.matrix(state), "\n")
    print("-----------middle--------------", "\n")
    node = p2
    while node:
        print(np.matrix(node.grid), "\n")
        node = node.parent
def search(node, frontier):
    """Return the frontier entry whose grid equals node.grid, else None."""
    for candidate in frontier:
        if candidate.grid == node.grid:
            return candidate
    return None
def check_grid(grid, frontier, explored):
    """Return True when *grid* is a brand-new state worth enqueueing.

    A state is new when it is neither in the *explored* list of grids nor
    already queued (as the ``.grid`` of a node) in *frontier*.

    Bug fix: the original fell through to ``return True`` when the
    frontier was empty but *grid* was already explored, so explored
    states could be re-enqueued.
    """
    if grid in explored:
        return False
    for node in frontier:
        if node.grid == grid:
            return False
    return True
def expand(node, frontier, explored):
    """Generate every legal successor of *node* by sliding a neighbouring
    tile into either blank, enqueueing the new states on *frontier*."""
    # Locate the (row, col) of the blanks in row-major scan order.
    blanks = [
        (row, col)
        for row in range(4)
        for col in range(4)
        if node.grid[row][col] == 0
    ]
    first_0 = list(blanks[0]) if len(blanks) > 0 else [None, None]
    second_0 = list(blanks[1]) if len(blanks) > 1 else [None, None]
    # Try all four slide directions on each blank, in the same order as
    # the original: left, right, up, down; first blank before second.
    for mover in (move_left, move_right, move_up, move_down):
        mover(node, first_0, frontier, explored)
        mover(node, second_0, frontier, explored)
def add_to_frontier(node, child_grid, frontier):
    # Wrap the successor state in a Node (remembering its parent so the
    # final path can be reconstructed) and enqueue it.
    child = Node(node, child_grid)
    frontier.append(child)
def move_left(node, coordinate, frontier, explored):
    # Swap the blank at `coordinate` with the tile to its left.
    i, j = coordinate[0], coordinate[1]
    # No-op when the blank is in the leftmost column, or the left
    # neighbour is the other blank (this puzzle variant has two 0-cells).
    if j == 0 or node.grid[i][j-1] == 0:
        pass
    else:
        child_grid = deepcopy(node.grid)
        child_grid[i][j], child_grid[i][j-1] = child_grid[i][j-1], child_grid[i][j]
        # Enqueue only states that are neither explored nor already queued.
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def move_right(node, coordinate, frontier, explored):
    # Swap the blank at `coordinate` with the tile to its right.
    i, j = coordinate[0], coordinate[1]
    # No-op when the blank is in the rightmost column, or the right
    # neighbour is the other blank (this puzzle variant has two 0-cells).
    if j == 3 or node.grid[i][j+1] == 0:
        pass
    else:
        child_grid = deepcopy(node.grid)
        child_grid[i][j], child_grid[i][j+1] = child_grid[i][j+1], child_grid[i][j]
        # Enqueue only states that are neither explored nor already queued.
        if check_grid(child_grid, frontier, explored):
            add_to_frontier(node, child_grid, frontier)
def move_up(node, coordinate, frontier, explored):
    """Slide the tile above the blank at *coordinate* into the blank.

    A no-op when the blank is on the top row or the cell above holds the
    other blank (value 0).
    """
    row, col = coordinate
    if row == 0 or node.grid[row - 1][col] == 0:
        return
    child_grid = deepcopy(node.grid)
    child_grid[row][col], child_grid[row - 1][col] = (
        child_grid[row - 1][col],
        child_grid[row][col],
    )
    if check_grid(child_grid, frontier, explored):
        add_to_frontier(node, child_grid, frontier)
def move_down(node, coordinate, frontier, explored):
    """Slide the tile below the blank at *coordinate* into the blank.

    A no-op when the blank is on the bottom row or the cell below holds
    the other blank (value 0).
    """
    row, col = coordinate
    if row == 3 or node.grid[row + 1][col] == 0:
        return
    child_grid = deepcopy(node.grid)
    child_grid[row][col], child_grid[row + 1][col] = (
        child_grid[row + 1][col],
        child_grid[row][col],
    )
    if check_grid(child_grid, frontier, explored):
        add_to_frontier(node, child_grid, frontier)
def bidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal):
    """Alternate one BFS step from each end until the two frontiers meet.

    Args:
        frontier_initial: deque of Nodes reachable from the start state.
        explored_initial: list of grids already expanded on the start side.
        frontier_goal: deque of Nodes reachable from the goal state.
        explored_goal: list of grids already expanded on the goal side.

    Prints the joined path via print_answer().  Fixes of the original:
    frontiers are popped with the bound ``popleft`` method instead of the
    unbound ``deque.popleft(...)`` call, and a drained frontier no longer
    raises NameError on unbound ``p1``/``p2``.
    """
    p1 = p2 = None
    while frontier_initial and frontier_goal:
        # One expansion step from the start side.
        node_initial = frontier_initial.popleft()
        result_initial = search(node_initial, frontier_goal)
        if result_initial:
            # Frontiers met: same state seen from both directions.
            p1, p2 = node_initial, result_initial
            break
        explored_initial.append(node_initial.grid)
        expand(node_initial, frontier_initial, explored_initial)
        # One expansion step from the goal side.
        node_goal = frontier_goal.popleft()
        result_goal = search(node_goal, frontier_initial)
        if result_goal:
            p1, p2 = result_goal, node_goal
            break
        explored_goal.append(node_goal.grid)
        expand(node_goal, frontier_goal, explored_goal)
    if p1 is None:
        # Both frontiers exhausted without meeting: no solution exists.
        print("No solution found")
        return
    print_answer(p1, p2)
def read_input_file(filename, grid):
    """Fill the 4x4 *grid* in row-major order from the first line of *filename*.

    The line must contain 16 whitespace-separated integers.  The file is
    now opened with a ``with`` block so the handle is closed even when
    parsing raises (the original leaked it on exception), and ``split()``
    tolerates repeated whitespace and the trailing newline.

    Returns the same *grid* object, mutated in place.
    """
    with open(filename, "r") as f:
        numbers = f.readline().split()
    for i in range(4):
        for j in range(4):
            grid[i][j] = int(numbers[4 * i + j])
    return grid
# --- driver: read the start state, then search from both ends at once ---
grid = [[None for _ in range(4)] for _ in range(4)]
grid = read_input_file("input.txt", grid)
initial = Node(None, grid)
# Frontier/explored bookkeeping for the forward (initial-state) search.
frontier_initial = deque()
frontier_initial.append(initial)
explored_initial = []
# Goal state: tiles 1-14 in order, with the two blanks in the last cells.
goal_grid = [[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12],
             [13, 14, 0, 0]]
goal = Node(None, goal_grid)
frontier_goal = deque()
frontier_goal.append(goal)
explored_goal = []
start_time = time.time()
bidirectional_search(frontier_initial, explored_initial, frontier_goal, explored_goal)
# Summary statistics for each search direction.
print("Initial side")
print("frontier: ", len(frontier_initial))
print("explored: ", len(explored_initial), "\n")
print("Goal side")
print("frontier: ", len(frontier_goal))
print("explored: ", len(explored_goal))
print("--- %s seconds ---" % (time.time() - start_time))
| true | true |
f73e918b2454f5cf8cc06eb4c8d8076e4d180778 | 9,498 | py | Python | crfnet/model/layers/filter_detections.py | XiaoJake/CameraRadarFusionNet | 5506700c21ecda8de7cbbfa0cff25413fbcb2a96 | [
"Apache-2.0"
] | 3,255 | 2016-08-18T17:53:27.000Z | 2022-03-29T19:53:43.000Z | retinanet_aerial_detection/keras_retinanet/layers/filter_detections.py | luke4u/Deep-Learning | f00e21b646f44ce1590e8fd6d8b5ecba56bf68db | [
"MIT"
] | 141 | 2017-07-17T09:14:37.000Z | 2022-03-14T00:00:19.000Z | retinanet_aerial_detection/keras_retinanet/layers/filter_detections.py | luke4u/Deep-Learning | f00e21b646f44ce1590e8fd6d8b5ecba56bf68db | [
"MIT"
] | 2,580 | 2017-05-14T14:33:41.000Z | 2022-03-31T15:04:14.000Z | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import keras
from .. import backend
def filter_detections(
    boxes,
    classification,
    other = [],
    class_specific_filter = True,
    nms = True,
    score_threshold = 0.05,
    max_detections = 300,
    nms_threshold = 0.5
):
    """ Filter detections using the boxes and classification values.
    Args
        boxes : Tensor of shape (num_boxes, 4) containing the boxes in (x1, y1, x2, y2) format.
        classification : Tensor of shape (num_boxes, num_classes) containing the classification scores.
        other : List of tensors of shape (num_boxes, ...) to filter along with the boxes and classification scores.
        class_specific_filter : Whether to perform filtering per class, or take the best scoring class and filter those.
        nms : Flag to enable/disable non maximum suppression.
        score_threshold : Threshold used to prefilter the boxes with.
        max_detections : Maximum number of detections to keep.
        nms_threshold : Threshold for the IoU value to determine when a box should be suppressed.
    Returns
        A list of [boxes, scores, labels, other[0], other[1], ...].
        boxes is shaped (max_detections, 4) and contains the (x1, y1, x2, y2) of the non-suppressed boxes.
        scores is shaped (max_detections,) and contains the scores of the predicted class.
        labels is shaped (max_detections,) and contains the predicted label.
        other[i] is shaped (max_detections, ...) and contains the filtered other[i] data.
        In case there are less than max_detections detections, the tensors are padded with -1's.
    Note
        ``other`` uses a shared mutable default list; it is only iterated,
        never mutated, so this is safe here.
    """
    def _filter_detections(scores, labels):
        # threshold based on score
        indices = backend.where(keras.backend.greater(scores, score_threshold))
        if nms:
            filtered_boxes = backend.gather_nd(boxes, indices)
            filtered_scores = keras.backend.gather(scores, indices)[:, 0]
            # perform NMS
            nms_indices = backend.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections, iou_threshold=nms_threshold)
            # filter indices based on NMS
            indices = keras.backend.gather(indices, nms_indices)
        # add indices to list of all indices
        labels = backend.gather_nd(labels, indices)
        # each row of `indices` becomes an (anchor index, label) pair
        indices = keras.backend.stack([indices[:, 0], labels], axis=1)
        return indices
    if class_specific_filter:
        all_indices = []
        # perform per class filtering
        for c in range(int(classification.shape[1])):
            scores = classification[:, c]
            labels = c * backend.ones((keras.backend.shape(scores)[0],), dtype='int64')
            all_indices.append(_filter_detections(scores, labels))
        # concatenate indices to single tensor
        indices = keras.backend.concatenate(all_indices, axis=0)
    else:
        # only the best-scoring class of each anchor competes
        scores = keras.backend.max(classification, axis = 1)
        labels = keras.backend.argmax(classification, axis = 1)
        indices = _filter_detections(scores, labels)
    # select top k
    scores = backend.gather_nd(classification, indices)
    labels = indices[:, 1]
    scores, top_indices = backend.top_k(scores, k=keras.backend.minimum(max_detections, keras.backend.shape(scores)[0]))
    # filter input using the final set of indices
    indices = keras.backend.gather(indices[:, 0], top_indices)
    boxes = keras.backend.gather(boxes, indices)
    labels = keras.backend.gather(labels, top_indices)
    other_ = [keras.backend.gather(o, indices) for o in other]
    # zero pad the outputs
    pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])
    boxes = backend.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)
    scores = backend.pad(scores, [[0, pad_size]], constant_values=-1)
    labels = backend.pad(labels, [[0, pad_size]], constant_values=-1)
    labels = keras.backend.cast(labels, 'int32')
    other_ = [backend.pad(o, [[0, pad_size]] + [[0, 0] for _ in range(1, len(o.shape))], constant_values=-1) for o in other_]
    # set shapes, since we know what they are
    boxes.set_shape([max_detections, 4])
    scores.set_shape([max_detections])
    labels.set_shape([max_detections])
    for o, s in zip(other_, [list(keras.backend.int_shape(o)) for o in other]):
        o.set_shape([max_detections] + s[1:])
    return [boxes, scores, labels] + other_
class FilterDetections(keras.layers.Layer):
    """ Keras layer for filtering detections using score threshold and NMS.
    """
    def __init__(
        self,
        nms = True,
        class_specific_filter = True,
        nms_threshold = 0.5,
        score_threshold = 0.05,
        max_detections = 300,
        parallel_iterations = 32,
        **kwargs
    ):
        """ Filters detections using score threshold, NMS and selecting the top-k detections.
        Args
            nms : Flag to enable/disable NMS.
            class_specific_filter : Whether to perform filtering per class, or take the best scoring class and filter those.
            nms_threshold : Threshold for the IoU value to determine when a box should be suppressed.
            score_threshold : Threshold used to prefilter the boxes with.
            max_detections : Maximum number of detections to keep.
            parallel_iterations : Number of batch items to process in parallel.
        """
        self.nms = nms
        self.class_specific_filter = class_specific_filter
        self.nms_threshold = nms_threshold
        self.score_threshold = score_threshold
        self.max_detections = max_detections
        self.parallel_iterations = parallel_iterations
        super(FilterDetections, self).__init__(**kwargs)
    def call(self, inputs, **kwargs):
        """ Constructs the NMS graph.
        Args
            inputs : List of [boxes, classification, other[0], other[1], ...] tensors.
        """
        boxes = inputs[0]
        classification = inputs[1]
        # any extra per-anchor tensors are filtered alongside the boxes
        other = inputs[2:]
        # wrap nms with our parameters
        def _filter_detections(args):
            boxes = args[0]
            classification = args[1]
            other = args[2]
            return filter_detections(
                boxes,
                classification,
                other,
                nms = self.nms,
                class_specific_filter = self.class_specific_filter,
                score_threshold = self.score_threshold,
                max_detections = self.max_detections,
                nms_threshold = self.nms_threshold,
            )
        # call filter_detections on each batch
        outputs = backend.map_fn(
            _filter_detections,
            elems=[boxes, classification, other],
            dtype=[keras.backend.floatx(), keras.backend.floatx(), 'int32'] + [o.dtype for o in other],
            parallel_iterations=self.parallel_iterations
        )
        return outputs
    def compute_output_shape(self, input_shape):
        """ Computes the output shapes given the input shapes.
        Args
            input_shape : List of input shapes [boxes, classification, other[0], other[1], ...].
        Returns
            List of tuples representing the output shapes:
            [filtered_boxes.shape, filtered_scores.shape, filtered_labels.shape, filtered_other[0].shape, filtered_other[1].shape, ...]
        """
        return [
            (input_shape[0][0], self.max_detections, 4),
            (input_shape[1][0], self.max_detections),
            (input_shape[1][0], self.max_detections),
        ] + [
            tuple([input_shape[i][0], self.max_detections] + list(input_shape[i][2:])) for i in range(2, len(input_shape))
        ]
    def compute_mask(self, inputs, mask=None):
        """ This is required in Keras when there is more than 1 output.
        """
        return (len(inputs) + 1) * [None]
    def get_config(self):
        """ Gets the configuration of this layer.
        Returns
            Dictionary containing the parameters of this layer.
        """
        # include our constructor args so the layer can be deserialized
        config = super(FilterDetections, self).get_config()
        config.update({
            'nms' : self.nms,
            'class_specific_filter' : self.class_specific_filter,
            'nms_threshold' : self.nms_threshold,
            'score_threshold' : self.score_threshold,
            'max_detections' : self.max_detections,
            'parallel_iterations' : self.parallel_iterations,
        })
        return config
| 42.401786 | 147 | 0.61634 |
import keras
from .. import backend
def filter_detections(
    boxes,
    classification,
    other = [],
    class_specific_filter = True,
    nms = True,
    score_threshold = 0.05,
    max_detections = 300,
    nms_threshold = 0.5
):
    """ Filter detections using the boxes and classification values.

    Args
        boxes                 : Tensor of shape (num_boxes, 4) with boxes in (x1, y1, x2, y2) format.
        classification        : Tensor of shape (num_boxes, num_classes) with classification scores.
        other                 : List of (num_boxes, ...) tensors to filter along with the boxes.
        class_specific_filter : Filter per class, or take the best scoring class and filter those.
        nms                   : Flag to enable/disable non maximum suppression.
        score_threshold       : Threshold used to prefilter the boxes with.
        max_detections        : Maximum number of detections to keep.
        nms_threshold         : IoU threshold above which a box is suppressed.

    Returns
        A list of [boxes, scores, labels, other[0], other[1], ...], each
        padded with -1's to a fixed length of max_detections.
    """
    def _filter_detections(scores, labels):
        # Keep only anchors whose score clears the threshold.
        indices = backend.where(keras.backend.greater(scores, score_threshold))
        if nms:
            filtered_boxes = backend.gather_nd(boxes, indices)
            filtered_scores = keras.backend.gather(scores, indices)[:, 0]
            # Non-maximum suppression on the surviving boxes.
            nms_indices = backend.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections, iou_threshold=nms_threshold)
            indices = keras.backend.gather(indices, nms_indices)
        # Pair each kept anchor index with its label: shape (n, 2).
        labels = backend.gather_nd(labels, indices)
        indices = keras.backend.stack([indices[:, 0], labels], axis=1)
        return indices
    if class_specific_filter:
        all_indices = []
        # Filter each class column independently, then merge the results.
        for c in range(int(classification.shape[1])):
            scores = classification[:, c]
            labels = c * backend.ones((keras.backend.shape(scores)[0],), dtype='int64')
            all_indices.append(_filter_detections(scores, labels))
        indices = keras.backend.concatenate(all_indices, axis=0)
    else:
        # Only the best-scoring class of each anchor competes.
        scores = keras.backend.max(classification, axis = 1)
        labels = keras.backend.argmax(classification, axis = 1)
        indices = _filter_detections(scores, labels)
    # Keep the top-scoring detections overall (at most max_detections).
    scores = backend.gather_nd(classification, indices)
    labels = indices[:, 1]
    scores, top_indices = backend.top_k(scores, k=keras.backend.minimum(max_detections, keras.backend.shape(scores)[0]))
    indices = keras.backend.gather(indices[:, 0], top_indices)
    boxes = keras.backend.gather(boxes, indices)
    labels = keras.backend.gather(labels, top_indices)
    other_ = [keras.backend.gather(o, indices) for o in other]
    # Pad every output with -1 up to the fixed max_detections length.
    pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])
    boxes = backend.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)
    scores = backend.pad(scores, [[0, pad_size]], constant_values=-1)
    labels = backend.pad(labels, [[0, pad_size]], constant_values=-1)
    labels = keras.backend.cast(labels, 'int32')
    other_ = [backend.pad(o, [[0, pad_size]] + [[0, 0] for _ in range(1, len(o.shape))], constant_values=-1) for o in other_]
    # Declare static shapes so downstream layers see fixed-size outputs.
    boxes.set_shape([max_detections, 4])
    scores.set_shape([max_detections])
    labels.set_shape([max_detections])
    for o, s in zip(other_, [list(keras.backend.int_shape(o)) for o in other]):
        o.set_shape([max_detections] + s[1:])
    return [boxes, scores, labels] + other_
class FilterDetections(keras.layers.Layer):
    """ Keras layer for filtering detections using score threshold and NMS.
    """
    def __init__(
        self,
        nms = True,
        class_specific_filter = True,
        nms_threshold = 0.5,
        score_threshold = 0.05,
        max_detections = 300,
        parallel_iterations = 32,
        **kwargs
    ):
        """ Filters detections using score threshold, NMS and top-k selection.

        Args
            nms                   : Flag to enable/disable NMS.
            class_specific_filter : Filter per class, or take the best scoring class and filter those.
            nms_threshold         : IoU threshold above which a box is suppressed.
            score_threshold       : Threshold used to prefilter the boxes with.
            max_detections        : Maximum number of detections to keep.
            parallel_iterations   : Number of batch items to process in parallel.
        """
        self.nms = nms
        self.class_specific_filter = class_specific_filter
        self.nms_threshold = nms_threshold
        self.score_threshold = score_threshold
        self.max_detections = max_detections
        self.parallel_iterations = parallel_iterations
        super(FilterDetections, self).__init__(**kwargs)
    def call(self, inputs, **kwargs):
        """ Constructs the NMS graph.

        Args
            inputs : List of [boxes, classification, other[0], other[1], ...] tensors.
        """
        boxes = inputs[0]
        classification = inputs[1]
        # Any extra per-anchor tensors are filtered alongside the boxes.
        other = inputs[2:]
        # Wrap filter_detections with this layer's configured parameters.
        def _filter_detections(args):
            boxes = args[0]
            classification = args[1]
            other = args[2]
            return filter_detections(
                boxes,
                classification,
                other,
                nms = self.nms,
                class_specific_filter = self.class_specific_filter,
                score_threshold = self.score_threshold,
                max_detections = self.max_detections,
                nms_threshold = self.nms_threshold,
            )
        # Apply the filtering to every batch item independently.
        outputs = backend.map_fn(
            _filter_detections,
            elems=[boxes, classification, other],
            dtype=[keras.backend.floatx(), keras.backend.floatx(), 'int32'] + [o.dtype for o in other],
            parallel_iterations=self.parallel_iterations
        )
        return outputs
    def compute_output_shape(self, input_shape):
        """ Computes the output shapes given the input shapes.

        Returns
            [filtered_boxes.shape, filtered_scores.shape, filtered_labels.shape,
             filtered_other[0].shape, ...] -- all padded to max_detections.
        """
        return [
            (input_shape[0][0], self.max_detections, 4),
            (input_shape[1][0], self.max_detections),
            (input_shape[1][0], self.max_detections),
        ] + [
            tuple([input_shape[i][0], self.max_detections] + list(input_shape[i][2:])) for i in range(2, len(input_shape))
        ]
    def compute_mask(self, inputs, mask=None):
        """ Required by Keras when a layer has more than one output. """
        return (len(inputs) + 1) * [None]
    def get_config(self):
        """ Returns the layer configuration (for serialization). """
        config = super(FilterDetections, self).get_config()
        config.update({
            'nms' : self.nms,
            'class_specific_filter' : self.class_specific_filter,
            'nms_threshold' : self.nms_threshold,
            'score_threshold' : self.score_threshold,
            'max_detections' : self.max_detections,
            'parallel_iterations' : self.parallel_iterations,
        })
        return config
| true | true |
f73e92de89b4a24afe0aadf707ce613fc081680c | 469 | py | Python | src/accounts/migrations/0002_auto_20200616_1337.py | albertonl/langsbay | cab14fe63c28dfe22e0a2ab7ca0e3c45b7d8dd25 | [
"MIT"
] | 3 | 2020-07-08T14:36:30.000Z | 2021-01-14T14:42:14.000Z | src/accounts/migrations/0002_auto_20200616_1337.py | albertonl/langsbay | cab14fe63c28dfe22e0a2ab7ca0e3c45b7d8dd25 | [
"MIT"
] | 7 | 2021-04-08T21:21:27.000Z | 2022-01-13T02:58:58.000Z | src/accounts/migrations/0002_auto_20200616_1337.py | albertonl/langsbay | cab14fe63c28dfe22e0a2ab7ca0e3c45b7d8dd25 | [
"MIT"
] | 1 | 2020-07-08T14:36:34.000Z | 2020-07-08T14:36:34.000Z | # Generated by Django 3.0.7 on 2020-06-16 13:37
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the accounts app's 'UserSettings' model to the singular
    # 'UserSetting'.
    # Must run after the listed initial migrations (and the swappable
    # user model's app).
    dependencies = [
        ('learning', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='UserSettings',
            new_name='UserSetting',
        ),
    ]
| 22.333333 | 66 | 0.635394 |
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the accounts app's 'UserSettings' model to the singular
    # 'UserSetting'.
    # Must run after the listed initial migrations (and the swappable
    # user model's app).
    dependencies = [
        ('learning', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='UserSettings',
            new_name='UserSetting',
        ),
    ]
| true | true |
f73e945accc54a2ccd7f0c6efb911e6060b822ee | 2,451 | py | Python | tools/bpflist.py | jerr/bcc | f3fc87aab83ce3e4f1ca227e33853df21147255a | [
"Apache-2.0"
] | 4 | 2018-01-29T13:38:50.000Z | 2021-06-30T07:28:47.000Z | tools/bpflist.py | jerr/bcc | f3fc87aab83ce3e4f1ca227e33853df21147255a | [
"Apache-2.0"
] | 13 | 2018-02-09T22:24:29.000Z | 2018-06-18T22:33:29.000Z | tools/bpflist.py | jerr/bcc | f3fc87aab83ce3e4f1ca227e33853df21147255a | [
"Apache-2.0"
] | 5 | 2018-01-31T05:04:19.000Z | 2018-06-12T00:45:21.000Z | #!/usr/bin/python
#
# bpflist Display processes currently using BPF programs and maps,
# pinned BPF programs and maps, and enabled probes.
#
# USAGE: bpflist [-v]
#
# Idea by Brendan Gregg.
#
# Copyright 2017, Sasha Goldshtein
# Licensed under the Apache License, Version 2.0
#
# 09-Mar-2017 Sasha Goldshtein Created this.
from bcc import BPF, USDT
import argparse
import re
import os
import subprocess
examples = """examples:
bpflist # display all processes currently using BPF
bpflist -v # also count kprobes/uprobes
bpflist -vv # display kprobes/uprobes and count them
"""
parser = argparse.ArgumentParser(
description="Display processes currently using BPF programs and maps",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="count and display kprobes/uprobes as well")
args = parser.parse_args()
def comm_for_pid(pid):
    """Return the short command name for *pid* from /proc, or "[unknown]".

    Fixes of the original: the /proc handle is closed via ``with`` (it was
    leaked), and the bare ``except`` is narrowed to OSError (process gone,
    /proc missing, or no permission) so programming errors are no longer
    silently swallowed.
    """
    try:
        with open("/proc/%d/comm" % pid) as f:
            return f.read().strip()
    except OSError:
        return "[unknown]"
counts = {}
def parse_probes(typ):
    """Tally bcc-created probes of *typ* ("kprobe"/"uprobe") into ``counts``.

    Reads the kernel tracing event list and attributes each probe whose
    name embeds a ``_bcc_<pid>`` marker to that requesting pid.
    """
    if args.verbosity > 1:
        print("open %ss:" % typ)
    for probe in open("/sys/kernel/debug/tracing/%s_events" % typ):
        # Probes opened by bcc have a specific pattern that includes the pid
        # of the requesting process.
        match = re.search('_bcc_(\\d+)\\s', probe)
        if match:
            pid = int(match.group(1))
            # Key counts by (pid, probe type) so the report shows one row
            # per pid/type pair.
            counts[(pid, typ)] = counts.get((pid, typ), 0) + 1
            if args.verbosity > 1:
                print(probe.strip())
    if args.verbosity > 1:
        print("")
if args.verbosity > 0:
parse_probes("kprobe")
parse_probes("uprobe")
def find_bpf_fds(pid):
    """Scan /proc/<pid>/fd and tally BPF program/map fds into ``counts``."""
    fd_dir = '/proc/%d/fd' % pid
    for entry in os.listdir(fd_dir):
        try:
            target = os.readlink(os.path.join(fd_dir, entry))
        except OSError:
            # fd closed (or inaccessible) between listing and readlink.
            continue
        match = re.match('.*bpf-(\\w+)', target)
        if match is None:
            continue
        key = (pid, match.group(1))
        counts[key] = counts.get(key, 0) + 1
# Walk every numeric /proc entry (i.e. every live process) and record
# which ones hold BPF-related file descriptors.
for pdir in os.listdir('/proc'):
    if re.match('\\d+', pdir):
        try:
            find_bpf_fds(int(pdir))
        except OSError:
            # Process exited (or is inaccessible) mid-scan; skip it.
            continue
print("%-6s %-16s %-8s %s" % ("PID", "COMM", "TYPE", "COUNT"))
# One report row per (pid, type), sorted by pid for stable output.
for (pid, typ), count in sorted(counts.items(), key=lambda t: t[0][0]):
    comm = comm_for_pid(pid)
    print("%-6d %-16s %-8s %-4d" % (pid, comm, typ, count))
| 29.53012 | 76 | 0.611587 |
from bcc import BPF, USDT
import argparse
import re
import os
import subprocess
examples = """examples:
bpflist # display all processes currently using BPF
bpflist -v # also count kprobes/uprobes
bpflist -vv # display kprobes/uprobes and count them
"""
parser = argparse.ArgumentParser(
description="Display processes currently using BPF programs and maps",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="count and display kprobes/uprobes as well")
args = parser.parse_args()
def comm_for_pid(pid):
    """Return the short command name for *pid* from /proc, or "[unknown]".

    Fixes of the original: the /proc handle is closed via ``with`` (it was
    leaked), and the bare ``except`` is narrowed to OSError (process gone,
    /proc missing, or no permission) so programming errors are no longer
    silently swallowed.
    """
    try:
        with open("/proc/%d/comm" % pid) as f:
            return f.read().strip()
    except OSError:
        return "[unknown]"
counts = {}
def parse_probes(typ):
if args.verbosity > 1:
print("open %ss:" % typ)
for probe in open("/sys/kernel/debug/tracing/%s_events" % typ):
match = re.search('_bcc_(\\d+)\\s', probe)
if match:
pid = int(match.group(1))
counts[(pid, typ)] = counts.get((pid, typ), 0) + 1
if args.verbosity > 1:
print(probe.strip())
if args.verbosity > 1:
print("")
if args.verbosity > 0:
parse_probes("kprobe")
parse_probes("uprobe")
def find_bpf_fds(pid):
    """Scan /proc/<pid>/fd and tally BPF program/map fds into ``counts``."""
    root = '/proc/%d/fd' % pid
    for fd in os.listdir(root):
        try:
            link = os.readlink(os.path.join(root, fd))
        except OSError:
            # fd closed (or inaccessible) between listing and readlink.
            continue
        # BPF fds are anonymous inodes whose link text embeds "bpf-<type>".
        match = re.match('.*bpf-(\\w+)', link)
        if match:
            tup = (pid, match.group(1))
            counts[tup] = counts.get(tup, 0) + 1
for pdir in os.listdir('/proc'):
if re.match('\\d+', pdir):
try:
find_bpf_fds(int(pdir))
except OSError:
continue
print("%-6s %-16s %-8s %s" % ("PID", "COMM", "TYPE", "COUNT"))
for (pid, typ), count in sorted(counts.items(), key=lambda t: t[0][0]):
comm = comm_for_pid(pid)
print("%-6d %-16s %-8s %-4d" % (pid, comm, typ, count))
| true | true |
f73e94b6a87aba0ff9b566e6233ef2caa1fcb323 | 645 | py | Python | backend/manage.py | crowdbotics-apps/beech-finance-holdi-30302 | f12c5cfdffa6b26654411228501b1333edd423c3 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/beech-finance-holdi-30302 | f12c5cfdffa6b26654411228501b1333edd423c3 | [
"FTL",
"AML",
"RSA-MD"
] | 16 | 2021-09-13T20:33:58.000Z | 2022-01-23T17:00:14.000Z | backend/manage.py | crowdbotics-apps/beech-finance-holdi-30302 | f12c5cfdffa6b26654411228501b1333edd423c3 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'beech_finance_holdi_30302.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.318182 | 89 | 0.691473 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'beech_finance_holdi_30302.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f73e95393cbe0169f2f513fc304f6501adb0d684 | 4,348 | py | Python | synapse/util/distributor.py | jdreichmann/synapse | 6fde6aa9c02d35e0a908437ea49b275df9b58427 | [
"Apache-2.0"
] | 1 | 2021-04-27T19:04:56.000Z | 2021-04-27T19:04:56.000Z | synapse/util/distributor.py | jdreichmann/synapse | 6fde6aa9c02d35e0a908437ea49b275df9b58427 | [
"Apache-2.0"
] | null | null | null | synapse/util/distributor.py | jdreichmann/synapse | 6fde6aa9c02d35e0a908437ea49b275df9b58427 | [
"Apache-2.0"
] | 1 | 2020-09-23T12:36:11.000Z | 2020-09-23T12:36:11.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
    # Convenience helper: fire the "user_left_room" signal with the given
    # user and room as keyword arguments.
    distributor.fire("user_left_room", user=user, room_id=room_id)
class Distributor:
    """Loosely couples producers and consumers of string-named signals.

    Components register observer callables against a signal name; firing
    that signal invokes every observer as a background process.  Observers
    may register before the signal itself is declared: they are parked in
    ``pre_registration`` and attached when ``declare`` runs.
    """

    def __init__(self):
        self.signals = {}
        self.pre_registration = {}

    def declare(self, name):
        """Create the signal *name*, attaching any early registrations."""
        if name in self.signals:
            raise KeyError("%r already has a signal named %s" % (self, name))

        signal = Signal(name)
        self.signals[name] = signal

        for observer in self.pre_registration.get(name, []):
            signal.observe(observer)

    def observe(self, name, observer):
        """Attach *observer* to *name*, queueing it if not yet declared."""
        signal = self.signals.get(name)
        if signal is not None:
            signal.observe(observer)
        else:
            self.pre_registration.setdefault(name, []).append(observer)

    def fire(self, name, *args, **kwargs):
        """Dispatch *name* to its observers as a background process.

        Does not return a deferred; raises KeyError for unknown signals.
        """
        if name not in self.signals:
            raise KeyError("%r does not have a signal named %s" % (self, name))

        run_as_background_process(name, self.signals[name].fire, *args, **kwargs)
class Signal:
    """A Signal is a dispatch point that stores a list of callables as
    observers of it.
    Signals can be "fired", meaning that every callable observing it is
    invoked. Firing a signal does not change its state; it can be fired again
    at any later point. Firing a signal passes any arguments from the fire
    method into all of the observers.
    """
    def __init__(self, name):
        self.name = name
        self.observers = []
    def observe(self, observer):
        """Adds a new callable to the observer list which will be invoked by
        the 'fire' method.
        Each observer callable may return a Deferred."""
        self.observers.append(observer)
    def fire(self, *args, **kwargs):
        """Invokes every callable in the observer list, passing in the args and
        kwargs. Exceptions thrown by observers are logged but ignored. It is
        not an error to fire a signal with no observers.
        Returns a Deferred that will complete when all the observers have
        completed."""
        # do() wraps each observer so a failing observer is logged without
        # breaking the others.
        async def do(observer):
            try:
                result = observer(*args, **kwargs)
                if inspect.isawaitable(result):
                    result = await result
                return result
            except Exception as e:
                logger.warning(
                    "%s signal observer %s failed: %r", self.name, observer, e,
                )
        # Observers run concurrently; gatherResults waits for all of them
        # (consumeErrors=True prevents unhandled-error noise).
        deferreds = [run_in_background(do, o) for o in self.observers]
        return make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True)
        )
    def __repr__(self):
        return "<Signal name=%r>" % (self.name,)
| 34.784 | 81 | 0.657544 |
import inspect
import logging
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
logger = logging.getLogger(__name__)
def user_left_room(distributor, user, room_id):
    # Convenience helper: fire the "user_left_room" signal with the given
    # user and room as keyword arguments.
    distributor.fire("user_left_room", user=user, room_id=room_id)
class Distributor:
    """A central dispatch point for registering, observing and firing
    string-named signals.

    Observers may register before the signal is declared: they are parked
    in ``pre_registration`` and attached when ``declare`` runs.
    """
    def __init__(self):
        self.signals = {}
        self.pre_registration = {}
    def declare(self, name):
        """Create the signal *name*, attaching any early registrations."""
        if name in self.signals:
            raise KeyError("%r already has a signal named %s" % (self, name))
        self.signals[name] = Signal(name)
        if name in self.pre_registration:
            signal = self.signals[name]
            for observer in self.pre_registration[name]:
                signal.observe(observer)
    def observe(self, name, observer):
        """Attach *observer* to *name*, queueing it if not yet declared."""
        if name in self.signals:
            self.signals[name].observe(observer)
        else:
            if name not in self.pre_registration:
                self.pre_registration[name] = []
            self.pre_registration[name].append(observer)
    def fire(self, name, *args, **kwargs):
        """Dispatch *name* to its observers as a background process.

        Does not return a deferred; raises KeyError for unknown signals.
        """
        if name not in self.signals:
            raise KeyError("%r does not have a signal named %s" % (self, name))
        run_as_background_process(name, self.signals[name].fire, *args, **kwargs)
class Signal:
    """A dispatch point holding a list of observer callables.

    Firing the signal invokes every observer with the fire() arguments;
    it does not change the signal's state, so it can be fired repeatedly.
    """
    def __init__(self, name):
        self.name = name
        self.observers = []
    def observe(self, observer):
        """Add a callable to be invoked by fire(); it may return an awaitable."""
        self.observers.append(observer)
    def fire(self, *args, **kwargs):
        """Invoke every observer; exceptions are logged but ignored.

        Returns a Deferred that completes when all observers have finished.
        """
        # do() wraps each observer so a failing observer is logged without
        # breaking the others.
        async def do(observer):
            try:
                result = observer(*args, **kwargs)
                if inspect.isawaitable(result):
                    result = await result
                return result
            except Exception as e:
                logger.warning(
                    "%s signal observer %s failed: %r", self.name, observer, e,
                )
        # Observers run concurrently; gatherResults waits for all of them
        # (consumeErrors=True prevents unhandled-error noise).
        deferreds = [run_in_background(do, o) for o in self.observers]
        return make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True)
        )
    def __repr__(self):
        return "<Signal name=%r>" % (self.name,)
| true | true |
f73e9558ca4bf2989dda025f02d1bd2aea28a192 | 33 | py | Python | django_sourcebook/utils/__init__.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/utils/__init__.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | django_sourcebook/utils/__init__.py | maxblee/django_sourcebook | f90ca62cfe43c875a485f783ca1a06be40d9bbc5 | [
"MIT"
] | null | null | null | __all__ = ["auth", "validators"]
| 16.5 | 32 | 0.636364 | __all__ = ["auth", "validators"]
| true | true |
f73e96c4374ab3916b094067064a1150adadbc8b | 688 | py | Python | test/python/parametrized_test_case.py | vsmid/openapi | 75c70e36707f7842fcfcf9c5b5765fbe96dc6caf | [
"MIT"
] | null | null | null | test/python/parametrized_test_case.py | vsmid/openapi | 75c70e36707f7842fcfcf9c5b5765fbe96dc6caf | [
"MIT"
] | 3 | 2021-12-21T08:18:48.000Z | 2022-03-24T10:50:37.000Z | test/python/parametrized_test_case.py | vsmid/openapi | 75c70e36707f7842fcfcf9c5b5765fbe96dc6caf | [
"MIT"
] | 5 | 2021-12-11T06:10:14.000Z | 2022-03-18T11:05:24.000Z | # Manticore Search Client
# Copyright (c) 2020-2021, Manticore Software LTD (https://manticoresearch.com)
#
# All rights reserved
import unittest
class ParametrizedTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', settings=None):
super(ParametrizedTestCase, self).__init__(methodName)
self.settings = settings
@staticmethod
def parametrize(testcase_class, settings=None):
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_class)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_class(name, settings=settings))
return suite
| 31.272727 | 79 | 0.713663 |
import unittest
class ParametrizedTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', settings=None):
super(ParametrizedTestCase, self).__init__(methodName)
self.settings = settings
@staticmethod
def parametrize(testcase_class, settings=None):
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_class)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_class(name, settings=settings))
return suite
| true | true |
f73e97c1f2b58ffa20dbfdf4109bf3cb964e706a | 1,742 | py | Python | test/utils/jmx/broker/remote_jmx_queue.py | julianghionoiu/tdl-client-python | 93a2e4d59e2295c4af73012dfbb75975e4447da3 | [
"Apache-2.0"
] | null | null | null | test/utils/jmx/broker/remote_jmx_queue.py | julianghionoiu/tdl-client-python | 93a2e4d59e2295c4af73012dfbb75975e4447da3 | [
"Apache-2.0"
] | 7 | 2015-12-09T20:56:28.000Z | 2020-01-27T12:48:17.000Z | test/utils/jmx/broker/remote_jmx_queue.py | julianghionoiu/tdl-client-python | 93a2e4d59e2295c4af73012dfbb75975e4447da3 | [
"Apache-2.0"
] | 1 | 2015-09-03T18:33:05.000Z | 2015-09-03T18:33:05.000Z | import re
class RemoteJmxQueue(object):
    """Wrapper around an ActiveMQ queue MBean accessed through a Jolokia session."""

    def __init__(self, jolokia_session, broker_name, queue_name):
        self.name = queue_name
        self.jolokia_session = jolokia_session
        # Fully-qualified JMX object name of the queue's destination MBean.
        self.queue_bean = (
            "org.apache.activemq:type=Broker,brokerName={},"
            "destinationType=Queue,destinationName={}"
        ).format(broker_name, queue_name)

    def get_name(self):
        """Return the queue name this wrapper targets."""
        return self.name

    def send_text_message(self, request):
        """Push *request* onto the queue via the MBean's sendTextMessage op."""
        self.jolokia_session.request({
            'type': 'exec',
            'mbean': self.queue_bean,
            'operation': 'sendTextMessage(java.lang.String)',
            'arguments': [request]
        })

    def get_size(self):
        """Read the QueueSize attribute (number of pending messages)."""
        return self.jolokia_session.request({
            'type': 'read',
            'mbean': self.queue_bean,
            'attribute': 'QueueSize',
        })

    def get_message_contents(self):
        """Browse the queue and return every message body as a string."""
        browsed = self.jolokia_session.request({
            'type': 'exec',
            'mbean': self.queue_bean,
            'operation': 'browse()',
        })
        # Text messages expose a 'Text' field; binary ones only a BodyPreview.
        if 'Text' in browsed[0]:
            return [entry['Text'] for entry in browsed]
        return [self.bytearray_to_string(entry) for entry in browsed]

    def bytearray_to_string(self, r):
        """Turn a BodyPreview byte list into text by stripping the repr wrapper."""
        decoded = str(bytearray(r['BodyPreview']))
        # Strip "bytearray(", the trailing "')", and the "b'" prefix in turn.
        for pattern in ("bytearray\(", "\\'\)", "b\\'"):
            decoded = re.sub(pattern, "", decoded)
        return decoded

    def purge(self):
        """Delete every message currently on the queue."""
        self.jolokia_session.request({
            'type': 'exec',
            'mbean': self.queue_bean,
            'operation': 'purge()',
        })
| 30.034483 | 65 | 0.552239 | import re
class RemoteJmxQueue(object):
def __init__(self, jolokia_session, broker_name, queue_name):
self.name = queue_name
self.jolokia_session = jolokia_session
self.queue_bean = (
"org.apache.activemq:type=Broker,brokerName={},"
"destinationType=Queue,destinationName={}"
).format(broker_name, queue_name)
def get_name(self):
return self.name
def send_text_message(self, request):
operation = {
'type': 'exec',
'mbean': self.queue_bean,
'operation': 'sendTextMessage(java.lang.String)',
'arguments': [request]
}
self.jolokia_session.request(operation)
def get_size(self):
attribute = {
'type': 'read',
'mbean': self.queue_bean,
'attribute': 'QueueSize',
}
return self.jolokia_session.request(attribute)
def get_message_contents(self):
operation = {
'type': 'exec',
'mbean': self.queue_bean,
'operation': 'browse()',
}
result = self.jolokia_session.request(operation)
if 'Text' in result[0]:
return [r['Text'] for r in result]
else:
return [self.bytearray_to_string(r) for r in result]
def bytearray_to_string(self, r):
result = str(bytearray(r['BodyPreview']))
result = re.sub("bytearray\(", "", result)
result = re.sub("\\'\)", "", result)
return re.sub("b\\'", "", result)
def purge(self):
operation = {
'type': 'exec',
'mbean': self.queue_bean,
'operation': 'purge()',
}
self.jolokia_session.request(operation)
| true | true |
f73e9800771ef90292ddd41d4d60f8d6f4683d4a | 236 | py | Python | aula10/aula10.py | thidoa/Udemy_python3 | b4d6b922b3d8eb2cfeb06ba4b5b449b168c32818 | [
"Apache-2.0"
] | null | null | null | aula10/aula10.py | thidoa/Udemy_python3 | b4d6b922b3d8eb2cfeb06ba4b5b449b168c32818 | [
"Apache-2.0"
] | null | null | null | aula10/aula10.py | thidoa/Udemy_python3 | b4d6b922b3d8eb2cfeb06ba4b5b449b168c32818 | [
"Apache-2.0"
] | null | null | null | numero = input('Digite um numero inteiro: ')
if numero.isdigit():
numero = int(numero)
if numero % 2 == 0:
print('O numero e par!')
else:
print('O numero e impar!')
else:
print('Nao e um numero inteiro') | 23.6 | 44 | 0.584746 | numero = input('Digite um numero inteiro: ')
if numero.isdigit():
numero = int(numero)
if numero % 2 == 0:
print('O numero e par!')
else:
print('O numero e impar!')
else:
print('Nao e um numero inteiro') | true | true |
f73e9874d0e1a0b0a97179e0b4b7f8429b0adeaa | 2,031 | py | Python | websitePythonScripts/getCombinedRecommendation.py | itsmartagonzalez/Hunediam-Prime | 183de7e856dd96d0c412d2f6dd0524560512f56f | [
"MIT"
] | null | null | null | websitePythonScripts/getCombinedRecommendation.py | itsmartagonzalez/Hunediam-Prime | 183de7e856dd96d0c412d2f6dd0524560512f56f | [
"MIT"
] | null | null | null | websitePythonScripts/getCombinedRecommendation.py | itsmartagonzalez/Hunediam-Prime | 183de7e856dd96d0c412d2f6dd0524560512f56f | [
"MIT"
] | 1 | 2022-02-20T15:45:34.000Z | 2022-02-20T15:45:34.000Z | #!/usr/bin/python3
import sys
import sqlite3
import logging
from collections import Counter
from random import shuffle
from getSimilarFromContentBased import getSimilarFromContentBased
from getRecommendationFromSVD import getRecommendationFromSVD
from getInfoFromMovieIDs import getInfoFromMovieIDs
logger = logging.getLogger(__name__)
def getCombinedRecommendation(userId):
    """Combine content-based and SVD-based movie recommendations for *userId*.

    Movies suggested by BOTH recommenders are ranked first (by how often they
    occur); the remaining single-source suggestions follow in shuffled order.
    Returns the movie info records resolved via getInfoFromMovieIDs.
    """
    logger.debug("in function getCombinedRecommendation: " + str(userId))
    contentBased = getSimilarFromContentBased(id=userId, listOfMovieIDs=True)
    logger.debug("Content based: "+str(contentBased))
    svdBased = []
    database = "database/test.db"
    # connection to database
    databaseConnection = sqlite3.connect(database)
    dbSql = databaseConnection.cursor()
    # Only use the SVD recommender when the user appears in the SVD train set.
    isUserInSVD = dbSql.execute('''
        SELECT count(userStatistics.id) FROM userStatistics INNER JOIN svdTrainBlock
        ON userStatistics.id = svdTrainBlock.id
        AND userStatistics.id = ?
        ORDER BY svdTrainBlock.test_date DESC LIMIT 1
        ''', (userId,)).fetchall()[0][0]
    if isUserInSVD != 0:
        svdBased = getRecommendationFromSVD(userId, listOfMovieIDs=True)
        logger.debug("SVD data: "+str(svdBased))
    # Rank movies recommended by both sources first (occurrence count > 1).
    result = contentBased.copy()
    result.extend(svdBased)
    result.sort(key=Counter(result).get, reverse=True)
    cnt = Counter(result)
    result = [k for k, v in cnt.items() if v > 1]
    # Remove the shared movies from each source list, then append the
    # leftovers in random order.
    for curMovie in result:
        if curMovie in contentBased: contentBased.remove(curMovie)
        if curMovie in svdBased: svdBased.remove(curMovie)
    contentBased.extend(svdBased)
    shuffle(contentBased)
    result.extend(contentBased)
    result = getInfoFromMovieIDs(result, dbSql)
    databaseConnection.close()
    return result
# CLI entry point: expects exactly one argument (the user id) and prints the
# combined recommendation list; debug output goes to a log file.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, filemode='w', filename='logs/getCombinedRecommendation.log', format='%(name)s - %(levelname)s - %(message)s')
    if len(sys.argv) == 2:
        logger.debug('In main of getCombinedRecommendation.py, argv: %s', sys.argv)
        print(getCombinedRecommendation(sys.argv[1]))
        sys.stdout.flush()
| 36.267857 | 152 | 0.754308 |
import sys
import sqlite3
import logging
from collections import Counter
from random import shuffle
from getSimilarFromContentBased import getSimilarFromContentBased
from getRecommendationFromSVD import getRecommendationFromSVD
from getInfoFromMovieIDs import getInfoFromMovieIDs
logger = logging.getLogger(__name__)
def getCombinedRecommendation(userId):
logger.debug("in function getCombinedRecommendation: " + str(userId))
contentBased = getSimilarFromContentBased(id=userId, listOfMovieIDs=True)
logger.debug("Content based: "+str(contentBased))
svdBased = []
database = "database/test.db"
databaseConnection = sqlite3.connect(database)
dbSql = databaseConnection.cursor()
isUserInSVD = dbSql.execute('''
SELECT count(userStatistics.id) FROM userStatistics INNER JOIN svdTrainBlock
ON userStatistics.id = svdTrainBlock.id
AND userStatistics.id = ?
ORDER BY svdTrainBlock.test_date DESC LIMIT 1
''', (userId,)).fetchall()[0][0]
if isUserInSVD != 0:
svdBased = getRecommendationFromSVD(userId, listOfMovieIDs=True)
logger.debug("SVD data: "+str(svdBased))
result = contentBased.copy()
result.extend(svdBased)
result.sort(key=Counter(result).get, reverse=True)
cnt = Counter(result)
result = [k for k, v in cnt.items() if v > 1]
for curMovie in result:
if curMovie in contentBased: contentBased.remove(curMovie)
if curMovie in svdBased: svdBased.remove(curMovie)
contentBased.extend(svdBased)
shuffle(contentBased)
result.extend(contentBased)
result = getInfoFromMovieIDs(result, dbSql)
databaseConnection.close()
return result
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, filemode='w', filename='logs/getCombinedRecommendation.log', format='%(name)s - %(levelname)s - %(message)s')
if len(sys.argv) == 2:
logger.debug('In main of getCombinedRecommendation.py, argv: %s', sys.argv)
print(getCombinedRecommendation(sys.argv[1]))
sys.stdout.flush()
| true | true |
f73e99e7e285171a17190c7d2c837c106094bc64 | 19,804 | py | Python | test-component/draw_graph_2.py | cloudcomputinghust/IoT | 5db3f9078be427fa23549add1747a067c2add767 | [
"MIT"
] | 2 | 2017-05-02T06:43:10.000Z | 2017-05-30T11:18:03.000Z | test-component/draw_graph_2.py | cloudcomputinghust/IoT | 5db3f9078be427fa23549add1747a067c2add767 | [
"MIT"
] | 10 | 2016-08-09T13:11:00.000Z | 2016-11-10T12:33:02.000Z | test-component/draw_graph_2.py | cloudcomputinghust/IoT | 5db3f9078be427fa23549add1747a067c2add767 | [
"MIT"
] | 5 | 2016-08-07T17:11:20.000Z | 2016-10-22T08:45:42.000Z | import matplotlib.pyplot as plt
import numpy as np
from influxdb import InfluxDBClient
import time
import datetime
import collections
# Capture windows for the two measurement runs (InfluxDB timestamp literals).
time_min = '2017-04-03 16:35:00'
time_max = '2017-04-03 22:35:00'
time_min_2 = '2017-04-06 09:30:00'
time_max_2 = '2017-04-06 14:30:00'
# time_min = '2017-03-25 00:00:00'
# time_max = '2017-03-25 11:28:16'
# Bucket size used in GROUP BY time(...) clauses.
time_grouped = '30s'
time_step = 5
# Pod names of the IoT platform instances and their legend labels
# (each instance was driven at a different message rate).
onem2m = ['onem2m-1', 'onem2m-2', 'onem2m-3']
onem2m_naming = {'onem2m-1': '10 messages/m', 'onem2m-2': '20 messages/m', 'onem2m-3': '40 messages/m'}
openhab = ['openhab-1', 'openhab-2', 'openhab-3']
openhab_naming = {'openhab-1': '10 messages/m', 'openhab-2': '20 messages/m', 'openhab-3': '40 messages/m'}
# Node addresses of the Kubernetes cluster under test.
cluster = ['128.199.91.17', '139.59.98.138', '139.59.98.157']
fog_mqtt = ['mqtt']
cloud_mqtt = ['mqtt']
cloud_processing = 'measure-data-rate'
# Pre-built time filter fragment reused by some cluster-level queries.
time_range = 'AND time >\'' + time_min + '\' AND time < \'' + time_max + '\' '
fog_namespace = 'kube-system'
cloud_namespace = 'cloud-kube-system'
# sensing_topic = ['onem2m_pf_1/temperature', 'onem2m_pf_6/temperature', 'onem2m_pf_11/temperature',
#                  'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']
# MQTT topics whose sensing values are plotted in draw_graps figure 3.
sensing_topic = ['onem2m_pf_1/temperature','onem2m_pf_6/temperature', 'onem2m_pf_11/temperature', 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']
def cpu_cluster_query(_cluster_name):
    """InfluxQL for node-level CPU usage of one node, bucketed by time_grouped.

    NOTE(review): the /20 divisor presumably rescales the raw usage_rate to a
    percentage for this node size -- confirm against the heapster metric units.
    """
    return 'SELECT sum("value")/20 FROM "cpu/usage_rate" WHERE "type" = \'node\' AND "nodename"=\'' + _cluster_name + '\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time(' + str(
        time_grouped) + '), "nodename" fill(null);'
def memory_cluster_query(_cluster_name):
    """InfluxQL for node memory usage over [time_min, time_max].

    NOTE(review): *100/(1024*1.95) suggests conversion to a percentage of
    ~1.95 GB total node RAM -- confirm node sizing before reusing.
    """
    return 'SELECT sum("value")*100/(1024*1.95) FROM "memory/usage" WHERE "type" = \'node\' ' +time_range+\
           ' AND "nodename"=\''+_cluster_name+'\' ' +\
           'GROUP BY time('+time_grouped+'), "nodename" fill(null);'
def net_cluster_query(_cluster_name):
    """InfluxQL for node-level network transmit rate, bucketed by time_grouped."""
    return 'SELECT sum("value") FROM "network/tx_rate" WHERE "type" = \'node\' '+\
           time_range + ' AND "nodename"=\''+_cluster_name+'\' ' + \
           ' GROUP BY time('+time_grouped+'), "nodename" fill(null);'
def cpu_query(_pod_name, _namespace):
    """InfluxQL for CPU usage of one pod's containers, grouped by container name."""
    return 'SELECT sum("value") FROM "cpu/usage_rate" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
        pod_name=_pod_name) + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def _cpu_query(_namespace):
    """InfluxQL for namespace-wide container CPU usage, grouped by container name.

    NOTE(review): the /10 divisor looks like a unit rescale (millicores to
    percent of a node share?) -- confirm before reusing.
    """
    return 'SELECT sum("value")/10 FROM "cpu/usage_rate" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def _mem_query(_namespace):
    """InfluxQL for namespace-wide container memory usage in MB (bytes / 1024^2)."""
    return 'SELECT sum("value")/(1024*1024) FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def _mem_query_2(_namespace):
    """InfluxQL reading the onem2m-1 container's memory sample at exactly time_min.

    The trailing .format(...) is a no-op leftover: this string contains no
    {time_grouped} placeholder.
    """
    return 'SELECT * FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND "container_name"=\'onem2m-1\' AND time =\'' + \
           time_min + '\' ;'.format(
        time_grouped=time_grouped)
def _net_query(_namespace, _group_by):
    """InfluxQL for namespace-wide pod transmit rate in kBps, grouped by *_group_by*."""
    return 'SELECT sum("value")/1024 FROM "network/tx_rate" WHERE "type" = \'pod\' AND "namespace_name" = \''+_namespace+'\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "{group_by}" fill(null);'.format(
        time_grouped=time_grouped, group_by=_group_by)
def mem_query(_pod_name, _namespace):
    """InfluxQL for one pod's container memory usage in MB, grouped by container name."""
    return 'SELECT sum("value")/(1024*1024) FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
        pod_name=_pod_name) + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def net_query(_pod_name, _namespace):
    """InfluxQL for one pod's network transmit rate in kBps, bucketed by time_grouped."""
    return 'SELECT sum("value")/1024 FROM "network/tx_rate" WHERE "type" = \'pod\' AND "namespace_name" = \''+_namespace+'\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
        pod_name=_pod_name) + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}) fill(null);'.format(
        time_grouped=time_grouped)
def data_rate_query():
    """InfluxQL for total collected-message count per time_grouped bucket."""
    return 'SELECT sum("num_of_message") FROM "data_collect_rate" WHERE time >\'' + time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped});'.format(
        time_grouped=time_grouped)
def data_sensing_query():
    """InfluxQL for mean sensing values per topic over the second capture window."""
    return 'SELECT mean("value") FROM "data_collect_rate" WHERE time >\'' + time_min_2 + '\' AND time < \'' + time_max_2 + '\' GROUP BY time({time_grouped}), "topic_id" fill(null);'.format(
        time_grouped=time_grouped)
def data_deplay_query(select_field):
    """InfluxQL for the mean of *select_field* grouped by sensor count.

    No time bucketing here: one mean per "num_of_sensor" tag value.
    ("deplay" is a long-standing typo for "delay", kept for compatibility.)
    """
    return 'SELECT mean("'+select_field+'") FROM "data_collect_rate" WHERE time >\'' + time_min_2 + '\' AND time < \'' + time_max_2 + '\' GROUP BY "num_of_sensor" fill(null);'
# def query_metric(_query):
# result = client.query(_query)
# x_val = list()
# y_val = list()
# for k, v in result.items():
# _list = list(v)
# _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
# for item in _list:
# val = 0
# if len(y_val) > 0:
# val = y_val[len(y_val) - 1]
# if item['sum']:
# val = item['sum']
# time_stamp = time.mktime(datetime.datetime.strptime(item['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
# x_val.append((time_stamp - _time_start) / 60)
# y_val.append(val)
# break
# time.sleep(2)
# return {'x': x_val, 'y': y_val}
def query_metric(_query, _group_by=None, _aggre_metric=None):
    """Run an InfluxDB query and reshape the result for plotting.

    Without _group_by/_aggre_metric: returns one series
    {'x': [minutes since first sample], 'y': [value or None]} built from the
    FIRST result series only (the loop breaks after one key) and reads the
    'sum' field of each point.
    With both: returns {tag_value: {'x': [...], 'y': [...]}} keyed by the
    _group_by tag value, reading the _aggre_metric field of each point.
    Sleeps 2s per query to throttle load on the InfluxDB server.
    NOTE(review): the truthiness test on the field value maps 0 to None,
    i.e. zero samples are plotted as gaps -- confirm this is intended.
    """
    if (not _group_by) and (not _aggre_metric):
        result = client.query(_query)
        x_val = list()
        y_val = list()
        for k, v in result.items():
            _list = list(v)
            # Time axis is minutes relative to the first sample of the series.
            _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
            for item in _list:
                # val = 0
                # if len(y_val) > 0:
                #     val = y_val[len(y_val) - 1]
                val = None
                if item['sum']:
                    val = item['sum']
                time_stamp = time.mktime(datetime.datetime.strptime(item['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
                x_val.append((time_stamp - _time_start) / 60)
                y_val.append(val)
            break
        time.sleep(2)
        return {'x': x_val, 'y': y_val}
    result = client.query(_query)
    lines = dict()
    for k, v in result.items():
        _list = list(v)
        _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
        for item in _list:
            # val = 0
            val = None
            if item[_aggre_metric]:
                val = item[_aggre_metric]
            time_stamp = time.mktime(datetime.datetime.strptime(item['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
            # k is (measurement, tags); index the requested grouping tag.
            if not lines.get(k[1][_group_by]):
                lines[k[1][_group_by]] = {'x': list(), 'y': list()}
            lines.get(k[1][_group_by]).get('x').append((time_stamp - _time_start) / 60)
            lines.get(k[1][_group_by]).get('y').append(val)
        time.sleep(2)
    return lines
def mean_values(values, field_1='x', field_2='y'):
    """Element-wise mean of several plot series.

    Each entry of *values* is a dict with an x-axis list under ``field_1`` and
    a y-axis list under ``field_2``.  Returns a single series whose x-axis is
    taken from the first entry and whose y value at each index is the mean of
    all series at that index, or ``None`` when any series is missing a sample
    there (so the plotted line shows a gap).

    Fixes over the original: works for any number of series (not just three),
    computes the common length consistently from the y lists (the original
    compared x-list lengths but took y-list lengths), and treats a value of 0
    as a real sample (only ``None`` marks a missing point).
    """
    # Only indices present in every series can be averaged.
    min_len = min(len(series[field_2]) for series in values)
    mean_ys = []
    xs = []
    for index in range(min_len):
        samples = [series[field_2][index] for series in values]
        if all(sample is not None for sample in samples):
            mean_ys.append(sum(samples) / len(samples))
        else:
            # A gap in any input series leaves a gap in the output.
            mean_ys.append(None)
        xs.append(values[0][field_1][index])
    return {field_1: xs, field_2: mean_ys}
def gen_plot_by_row(plt, data, y_index,num_col, num_row, row_label, titles, line_type, marker=None, scale=False):
    """Render one row of subplots: one gen_plot call per column entry in *data*.

    Only the first (leftmost) subplot of the row receives the shared y-axis
    label; subplot indices are laid out row-major on a num_row x num_col grid.
    """
    for col, cell in enumerate(data):
        y_label = row_label if col == 0 else None
        gen_plot(plt=plt, data=cell, index=(col + y_index * num_col + 1),
                 line_type=line_type, y_label=y_label, title=titles[col],
                 num_col=num_col, nul_row=num_row, marker=marker, scale=scale)
def gen_plot(plt, data, index, line_type, num_col, nul_row, y_label=None, x_label='time(s)', title=None, marker=None, scale=False):
    """Draw one subplot at position *index* on a nul_row x num_col grid.

    *data* may be a list of series dicts, a single series {'x': ..., 'y': ...},
    or a dict of named series {label: {'x': ..., 'y': ...}}.  *marker* is
    accepted for API compatibility but currently unused.

    Fixes: the original called ``sorted(keys)`` and discarded the result
    (sorted() returns a new list), so named series were plotted in insertion
    order; series and legend now both use sorted label order as intended.
    The legend's ncol is also clamped to >= 1 (matplotlib rejects ncol=0).
    """
    plt.subplot(int('{}{}{}'.format(nul_row, num_col, index)))
    if isinstance(data, list):
        # A list of series: plot each with default styling.
        for line in data:
            plt.plot(line['x'], line['y'])
    elif isinstance(data, dict):
        if data.get('x', 0) == 0:
            # Dict of named series: plot (and label) them in sorted key order.
            ordered_keys = sorted(data.keys())
            for count, key in enumerate(ordered_keys):
                _values = data[key]
                # None samples become NaN in the float array; the isfinite
                # mask drops them so lines skip the gaps.
                series1 = np.array(_values['y']).astype(np.double)
                s1mask = np.isfinite(series1)
                series = np.array(_values['x'])
                if len(data) > 3:
                    # Many series: distinguish them with per-series linestyles.
                    plt.plot(series[s1mask], series1[s1mask], linewidth=2, linestyle=line_type[count])
                else:
                    plt.plot(series[s1mask], series1[s1mask], linewidth=1)
                if scale:
                    plt.yscale('log')
            plt.legend(ordered_keys, ncol=max(1, len(ordered_keys) // 3), loc='upper right', columnspacing=1.5, labelspacing=0.0,
                       handletextpad=0.0, handlelength=1.0, fontsize='small')
        else:
            # Single series dict {'x': [...], 'y': [...]}.
            plt.plot(data['x'], data['y'], line_type[0])
    if y_label:
        plt.ylabel(y_label)
    if x_label:
        plt.xlabel(x_label)
    plt.title(title)
    plt.grid(True)
    # Tick every 30 minutes across the six-hour capture window.
    plt.xticks(np.arange(0, 360 + 1, 30.0))
def draw_graps(data=dict()):
    """Render the sensing-data figure from the metric dict built at module level.

    Expects data['cloud']['sensing_data'] to hold one series per topic listed
    in ``sensing_topic``.  (The large commented-out fog/cloud resource-usage
    figures from the original were dead code and have been removed.)
    """
    # Per-series line styles / markers handed down to gen_plot.
    line_type = ['-', '-.', '--', ':', '-.', '--']
    marker = ['.', 'o', 'v', 'x', '+', '<', '*']
    plt.figure(1)
    plt.figure(3)
    panels = [{k: data['cloud']['sensing_data'][k] for k in sensing_topic}]
    panel_titles = ['SENSING DATA']
    gen_plot_by_row(plt=plt, data=panels, y_index=0, row_label='Value',
                    titles=panel_titles, num_col=1, num_row=1,
                    line_type=line_type, marker=marker)
    plt.subplots_adjust(top=0.93, bottom=0.07, left=0.05, right=0.99, hspace=0.85,
                        wspace=0.19)
    plt.show()
    return
# --- Main script: pull delay metrics from InfluxDB and plot a stacked bar chart.
# (Large commented-out fog/cloud metric collection blocks were dead code and
# have been removed.)
client = InfluxDBClient('188.166.238.158', 32485, 'root', 'root', 'k8s')
data = dict()
# Pod / namespace layout of the fog and cloud deployments under test.
pod_names = {'fog': {'onem2m': onem2m, 'openhab': openhab, 'mqtt': fog_mqtt},
             'cloud': {'mqtt': cloud_mqtt, 'processing': cloud_processing}}
namespaces = {'fog': fog_namespace, 'cloud': cloud_namespace}
resource_metrics = {'cpu', 'memory', 'network'}
resource_query = {'cpu': _cpu_query, 'memory': _mem_query, 'network': _net_query}
data['fog'] = dict()
data['cloud'] = dict()
print('query cloud done')
# Mean sensor->platform round-trip time, one bar segment per sensor count.
_data_1 = client.query(data_deplay_query('round_trip_1'))
series_1 = {'x': list(), 'y': list()}
for group_key, points in _data_1.items():
    series_1['x'].append(int(group_key[1]['num_of_sensor']))
    series_1['y'].append(float(list(points)[0]['mean']))
print('-----------------------------------------------')
# Mean platform->cloud round-trip time, stacked on top of the first series.
series_2 = {'x': list(), 'y': list()}
_data_2 = client.query(data_deplay_query('round_trip_2'))
for group_key, points in _data_2.items():
    series_2['x'].append(int(group_key[1]['num_of_sensor']))
    # NOTE(review): the measured mean is offset by +1 second here -- confirm
    # whether this is a deliberate correction or a leftover experiment tweak.
    series_2['y'].append(float(list(points)[0]['mean']+1))
print(series_1)
print(series_2)
width = 1  # the width of the bars: can also be len(x) sequence
p1 = plt.bar(series_1['x'], series_1['y'], width, color='#d62728')
p2 = plt.bar(series_2['x'], series_2['y'], width,
             bottom=series_1['y'])
plt.ylabel('Transmission Time (seconds)')
plt.xlabel('Number of sensors per platform (on 5 platforms)')
plt.title('Tranmission time by number of sensor')
plt.xticks(series_1['x'])
plt.legend((p1[0], p2[0]), ('Sensor - Platform Transmission Time', 'Platform - Cloud Transmission Time'))
plt.show()
| 47.37799 | 192 | 0.59387 | import matplotlib.pyplot as plt
import numpy as np
from influxdb import InfluxDBClient
import time
import datetime
import collections
time_min = '2017-04-03 16:35:00'
time_max = '2017-04-03 22:35:00'
time_min_2 = '2017-04-06 09:30:00'
time_max_2 = '2017-04-06 14:30:00'
time_grouped = '30s'
time_step = 5
onem2m = ['onem2m-1', 'onem2m-2', 'onem2m-3']
onem2m_naming = {'onem2m-1': '10 messages/m', 'onem2m-2': '20 messages/m', 'onem2m-3': '40 messages/m'}
openhab = ['openhab-1', 'openhab-2', 'openhab-3']
openhab_naming = {'openhab-1': '10 messages/m', 'openhab-2': '20 messages/m', 'openhab-3': '40 messages/m'}
cluster = ['128.199.91.17', '139.59.98.138', '139.59.98.157']
fog_mqtt = ['mqtt']
cloud_mqtt = ['mqtt']
cloud_processing = 'measure-data-rate'
time_range = 'AND time >\'' + time_min + '\' AND time < \'' + time_max + '\' '
fog_namespace = 'kube-system'
cloud_namespace = 'cloud-kube-system'
sensing_topic = ['onem2m_pf_1/temperature','onem2m_pf_6/temperature', 'onem2m_pf_11/temperature', 'openhab_pf_1/temperature', 'openhab_pf_6/temperature', 'openhab_pf_11/temperature']
def cpu_cluster_query(_cluster_name):
return 'SELECT sum("value")/20 FROM "cpu/usage_rate" WHERE "type" = \'node\' AND "nodename"=\'' + _cluster_name + '\' AND time >\'' + \
time_min + '\' AND time < \'' + time_max + '\' GROUP BY time(' + str(
time_grouped) + '), "nodename" fill(null);'
def memory_cluster_query(_cluster_name):
return 'SELECT sum("value")*100/(1024*1.95) FROM "memory/usage" WHERE "type" = \'node\' ' +time_range+\
' AND "nodename"=\''+_cluster_name+'\' ' +\
'GROUP BY time('+time_grouped+'), "nodename" fill(null);'
def net_cluster_query(_cluster_name):
return 'SELECT sum("value") FROM "network/tx_rate" WHERE "type" = \'node\' '+\
time_range + ' AND "nodename"=\''+_cluster_name+'\' ' + \
' GROUP BY time('+time_grouped+'), "nodename" fill(null);'
def cpu_query(_pod_name, _namespace):
return 'SELECT sum("value") FROM "cpu/usage_rate" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
pod_name=_pod_name) + \
time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
time_grouped=time_grouped)
def _cpu_query(_namespace):
return 'SELECT sum("value")/10 FROM "cpu/usage_rate" WHERE "type" = \'pod_container\' AND "namespace_name" = \''+_namespace+'\' AND time >\'' + \
time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
time_grouped=time_grouped)
def _mem_query(_namespace):
    """InfluxQL: namespace-wide per-container memory usage in MiB (raw value is bytes)."""
    return 'SELECT sum("value")/(1024*1024) FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \'' + _namespace + '\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def _mem_query_2(_namespace):
    """InfluxQL: raw memory/usage points of the 'onem2m-1' container at exactly time_min."""
    # The trailing .format(time_grouped=...) in the original was a no-op:
    # this string contains no replacement fields, so the call is dropped.
    return 'SELECT * FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \'' + _namespace + '\' AND "container_name"=\'onem2m-1\' AND time =\'' + \
           time_min + '\' ;'
def _net_query(_namespace, _group_by):
    """InfluxQL: namespace-wide pod network tx rate in KiB, grouped by time and the given tag."""
    return 'SELECT sum("value")/1024 FROM "network/tx_rate" WHERE "type" = \'pod\' AND "namespace_name" = \'' + _namespace + '\' AND time >\'' + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "{group_by}" fill(null);'.format(
        time_grouped=time_grouped, group_by=_group_by)
def mem_query(_pod_name, _namespace):
    """InfluxQL: per-container memory usage (MiB) of a single pod, grouped by container."""
    return 'SELECT sum("value")/(1024*1024) FROM "memory/usage" WHERE "type" = \'pod_container\' AND "namespace_name" = \'' + _namespace + '\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
        pod_name=_pod_name) + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}), "container_name" fill(null);'.format(
        time_grouped=time_grouped)
def net_query(_pod_name, _namespace):
    """InfluxQL: network tx rate (KiB) of a single pod, bucketed by time."""
    return 'SELECT sum("value")/1024 FROM "network/tx_rate" WHERE "type" = \'pod\' AND "namespace_name" = \'' + _namespace + '\' AND "pod_name" = \'{pod_name}\' AND time >\''.format(
        pod_name=_pod_name) + \
           time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped}) fill(null);'.format(
        time_grouped=time_grouped)
def data_rate_query():
    """InfluxQL: total collected messages per time bucket over the primary window."""
    return 'SELECT sum("num_of_message") FROM "data_collect_rate" WHERE time >\'' + time_min + '\' AND time < \'' + time_max + '\' GROUP BY time({time_grouped});'.format(
        time_grouped=time_grouped)
def data_sensing_query():
    """InfluxQL: mean collected value per topic, over the secondary window (time_min_2/time_max_2)."""
    return 'SELECT mean("value") FROM "data_collect_rate" WHERE time >\'' + time_min_2 + '\' AND time < \'' + time_max_2 + '\' GROUP BY time({time_grouped}), "topic_id" fill(null);'.format(
        time_grouped=time_grouped)
def data_deplay_query(select_field):
    """InfluxQL: mean of the given round-trip field grouped by sensor count.

    Name is a typo for 'delay'; kept as-is because callers use it.
    """
    return 'SELECT mean("' + select_field + '") FROM "data_collect_rate" WHERE time >\'' + time_min_2 + '\' AND time < \'' + time_max_2 + '\' GROUP BY "num_of_sensor" fill(null);'
def query_metric(_query, _group_by=None, _aggre_metric=None):
    """
    Run an InfluxDB query and reshape the result for plotting.

    Without _group_by/_aggre_metric: returns one {'x': [...], 'y': [...]} series
    built from the *first* result set only (note the break after the inner loop),
    reading the 'sum' column. With both arguments: returns a dict
    {group_tag_value: {'x': [...], 'y': [...]}} keyed by the _group_by tag and
    reading the column named by _aggre_metric. X values are minutes elapsed
    since the first sample of each series.
    """
    if (not _group_by) and (not _aggre_metric):
        result = client.query(_query)
        x_val = list()
        y_val = list()
        for k, v in result.items():
            _list = list(v)
            # Timestamp of the first sample; x values are offsets from it, in minutes.
            _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
            for item in _list:
                val = None
                # Falsy aggregate (missing bucket) is recorded as a None gap.
                if item['sum']:
                    val = item['sum']
                time_stamp = time.mktime(datetime.datetime.strptime(item['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
                x_val.append((time_stamp - _time_start) / 60)
                y_val.append(val)
            break  # only the first series of the result is used
        time.sleep(2)  # presumably throttles successive InfluxDB queries — confirm
        return {'x': x_val, 'y': y_val}
    result = client.query(_query)
    lines = dict()
    for k, v in result.items():
        _list = list(v)
        _time_start = time.mktime(datetime.datetime.strptime(_list[0]['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
        for item in _list:
            val = None
            if item[_aggre_metric]:
                val = item[_aggre_metric]
            time_stamp = time.mktime(datetime.datetime.strptime(item['time'], "%Y-%m-%dT%H:%M:%SZ").timetuple())
            # k[1] holds the group-by tags of this series; lazily create its line.
            if not lines.get(k[1][_group_by]):
                lines[k[1][_group_by]] = {'x': list(), 'y': list()}
            lines.get(k[1][_group_by]).get('x').append((time_stamp - _time_start) / 60)
            lines.get(k[1][_group_by]).get('y').append(val)
    time.sleep(2)
    return lines
def mean_values(values, field_1='x', field_2='y'):
    """
    Element-wise mean of several aligned series.

    values is a sequence of dicts, each holding parallel lists under field_1
    (x axis, taken from the first series) and field_2 (y values). Series are
    truncated to the shortest field_2 list. A position where any series holds
    None yields None (a plotting gap); a reading of 0 is a valid value.

    Fixes over the original: the minimum length was computed by comparing
    field_1 lengths but assigning field_2 lengths (only over the first three
    series), and falsy zero readings were wrongly dropped as gaps. The mean
    now also covers any number of series, not exactly three.
    """
    n = min(len(v[field_2]) for v in values)
    xs = []
    mean_ys = []
    for i in range(n):
        ys = [v[field_2][i] for v in values]
        if all(y is not None for y in ys):
            mean_ys.append(sum(ys) / len(ys))
        else:
            mean_ys.append(None)  # keep a gap where any series is missing a point
        xs.append(values[0][field_1][i])
    return {field_1: xs, field_2: mean_ys}
def gen_plot_by_row(plt, data, y_index, num_col, num_row, row_label, titles, line_type, marker=None, scale=False):
    """Draw one row of subplots; only the first column carries the row's y-axis label."""
    for col, series in enumerate(data):
        label = row_label if col == 0 else None
        gen_plot(plt=plt, data=series, index=(col + y_index * num_col + 1),
                 line_type=line_type, y_label=label, title=titles[col],
                 num_col=num_col, nul_row=num_row, marker=marker, scale=scale)
def gen_plot(plt, data, index, line_type, num_col, nul_row, y_label=None, x_label='time(s)', title=None, marker=None, scale=False):
    """
    Render one subplot of a nul_row x num_col grid.

    data may be:
      - a list of {'x': ..., 'y': ...} series (default styling),
      - a dict of named series {name: {'x': ..., 'y': ...}} with a legend,
      - a single {'x': ..., 'y': ...} series.
    None/NaN gaps in named series are masked out before plotting.
    """
    # Pass the grid shape as separate arguments; the previous
    # int('{}{}{}'.format(...)) form silently mis-addressed grids with
    # more than 9 cells.
    plt.subplot(nul_row, num_col, index)
    if isinstance(data, list):
        for line in data:
            plt.plot(line['x'], line['y'])
    elif isinstance(data, dict):
        if data.get('x', 0) == 0:
            # Dict of named series: iterate in sorted key order so line styles
            # and the legend agree. (The original called sorted(keys) but
            # discarded the result, leaving insertion order.)
            keys = sorted(data.keys())
            for count, _key_group in enumerate(keys):
                _values = data[_key_group]
                series1 = np.array(_values['y']).astype(np.double)
                s1mask = np.isfinite(series1)  # drop None/NaN gaps
                series = np.array(_values['x'])
                if len(data) > 3:
                    plt.plot(series[s1mask], series1[s1mask], linewidth=2, linestyle=line_type[count])
                else:
                    plt.plot(series[s1mask], series1[s1mask], linewidth=1)
                if scale:
                    plt.yscale('log')
            # max(1, ...) keeps the legend valid when fewer than 3 series exist.
            plt.legend(keys, ncol=max(1, int(len(keys) / 3)), loc='upper right', columnspacing=1.5, labelspacing=0.0,
                       handletextpad=0.0, handlelength=1.0, fontsize='small')
        else:
            plt.plot(data['x'], data['y'], line_type[0])
    if y_label:
        plt.ylabel(y_label)
    if x_label:
        plt.xlabel(x_label)
    plt.title(title)
    plt.grid(True)
    plt.xticks(np.arange(0, 360 + 1, 30.0))
# NOTE(review): `data=dict()` is a mutable default argument; safe only if callers
# never rely on a fresh dict per call — confirm before reuse.
def draw_graps(data=dict()):
    line_type = ['-', '-.', '--', ':', '-.', '--']
    marker = ['.', 'o', 'v', 'x', '+', '<', '*']
    plt.figure(1)
    # NOTE(review): the next line is truncated in this copy (the opening of the
    # dict literal is missing); restore from upstream history before running.
    _mqtt, 'processing': cloud_processing}}
    namespaces = {'fog': fog_namespace, 'cloud': cloud_namespace}
    resource_metrics = {'cpu', 'memory', 'network'}
    resource_query = {'cpu': _cpu_query, 'memory': _mem_query, 'network': _net_query}
    data['fog'] = dict()
    data['cloud'] = dict()
    print('query cloud done')
    # NOTE(review): garbled line — likely the tail of
    # `series_1 = {'x': list(), 'y': list()}` plus a lost
    # `_data_1 = client.query(...)` assignment; `_data_1` below is otherwise undefined.
    ist()}
    for k, v in _data_1.items():
        series_1['x'].append(int(k[1]['num_of_sensor']))
        series_1['y'].append(float(list(v)[0]['mean']))
    print('-----------------------------------------------')
    series_2 = {'x': list(), 'y': list()}
    _data_2 = client.query(data_deplay_query('round_trip_2'))
    for k, v in _data_2.items():
        series_2['x'].append(int(k[1]['num_of_sensor']))
        series_2['y'].append(float(list(v)[0]['mean']+1))
    print(series_1)
    print(series_2)
    # Stacked bar chart: sensor->platform time with platform->cloud time on top.
    width = 1
    p1 = plt.bar(series_1['x'], series_1['y'], width, color='#d62728')
    p2 = plt.bar(series_2['x'], series_2['y'], width,
                 bottom=series_1['y'])
    plt.ylabel('Transmission Time (seconds)')
    plt.xlabel('Number of sensors per platform (on 5 platforms)')
    plt.title('Tranmission time by number of sensor')
    plt.xticks(series_1['x'])
    plt.legend((p1[0], p2[0]), ('Sensor - Platform Transmission Time', 'Platform - Cloud Transmission Time'))
    # Attach a text label above each bar displaying its height
    # """
    plt.show()
| true | true |
f73e99f6bd947fc8027d455d3d18f2e8bd7b71d7 | 8,095 | py | Python | tests/scripts/thread-cert/Cert_9_2_15_PendingPartition.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 2,962 | 2016-05-11T15:06:06.000Z | 2022-03-27T20:06:16.000Z | tests/scripts/thread-cert/Cert_9_2_15_PendingPartition.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 5,899 | 2016-05-11T19:21:49.000Z | 2022-03-31T18:17:20.000Z | tests/scripts/thread-cert/Cert_9_2_15_PendingPartition.py | AdityaHPatwardhan/openthread | a201e9d5d0273bb51fa20efc8758be20a725018e | [
"BSD-3-Clause"
] | 1,113 | 2016-05-11T15:37:42.000Z | 2022-03-31T09:37:04.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_CHILD_ID_REQUEST, MGMT_ACTIVE_SET_URI, MGMT_ACTIVE_GET_URI, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, ADDRESS_REGISTRATION_TLV
from pktverify.packet_verifier import PacketVerifier
# Channel and PAN ID of the initial active dataset, and the PAN ID the final
# pending dataset switches the network to.
CHANNEL_INIT = 19
PANID_INIT = 0xface
PANID_FINAL = 0xabcd

# Node ids of the 4-node test topology.
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
class Cert_9_2_15_PendingPartition(thread_cert.TestCase):
    """
    Thread certification test 9.2.15 (Pending Partition).

    The Commissioner pushes two Pending Operational Datasets — the second one
    changing the PAN ID — while Router_2 is reset and re-attached in between.
    The test then checks that all four nodes converge on PANID_FINAL and that
    Router_2 stays reachable, and verifies Router_2's MLE attach exchanges at
    packet level.
    """
    # This scenario drives the CLI reference app only, not the NCP build.
    SUPPORT_NCP = False

    TOPOLOGY = {
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'allowlist': [LEADER]
        },
        LEADER: {
            'name': 'LEADER',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'partition_id': 0xffffffff,
            'allowlist': [COMMISSIONER, ROUTER1]
        },
        ROUTER1: {
            'name': 'ROUTER_1',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'allowlist': [LEADER, ROUTER2]
        },
        ROUTER2: {
            'name': 'ROUTER_2',
            'active_dataset': {
                'timestamp': 15,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT
            },
            'mode': 'rdn',
            'allowlist': [ROUTER1]
        },
    }

    def _setUpRouter2(self):
        """Re-apply Router_2's volatile configuration (allowlist, jitter) after a reset."""
        self.nodes[ROUTER2].add_allowlist(self.nodes[ROUTER1].get_addr64())
        self.nodes[ROUTER2].enable_allowlist()
        self.nodes[ROUTER2].set_router_selection_jitter(1)

    def test(self):
        """Bring up the topology, apply two pending datasets, and check convergence."""
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)

        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        # First pending dataset: new active timestamp, long (600 s) delay timer.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=10,
            active_timestamp=70,
            delay_timer=600000,
            mesh_local='fd00:0db9::',
        )
        self.simulator.go(5)

        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')

        # Reset Router_2; its volatile config must be restored before re-attach.
        self.nodes[ROUTER2].reset()
        self._setUpRouter2()
        self.simulator.go(100)

        # Second pending dataset supersedes the first and changes the PAN ID.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=20,
            active_timestamp=80,
            delay_timer=200000,
            mesh_local='fd00:0db7::',
            panid=PANID_FINAL,
        )
        self.simulator.go(100)

        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
        self.simulator.go(100)

        # After the delay timer elapses every node must be on the new PAN ID.
        self.assertEqual(self.nodes[COMMISSIONER].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[LEADER].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[ROUTER1].get_panid(), PANID_FINAL)
        self.assertEqual(self.nodes[ROUTER2].get_panid(), PANID_FINAL)

        # Pick the first non-link-local address of Router_2 (falls back to the
        # last address if only fe80:: entries exist) and check reachability.
        ipaddrs = self.nodes[ROUTER2].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break

        self.assertTrue(self.nodes[LEADER].ping(ipaddr))

    def verify(self, pv):
        """Packet-level checks on the captured MLE exchange."""
        pkts = pv.pkts
        pv.summary.show()

        LEADER = pv.vars['LEADER']
        COMMISSIONER = pv.vars['COMMISSIONER']
        ROUTER_1 = pv.vars['ROUTER_1']
        ROUTER_2 = pv.vars['ROUTER_2']
        # Restrict a copy of the capture to frames sent by Router_2.
        _router2_pkts = pkts.filter_wpan_src64(ROUTER_2)

        # Step 1: Ensure the topology is formed correctly
        # Verify Commissioner, Leader and Router_1 are sending MLE advertisements
        pkts.copy().filter_wpan_src64(LEADER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
        pkts.filter_wpan_dst64(COMMISSIONER).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        pkts.copy().filter_wpan_src64(COMMISSIONER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
        pkts.filter_wpan_dst64(ROUTER_1).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
        pkts.copy().filter_wpan_src64(ROUTER_1).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()

        # Step 5: Router_2 begins attach process by sending a multicast MLE Parent Request
        # The first MLE Parent Request sent MUST NOT be sent to all routers and REEDS
        _router2_pkts.range(pkts.index).filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
                p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)

        # Step 7: Router_2 MUST send a MLE Child ID Request to Router_1
        _router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
            RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
        } < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)

        # Step 14: Router_2 begins attach process by sending a multicast MLE Parent Request
        # The first MLE Parent Request sent MUST NOT be sent to all routers and REEDS
        _router2_pkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
            lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
                p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)

        # Step 16: Router_2 MUST send a MLE Child ID Request to Router_1
        _router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
            RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
        } < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)
# Allow running this certification script directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 40.883838 | 306 | 0.658184 |
import unittest
import thread_cert
from pktverify.consts import MLE_ADVERTISEMENT, MLE_PARENT_REQUEST, MLE_CHILD_ID_RESPONSE, MLE_CHILD_ID_REQUEST, MGMT_ACTIVE_SET_URI, MGMT_ACTIVE_GET_URI, RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, ADDRESS_REGISTRATION_TLV
from pktverify.packet_verifier import PacketVerifier
CHANNEL_INIT = 19
PANID_INIT = 0xface
PANID_FINAL = 0xabcd
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
class Cert_9_2_15_PendingPartition(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'active_dataset': {
'timestamp': 15,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rdn',
'allowlist': [LEADER]
},
LEADER: {
'name': 'LEADER',
'active_dataset': {
'timestamp': 15,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rdn',
'partition_id': 0xffffffff,
'allowlist': [COMMISSIONER, ROUTER1]
},
ROUTER1: {
'name': 'ROUTER_1',
'active_dataset': {
'timestamp': 15,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rdn',
'allowlist': [LEADER, ROUTER2]
},
ROUTER2: {
'name': 'ROUTER_2',
'active_dataset': {
'timestamp': 15,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rdn',
'allowlist': [ROUTER1]
},
}
def _setUpRouter2(self):
self.nodes[ROUTER2].add_allowlist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER2].enable_allowlist()
self.nodes[ROUTER2].set_router_selection_jitter(1)
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=10,
active_timestamp=70,
delay_timer=600000,
mesh_local='fd00:0db9::',
)
self.simulator.go(5)
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[ROUTER2].reset()
self._setUpRouter2()
self.simulator.go(100)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=20,
active_timestamp=80,
delay_timer=200000,
mesh_local='fd00:0db7::',
panid=PANID_FINAL,
)
self.simulator.go(100)
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.simulator.go(100)
self.assertEqual(self.nodes[COMMISSIONER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[LEADER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER1].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER2].get_panid(), PANID_FINAL)
ipaddrs = self.nodes[ROUTER2].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
COMMISSIONER = pv.vars['COMMISSIONER']
ROUTER_1 = pv.vars['ROUTER_1']
ROUTER_2 = pv.vars['ROUTER_2']
_router2_pkts = pkts.filter_wpan_src64(ROUTER_2)
pkts.copy().filter_wpan_src64(LEADER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
pkts.filter_wpan_dst64(COMMISSIONER).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
pkts.copy().filter_wpan_src64(COMMISSIONER).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
pkts.filter_wpan_dst64(ROUTER_1).filter_mle_cmd(MLE_CHILD_ID_RESPONSE).must_next()
pkts.copy().filter_wpan_src64(ROUTER_1).filter_mle_cmd(MLE_ADVERTISEMENT).must_next()
_router2_pkts.range(pkts.index).filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)
_router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
} < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)
_router2_pkts.filter_mle_cmd(MLE_PARENT_REQUEST).must_next().must_verify(
lambda p: {MODE_TLV, CHALLENGE_TLV, SCAN_MASK_TLV, VERSION_TLV} == set(
p.mle.tlv.type) and p.mle.tlv.scan_mask.r == 1 and p.mle.tlv.scan_mask.e == 0)
_router2_pkts.filter_mle_cmd(MLE_CHILD_ID_REQUEST).must_next().must_verify(lambda p: {
RESPONSE_TLV, LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV, TLV_REQUEST_TLV
} < set(p.mle.tlv.type) and ADDRESS_REGISTRATION_TLV not in p.mle.tlv.type)
if __name__ == '__main__':
unittest.main()
| true | true |
f73e9b285a96c3377ffcbc581d68e32068f527dc | 1,410 | py | Python | backend/users/views.py | OkothPius/Arexa | 50c280ce00baf1e6ec8c1db69fbebe2a2b6c01da | [
"MIT"
] | null | null | null | backend/users/views.py | OkothPius/Arexa | 50c280ce00baf1e6ec8c1db69fbebe2a2b6c01da | [
"MIT"
] | 1 | 2021-11-18T20:05:04.000Z | 2021-11-18T20:05:04.000Z | users/views.py | OkothPius/CRS | 75111b82203adbdc3f50b1127b6db12bf3428f97 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from . forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Handle user sign-up: validate the submitted form on POST, otherwise render a blank one."""
    if request.method == 'POST':
        # Bound form: on success, persist the user and bounce to the login page.
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account created for {username} Log In to Proceed!')
            return redirect('login')
        # Invalid submission falls through and re-renders with field errors.
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Show and update the signed-in user's account and profile details."""
    is_post = request.method == 'POST'
    if is_post:
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)

    # Both forms must validate before anything is saved.
    if is_post and u_form.is_valid() and p_form.is_valid():
        u_form.save()
        p_form.save()
        messages.success(request, f'Your account has been Updated!')
        return redirect('profile')

    return render(request, 'users/profile.html', {'u_form': u_form, 'p_form': p_form})
| 34.390244 | 96 | 0.659574 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from . forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Account created for {username} Log In to Proceed!')
return redirect('login')
else:
form = UserRegisterForm()
context = {'form':form}
return render(request, 'users/register.html', context)
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance = request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance = request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been Updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance = request.user)
p_form = ProfileUpdateForm(instance = request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context)
| true | true |
f73e9bc0c3eb2e3e6e845776ae83116a4355c414 | 3,509 | py | Python | dashboard/modules/log/test_log.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | null | null | null | dashboard/modules/log/test_log.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | null | null | null | dashboard/modules/log/test_log.py | carlos-aguayo/ray | fedbdd5dc6a47aa9cba170816f8c0950193b4fd6 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import logging
import requests
import time
import traceback
import html.parser
import urllib.parse
import pytest
import ray
from ray.new_dashboard.tests.conftest import * # noqa
from ray.test_utils import (
format_web_url,
wait_until_server_available,
)
# Opt in to the new dashboard backend; must be set before the dashboard starts.
os.environ["RAY_USE_NEW_DASHBOARD"] = "1"

logger = logging.getLogger(__name__)
class LogUrlParser(html.parser.HTMLParser):
    """Collects the href target of every <a> anchor encountered while parsing."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._urls = []

    def handle_starttag(self, tag, attrs):
        # Only anchor tags contribute URLs; a missing href is a parse error here.
        if tag != "a":
            return
        self._urls.append(dict(attrs)["href"])

    def error(self, message):
        logger.error(message)

    def get_urls(self):
        """Return the hrefs collected so far, in document order."""
        return self._urls
def test_log(ray_start_with_dashboard):
    """
    End-to-end check of the dashboard log endpoints: writes a marker line via a
    Ray task, then polls the web UI until the marker shows up in a worker log,
    a byte-range request works, and the node info exposes a valid logUrl.
    """
    @ray.remote
    def write_log(s):
        print(s)

    test_log_text = "test_log_text"
    ray.get(write_log.remote(test_log_text))
    assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
            is True)
    webui_url = ray_start_with_dashboard["webui_url"]
    webui_url = format_web_url(webui_url)
    node_id = ray_start_with_dashboard["node_id"]

    # Poll until all checks pass or the timeout trips in the finally block.
    timeout_seconds = 10
    start_time = time.time()
    last_ex = None
    while True:
        time.sleep(1)
        try:
            # /log_index lists one log page per node; this cluster has exactly one.
            response = requests.get(webui_url + "/log_index")
            response.raise_for_status()
            parser = LogUrlParser()
            parser.feed(response.text)
            all_nodes_log_urls = parser.get_urls()
            assert len(all_nodes_log_urls) == 1

            response = requests.get(all_nodes_log_urls[0])
            response.raise_for_status()
            parser = LogUrlParser()
            parser.feed(response.text)

            # Search test_log_text from all worker logs.
            parsed_url = urllib.parse.urlparse(all_nodes_log_urls[0])
            paths = parser.get_urls()
            urls = []
            for p in paths:
                if "worker" in p:
                    urls.append(parsed_url._replace(path=p).geturl())

            for u in urls:
                response = requests.get(u)
                response.raise_for_status()
                if test_log_text in response.text:
                    break
            else:
                raise Exception(f"Can't find {test_log_text} from {urls}")

            # Test range request.
            response = requests.get(
                webui_url + "/logs/dashboard.log",
                headers={"Range": "bytes=43-51"})
            response.raise_for_status()
            assert response.text == "Dashboard"

            # Test logUrl in node info.
            response = requests.get(webui_url + f"/nodes/{node_id}")
            response.raise_for_status()
            node_info = response.json()
            assert node_info["result"] is True
            node_info = node_info["data"]["detail"]
            assert "logUrl" in node_info
            assert node_info["logUrl"] in all_nodes_log_urls
            break
        except Exception as ex:
            # Remember the failure; retry until the deadline expires.
            last_ex = ex
        finally:
            if time.time() > start_time + timeout_seconds:
                ex_stack = traceback.format_exception(
                    type(last_ex), last_ex,
                    last_ex.__traceback__) if last_ex else []
                ex_stack = "".join(ex_stack)
                raise Exception(f"Timed out while testing, {ex_stack}")
# Run this module's tests through pytest when executed as a script.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| 30.513043 | 78 | 0.593046 | import os
import sys
import logging
import requests
import time
import traceback
import html.parser
import urllib.parse
import pytest
import ray
from ray.new_dashboard.tests.conftest import *
from ray.test_utils import (
format_web_url,
wait_until_server_available,
)
os.environ["RAY_USE_NEW_DASHBOARD"] = "1"
logger = logging.getLogger(__name__)
class LogUrlParser(html.parser.HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._urls = []
def handle_starttag(self, tag, attrs):
if tag == "a":
self._urls.append(dict(attrs)["href"])
def error(self, message):
logger.error(message)
def get_urls(self):
return self._urls
def test_log(ray_start_with_dashboard):
@ray.remote
def write_log(s):
print(s)
test_log_text = "test_log_text"
ray.get(write_log.remote(test_log_text))
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
node_id = ray_start_with_dashboard["node_id"]
timeout_seconds = 10
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/log_index")
response.raise_for_status()
parser = LogUrlParser()
parser.feed(response.text)
all_nodes_log_urls = parser.get_urls()
assert len(all_nodes_log_urls) == 1
response = requests.get(all_nodes_log_urls[0])
response.raise_for_status()
parser = LogUrlParser()
parser.feed(response.text)
parsed_url = urllib.parse.urlparse(all_nodes_log_urls[0])
paths = parser.get_urls()
urls = []
for p in paths:
if "worker" in p:
urls.append(parsed_url._replace(path=p).geturl())
for u in urls:
response = requests.get(u)
response.raise_for_status()
if test_log_text in response.text:
break
else:
raise Exception(f"Can't find {test_log_text} from {urls}")
# Test range request.
response = requests.get(
webui_url + "/logs/dashboard.log",
headers={"Range": "bytes=43-51"})
response.raise_for_status()
assert response.text == "Dashboard"
# Test logUrl in node info.
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
node_info = response.json()
assert node_info["result"] is True
node_info = node_info["data"]["detail"]
assert "logUrl" in node_info
assert node_info["logUrl"] in all_nodes_log_urls
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| true | true |
f73e9c93ea86cba13818c06c8b68af09a3b326f5 | 1,658 | py | Python | jesse/indicators/gatorosc.py | slipperlobster/flipper | 8482edd77604fcec2ea08913f1748c21be80dac7 | [
"MIT"
] | 3,999 | 2018-11-09T10:38:51.000Z | 2022-03-31T12:29:12.000Z | jesse/indicators/gatorosc.py | slipperlobster/flipper | 8482edd77604fcec2ea08913f1748c21be80dac7 | [
"MIT"
] | 172 | 2020-04-16T16:19:08.000Z | 2022-03-28T13:28:55.000Z | jesse/indicators/gatorosc.py | pmondal08/jesse | 527952a74bc76f76cf3a2d25755386f8db285885 | [
"MIT"
] | 495 | 2019-03-01T21:48:53.000Z | 2022-03-30T15:35:19.000Z | from collections import namedtuple
import numpy as np
import talib
from scipy.signal import lfilter

from jesse.helpers import get_candle_source, np_shift
from jesse.helpers import slice_candles
# Result container for the four Gator Oscillator outputs.
GATOR = namedtuple('GATOR', ['upper', 'lower', 'upper_change', 'lower_change'])
def gatorosc(candles: np.ndarray, source_type: str = "close", sequential: bool = False) -> GATOR:
    """
    Gator Oscillator by Bill M. Williams

    :param candles: np.ndarray
    :param source_type: str - default: "close"
    :param sequential: bool - default: False

    :return: GATOR(upper, lower, upper_change, lower_change)
    """
    candles = slice_candles(candles, sequential)

    source = get_candle_source(candles, source_type=source_type)
    # Alligator lines: smoothed averages shifted forward in time
    # (jaw: period 13 shift 8, teeth: 8/5, lips: 5/3).
    jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)
    teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)
    lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)

    # Histogram: jaw-teeth spread above zero, teeth-lips spread mirrored below.
    upper = np.abs(jaw - teeth)
    lower = -np.abs(teeth - lips)

    # 1-period momentum of each side (sign flipped for the lower half).
    upper_change = talib.MOM(upper, timeperiod=1)
    lower_change = -talib.MOM(lower, timeperiod=1)

    if sequential:
        return GATOR(upper, lower, upper_change, lower_change)
    else:
        return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])
def numpy_ewma(data, window):
    """
    Exponentially weighted moving average with adjust=True semantics,
    equivalent to ``pandas.Series(data).ewm(alpha=1 / window, adjust=True).mean()``.

    :param data: np.ndarray - 1-dimensional series of values
    :param window: int - smoothing window; decay factor is alpha = 1 / window
    :return: np.ndarray - EWMA series, same length as ``data``
    """
    alpha = 1 / window
    decay = 1 - alpha
    # Numerator sum_{j<=i} decay**(i-j) * x[j] and weight sum_{k<=i} decay**k
    # both satisfy the recursion y[i] = x[i] + decay * y[i-1]; evaluating them
    # with lfilter is numerically stable, unlike the previous vectorized form
    # whose (1 - alpha) ** (-arange(n)) scaling overflowed for long inputs.
    numerator = lfilter([1.0], [1.0, -decay], data)
    denominator = lfilter([1.0], [1.0, -decay], np.ones(len(data)))
    return numerator / denominator
| 28.101695 | 97 | 0.651387 | from collections import namedtuple
import numpy as np
import talib
from jesse.helpers import get_candle_source, np_shift
from jesse.helpers import slice_candles
GATOR = namedtuple('GATOR', ['upper', 'lower', 'upper_change', 'lower_change'])
def gatorosc(candles: np.ndarray, source_type: str = "close", sequential: bool = False) -> GATOR:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)
teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)
lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)
upper = np.abs(jaw - teeth)
lower = -np.abs(teeth - lips)
upper_change = talib.MOM(upper, timeperiod=1)
lower_change = -talib.MOM(lower, timeperiod=1)
if sequential:
return GATOR(upper, lower, upper_change, lower_change)
else:
return GATOR(upper[-1], lower[-1], upper_change[-1], lower_change[-1])
def numpy_ewma(data, window):
alpha = 1 / window
n = data.shape[0]
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
weights = (1 - alpha) ** np.arange(n)
pw0 = (1 - alpha) ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
return cumsums * scale_arr[::-1] / weights.cumsum()
| true | true |
f73e9da59f7d2905985f41ece3b023016c18acfa | 3,801 | py | Python | epubcreator/misc/settings_store.py | JFTavares/ePubCreator | 56871800e9faff43643c85198f516643cdd5b4fd | [
"Unlicense"
] | 6 | 2015-04-07T23:27:35.000Z | 2022-03-03T20:31:28.000Z | epubcreator/misc/settings_store.py | JFTavares/ePubCreator | 56871800e9faff43643c85198f516643cdd5b4fd | [
"Unlicense"
] | null | null | null | epubcreator/misc/settings_store.py | JFTavares/ePubCreator | 56871800e9faff43643c85198f516643cdd5b4fd | [
"Unlicense"
] | 1 | 2021-04-17T11:36:22.000Z | 2021-04-17T11:36:22.000Z | import os
from PyQt4 import QtCore, QtGui
from epubcreator.epubbase.ebook import Ebook
from epubcreator.converters.converter_factory import ConverterFactory
class SettingsStore(QtCore.QSettings):
    """
    Stores and retrieves the application's configuration options. It also exposes
    every attribute related to the user's general preferences, which can be read
    from anywhere in the application. The class attributes are:

    -- One attribute per option of each converter. The attribute name is built by
       concatenating the file type the converter operates on with the option name,
       capitalizing the option's first letter. Example: the "ignoreEmptyParagraphs"
       option of the docx converter becomes "docxIgnoreEmptyParagraphs". That is the
       name under which the option is saved to disk, and the name consumers must use
       to read the class attribute.

    -- One attribute per option of the Ebook class. The naming differs from the
       scheme above only in that each attribute is prefixed with "epubOutput".

    -- Every key of the _SETTINGS dictionary.
    """

    # QSettings group under which all user preferences are persisted.
    _SETTINGS_GROUP = "userPreferences"

    # Attributes exposed by SettingsStore.
    # Key = attribute name.
    # Value = default value.
    _SETTINGS = dict(editor="",
                     sigilPath="",
                     allowImageProcessing=True)

    # Add every available option of every converter.
    _SETTINGS.update({c.FILE_TYPE + o.name[0].upper() + o.name[1:]: o.value for c in ConverterFactory.getAllConverters() for o in c.OPTIONS})

    # Add every available option of the Ebook class.
    _SETTINGS.update({"epubOutput" + o.name[0].upper() + o.name[1:]: o.value for o in Ebook.OPTIONS})

    def getAllSettingsForConverter(self, fileType):
        """
        Returns all options of a given converter. This is mostly a convenience
        method that lets a consumer pass a converter all of its stored options
        without having to do:

                op1 = settings.op1
                op2 = settings.op2
                ...

        @param fileType: a string indicating which converter's options to return.
                         Example: "docx", "fb2".

        @return: a dict.
                     Key: the option name.
                     Value: the stored value of the option.
        """
        return self._getAllSettingsByPrefix(fileType)

    def getAllSettingsForEbook(self):
        """
        Similar to getAllSettingsForConverter, but for the Ebook class options.
        """
        return self._getAllSettingsByPrefix("epubOutput")

    def __getattr__(self, item):
        # Only names registered in _SETTINGS are served from the ini store.
        if item not in SettingsStore._SETTINGS:
            raise AttributeError("'{0}' object has no attribute '{1}'".format(self.__class__.__name__, item))
        defaultValue = SettingsStore._SETTINGS[item]
        # Read with the default's type so QSettings returns a properly-typed value.
        return self.value("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, item), defaultValue, type(defaultValue))

    def __setattr__(self, key, value):
        # Registered settings are written straight to disk; anything else is a
        # normal instance attribute.
        if key in SettingsStore._SETTINGS:
            self.setValue("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, key), value)
        else:
            object.__setattr__(self, key, value)

    def _getAllSettingsByPrefix(self, prefix):
        # Strip the prefix and lower-case the first remaining letter to recover
        # the original option name (inverse of the registration naming scheme).
        i = len(prefix)
        return {s[i].lower() + s[i + 1:]: getattr(self, s) for s in SettingsStore._SETTINGS if s.startswith(prefix)}

    def __init__(self):
        iniPath = os.path.join(QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DataLocation), "epubcreator.ini")
        super().__init__(iniPath, QtCore.QSettings.IniFormat)
from PyQt4 import QtCore, QtGui
from epubcreator.epubbase.ebook import Ebook
from epubcreator.converters.converter_factory import ConverterFactory
class SettingsStore(QtCore.QSettings):
    """
    Persists and retrieves the application's configuration options backed by an
    ini file.

    Besides the explicit entries in ``_SETTINGS``, one attribute is exposed per
    option of every registered converter (name = converter file type + option
    name with its first letter capitalized, e.g. ``docxIgnoreEmptyParagraphs``)
    and per option of the ``Ebook`` class (prefixed ``epubOutput``). Attribute
    reads and writes for those names go straight to the backing store via
    ``__getattr__``/``__setattr__``.
    """
    # QSettings group under which all user preferences are stored.
    _SETTINGS_GROUP = "userPreferences"
    # Key = exposed attribute/setting name, value = its default.
    _SETTINGS = dict(editor="",
                     sigilPath="",
                     allowImageProcessing=True)
    # Register one setting per option of every converter.
    _SETTINGS.update({c.FILE_TYPE + o.name[0].upper() + o.name[1:]: o.value for c in ConverterFactory.getAllConverters() for o in c.OPTIONS})
    # Register one setting per Ebook option, prefixed "epubOutput".
    _SETTINGS.update({"epubOutput" + o.name[0].upper() + o.name[1:]: o.value for o in Ebook.OPTIONS})
    def getAllSettingsForConverter(self, fileType):
        """Return {option name: stored value} for the converter handling *fileType*
        (e.g. "docx")."""
        return self._getAllSettingsByPrefix(fileType)
    def getAllSettingsForEbook(self):
        """Return {option name: stored value} for the Ebook class options."""
        return self._getAllSettingsByPrefix("epubOutput")
    def __getattr__(self, item):
        # Only names registered in _SETTINGS are served from the ini store.
        if item not in SettingsStore._SETTINGS:
            raise AttributeError("'{0}' object has no attribute '{1}'".format(self.__class__.__name__, item))
        defaultValue = SettingsStore._SETTINGS[item]
        # Pass the default's type so QSettings returns a properly-typed value.
        return self.value("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, item), defaultValue, type(defaultValue))
    def __setattr__(self, key, value):
        # Registered settings are written to disk; anything else behaves as a
        # normal instance attribute.
        if key in SettingsStore._SETTINGS:
            self.setValue("{0}/{1}".format(SettingsStore._SETTINGS_GROUP, key), value)
        else:
            object.__setattr__(self, key, value)
    def _getAllSettingsByPrefix(self, prefix):
        # Strip the prefix and lower-case the next letter to recover the
        # original option name (inverse of the registration naming scheme).
        i = len(prefix)
        return {s[i].lower() + s[i + 1:]: getattr(self, s) for s in SettingsStore._SETTINGS if s.startswith(prefix)}
    def __init__(self):
        iniPath = os.path.join(QtGui.QDesktopServices.storageLocation(QtGui.QDesktopServices.DataLocation), "epubcreator.ini")
        super().__init__(iniPath, QtCore.QSettings.IniFormat)
f73e9e5f34bc40ffc35264b8d90bb26630570c74 | 1,378 | py | Python | distcache/utils.py | ujas09/distcache | a9d8124fd727472b2e79f8146b8bf73d6957a767 | [
"MIT"
] | 1 | 2020-06-28T22:43:23.000Z | 2020-06-28T22:43:23.000Z | distcache/utils.py | ujas09/distcache | a9d8124fd727472b2e79f8146b8bf73d6957a767 | [
"MIT"
] | null | null | null | distcache/utils.py | ujas09/distcache | a9d8124fd727472b2e79f8146b8bf73d6957a767 | [
"MIT"
] | null | null | null | """
Implements network utils like sending and receiving message over socket
"""
import pickle
def send_message(message, client_socket, HEADER_LENGTH, FORMAT):
    """
    Pickle *message* and send it on *client_socket*, preceded by a fixed-width
    header containing the payload length.

    :param message: any picklable object
    :param client_socket: connected socket to write to
    :param HEADER_LENGTH: width (in bytes) of the left-justified length header
    :param FORMAT: text encoding used for the header, e.g. "utf-8"
    """
    payload = pickle.dumps(message)
    header = "{:<{}}".format(len(payload), HEADER_LENGTH)
    # sendall() guarantees the whole buffer is transmitted; plain send() may
    # perform a partial write on a congested socket, corrupting the framing.
    client_socket.sendall(bytes(header, FORMAT))
    client_socket.sendall(payload)
def receive_message(client_socket, HEADER_LENGTH, FORMAT):
    """
    Receive one length-prefixed pickled message from *client_socket*.

    Waits at most 5 seconds. Returns the unpickled object, or False when no
    valid response arrives (timeout, closed peer, or malformed header) —
    callers treat a falsy result as "cache server did not respond".

    Note: the original implementation used ``break`` inside ``finally``, which
    silently swallows *every* in-flight exception (and is a SyntaxWarning /
    future SyntaxError in recent CPython); this version handles the expected
    failures explicitly instead.
    """
    client_socket.settimeout(5)
    try:
        header = client_socket.recv(HEADER_LENGTH)
        if not header:
            # Peer closed the connection before sending anything.
            return False
        message_length = int(header.decode(FORMAT))
        # recv() may return fewer bytes than requested; loop until the whole
        # payload has arrived.
        payload = b""
        while len(payload) < message_length:
            part = client_socket.recv(message_length - len(payload))
            if not part:
                return False
            payload += part
        return pickle.loads(payload)
    except (OSError, ValueError, pickle.UnpicklingError):
        # OSError covers socket.timeout; ValueError covers a non-numeric header.
        return False
def send_receive_ack(message, client_socket, HEADER_LENGTH, FORMAT):
    """
    Send *message* over *client_socket*, then block for the reply.

    :param message: any picklable message/object.
    :return: the decoded response, or a falsy value when none arrived.
    """
    send_message(message, client_socket, HEADER_LENGTH, FORMAT)
    response = receive_message(client_socket, HEADER_LENGTH, FORMAT)
    return response
| 30.622222 | 102 | 0.684325 | import pickle
def send_message(message, client_socket, HEADER_LENGTH, FORMAT):
    """Serialize *message* with pickle and write it to *client_socket*,
    preceded by a space-padded, fixed-width header holding the payload
    length (encoded with *FORMAT*)."""
    payload = pickle.dumps(message)
    header = str(len(payload)).ljust(HEADER_LENGTH)
    client_socket.send(header.encode(FORMAT))
    client_socket.send(payload)
def receive_message(client_socket, HEADER_LENGTH, FORMAT):
    """
    Receive one length-prefixed pickled message from *client_socket*.

    Waits at most 5 seconds; returns the unpickled object on success, or a
    falsy value (False, or b'' when the peer closed) when nothing valid
    arrived — callers treat a falsy result as "no response".
    """
    client_socket.settimeout(5)
    response = False  # default result when the cache server does not respond
    while True:
        try:
            response = client_socket.recv(HEADER_LENGTH)
            if not response:
                # Peer closed; `continue` still runs the finally-break below,
                # so the loop exits with the falsy b'' response.
                continue
            message_length = int(response.decode(FORMAT))
            response = client_socket.recv(message_length)
            response = pickle.loads(response)
        finally:
            # NOTE(review): `break` inside `finally` discards any in-flight
            # exception (including the recv timeout), so failures fall through
            # to the falsy default. This is deliberate here, but it raises a
            # SyntaxWarning on recent CPython and is slated to become an error.
            break
    return response
def send_receive_ack(message, client_socket, HEADER_LENGTH, FORMAT):
    """Send *message* on *client_socket*, then block for and return the reply
    (falsy when no valid response arrives within the receive timeout)."""
    send_message(message, client_socket, HEADER_LENGTH, FORMAT)
    return receive_message(client_socket, HEADER_LENGTH, FORMAT)
| true | true |
f73e9fd4d3660b579a96e1ebca56b74f6b47d341 | 24,219 | py | Python | examples/agent_policy.py | legend-of-zyda/LuxPythonEnvGym | 7d818b5943dad1b7fae3c66b612aae93c743bd0e | [
"MIT"
] | 61 | 2021-08-23T00:13:10.000Z | 2022-03-26T13:11:57.000Z | examples/agent_policy.py | legend-of-zyda/LuxPythonEnvGym | 7d818b5943dad1b7fae3c66b612aae93c743bd0e | [
"MIT"
] | 36 | 2021-08-25T03:32:29.000Z | 2021-11-20T05:15:29.000Z | examples/agent_policy.py | legend-of-zyda/LuxPythonEnvGym | 7d818b5943dad1b7fae3c66b612aae93c743bd0e | [
"MIT"
] | 28 | 2021-09-03T22:43:18.000Z | 2022-01-24T14:57:18.000Z | import sys
import time
from functools import partial # pip install functools
import copy
import random
import numpy as np
from gym import spaces
from luxai2021.env.agent import Agent, AgentWithModel
from luxai2021.game.actions import *
from luxai2021.game.game_constants import GAME_CONSTANTS
from luxai2021.game.position import Position
# https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points
def closest_node(node, nodes):
    """Return the index of the row of *nodes* nearest to *node*
    (by squared Euclidean distance)."""
    sq_dists = ((nodes - node) ** 2).sum(axis=1)
    return np.argmin(sq_dists)
def furthest_node(node, nodes):
    """Return the index of the row of *nodes* furthest from *node*
    (by squared Euclidean distance)."""
    sq_dists = ((nodes - node) ** 2).sum(axis=1)
    return np.argmax(sq_dists)
def smart_transfer_to_nearby(game, team, unit_id, unit, target_type_restriction=None, **kwarg):
    """
    Smart-transfers from the specified unit to a nearby neighbor. Prioritizes any
    nearby carts first, then any worker. Transfers the resource type which the unit
    has most of. Picks which cart/worker based on choosing a target that is most-full
    but able to take the most amount of resources.

    Args:
        game (Game): current game state
        team (int): team id of the transferring unit
        unit_id (str): id of the transferring unit
        unit (Unit): the transferring unit itself (may be None)
        target_type_restriction: if set, only consider targets of this unit type

    Returns:
        Action: Returns a TransferAction object, even if the request is an invalid
            transfer. Use TransferAction.is_valid() to check validity.
    """
    # Calculate how much resources could at-most be transferred
    resource_type = None
    resource_amount = 0
    target_unit = None

    if unit != None:
        # Pick the resource type the unit carries the most of.
        for type, amount in unit.cargo.items():
            if amount > resource_amount:
                resource_type = type
                resource_amount = amount

        # Find the best nearby unit to transfer to
        unit_cell = game.map.get_cell_by_pos(unit.pos)
        adjacent_cells = game.map.get_adjacent_cells(unit_cell)

        
        for c in adjacent_cells:
            for id, u in c.units.items():
                # Apply the unit type target restriction
                if target_type_restriction == None or u.type == target_type_restriction:
                    if u.team == team:
                        # This unit belongs to our team, set it as the winning transfer target
                        # if it's the best match.
                        if target_unit is None:
                            target_unit = u
                        else:
                            # Compare this unit to the existing target
                            if target_unit.type == u.type:
                                # Transfer to the target with the least capacity, but can accept
                                # all of our resources
                                if( u.get_cargo_space_left() >= resource_amount and 
                                    target_unit.get_cargo_space_left() >= resource_amount ):
                                    # Both units can accept all our resources. Prioritize one that is most-full.
                                    if u.get_cargo_space_left() < target_unit.get_cargo_space_left():
                                        # This new target it better, it has less space left and can take all our
                                        # resources
                                        target_unit = u
                                    
                                elif( target_unit.get_cargo_space_left() >= resource_amount ):
                                    # Don't change targets. Current one is best since it can take all
                                    # the resources, but new target can't.
                                    pass
                                elif( u.get_cargo_space_left() > target_unit.get_cargo_space_left() ):
                                    # Change targets, because neither target can accept all our resources and 
                                    # this target can take more resources.
                                    target_unit = u
                            elif u.type == Constants.UNIT_TYPES.CART:
                                # Transfer to this cart instead of the current worker target
                                target_unit = u
    
    # Build the transfer action request
    target_unit_id = None
    if target_unit is not None:
        target_unit_id = target_unit.id

        # Update the transfer amount based on the room of the target
        if target_unit.get_cargo_space_left() < resource_amount:
            resource_amount = target_unit.get_cargo_space_left()
    
    return TransferAction(team, unit_id, target_unit_id, resource_type, resource_amount)
########################################################################################################################
# This is the Agent that you need to design for the competition
########################################################################################################################
class AgentPolicy(AgentWithModel):
def __init__(self, mode="train", model=None) -> None:
"""
Arguments:
mode: "train" or "inference", which controls if this agent is for training or not.
model: The pretrained model, or if None it will operate in training mode.
"""
super().__init__(mode, model)
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
self.actions_units = [
partial(MoveAction, direction=Constants.DIRECTIONS.CENTER), # This is the do-nothing action
partial(MoveAction, direction=Constants.DIRECTIONS.NORTH),
partial(MoveAction, direction=Constants.DIRECTIONS.WEST),
partial(MoveAction, direction=Constants.DIRECTIONS.SOUTH),
partial(MoveAction, direction=Constants.DIRECTIONS.EAST),
partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.CART), # Transfer to nearby cart
partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.WORKER), # Transfer to nearby worker
SpawnCityAction,
PillageAction,
]
self.actions_cities = [
SpawnWorkerAction,
SpawnCartAction,
ResearchAction,
]
self.action_space = spaces.Discrete(max(len(self.actions_units), len(self.actions_cities)))
# Observation space: (Basic minimum for a miner agent)
# Object:
# 1x is worker
# 1x is cart
# 1x is citytile
#
# 5x direction_nearest_wood
# 1x distance_nearest_wood
# 1x amount
#
# 5x direction_nearest_coal
# 1x distance_nearest_coal
# 1x amount
#
# 5x direction_nearest_uranium
# 1x distance_nearest_uranium
# 1x amount
#
# 5x direction_nearest_city
# 1x distance_nearest_city
# 1x amount of fuel
#
# 28x (the same as above, but direction, distance, and amount to the furthest of each)
#
# 5x direction_nearest_worker
# 1x distance_nearest_worker
# 1x amount of cargo
# Unit:
# 1x cargo size
# State:
# 1x is night
# 1x percent of game done
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
self.observation_shape = (3 + 7 * 5 * 2 + 1 + 1 + 1 + 2 + 2 + 2 + 3,)
self.observation_space = spaces.Box(low=0, high=1, shape=
self.observation_shape, dtype=np.float16)
self.object_nodes = {}
def get_agent_type(self):
"""
Returns the type of agent. Use AGENT for inference, and LEARNING for training a model.
"""
if self.mode == "train":
return Constants.AGENT_TYPE.LEARNING
else:
return Constants.AGENT_TYPE.AGENT
def get_observation(self, game, unit, city_tile, team, is_new_turn):
"""
Implements getting a observation from the current game for this unit or city
"""
observation_index = 0
if is_new_turn:
# It's a new turn this event. This flag is set True for only the first observation from each turn.
# Update any per-turn fixed observation space that doesn't change per unit/city controlled.
# Build a list of object nodes by type for quick distance-searches
self.object_nodes = {}
# Add resources
for cell in game.map.resources:
if cell.resource.type not in self.object_nodes:
self.object_nodes[cell.resource.type] = np.array([[cell.pos.x, cell.pos.y]])
else:
self.object_nodes[cell.resource.type] = np.concatenate(
(
self.object_nodes[cell.resource.type],
[[cell.pos.x, cell.pos.y]]
),
axis=0
)
# Add your own and opponent units
for t in [team, (team + 1) % 2]:
for u in game.state["teamStates"][team]["units"].values():
key = str(u.type)
if t != team:
key = str(u.type) + "_opponent"
if key not in self.object_nodes:
self.object_nodes[key] = np.array([[u.pos.x, u.pos.y]])
else:
self.object_nodes[key] = np.concatenate(
(
self.object_nodes[key],
[[u.pos.x, u.pos.y]]
)
, axis=0
)
# Add your own and opponent cities
for city in game.cities.values():
for cells in city.city_cells:
key = "city"
if city.team != team:
key = "city_opponent"
if key not in self.object_nodes:
self.object_nodes[key] = np.array([[cells.pos.x, cells.pos.y]])
else:
self.object_nodes[key] = np.concatenate(
(
self.object_nodes[key],
[[cells.pos.x, cells.pos.y]]
)
, axis=0
)
# Observation space: (Basic minimum for a miner agent)
# Object:
# 1x is worker
# 1x is cart
# 1x is citytile
# 5x direction_nearest_wood
# 1x distance_nearest_wood
# 1x amount
#
# 5x direction_nearest_coal
# 1x distance_nearest_coal
# 1x amount
#
# 5x direction_nearest_uranium
# 1x distance_nearest_uranium
# 1x amount
#
# 5x direction_nearest_city
# 1x distance_nearest_city
# 1x amount of fuel
#
# 5x direction_nearest_worker
# 1x distance_nearest_worker
# 1x amount of cargo
#
# 28x (the same as above, but direction, distance, and amount to the furthest of each)
#
# Unit:
# 1x cargo size
# State:
# 1x is night
# 1x percent of game done
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
obs = np.zeros(self.observation_shape)
# Update the type of this object
# 1x is worker
# 1x is cart
# 1x is citytile
observation_index = 0
if unit is not None:
if unit.type == Constants.UNIT_TYPES.WORKER:
obs[observation_index] = 1.0 # Worker
else:
obs[observation_index+1] = 1.0 # Cart
if city_tile is not None:
obs[observation_index+2] = 1.0 # CityTile
observation_index += 3
pos = None
if unit is not None:
pos = unit.pos
else:
pos = city_tile.pos
if pos is None:
observation_index += 7 * 5 * 2
else:
# Encode the direction to the nearest objects
# 5x direction_nearest
# 1x distance
for distance_function in [closest_node, furthest_node]:
for key in [
Constants.RESOURCE_TYPES.WOOD,
Constants.RESOURCE_TYPES.COAL,
Constants.RESOURCE_TYPES.URANIUM,
"city",
str(Constants.UNIT_TYPES.WORKER)]:
# Process the direction to and distance to this object type
# Encode the direction to the nearest object (excluding itself)
# 5x direction
# 1x distance
if key in self.object_nodes:
if (
(key == "city" and city_tile is not None) or
(unit is not None and str(unit.type) == key and len(game.map.get_cell_by_pos(unit.pos).units) <= 1 )
):
# Filter out the current unit from the closest-search
closest_index = closest_node((pos.x, pos.y), self.object_nodes[key])
filtered_nodes = np.delete(self.object_nodes[key], closest_index, axis=0)
else:
filtered_nodes = self.object_nodes[key]
if len(filtered_nodes) == 0:
# No other object of this type
obs[observation_index + 5] = 1.0
else:
# There is another object of this type
closest_index = distance_function((pos.x, pos.y), filtered_nodes)
if closest_index is not None and closest_index >= 0:
closest = filtered_nodes[closest_index]
closest_position = Position(closest[0], closest[1])
direction = pos.direction_to(closest_position)
mapping = {
Constants.DIRECTIONS.CENTER: 0,
Constants.DIRECTIONS.NORTH: 1,
Constants.DIRECTIONS.WEST: 2,
Constants.DIRECTIONS.SOUTH: 3,
Constants.DIRECTIONS.EAST: 4,
}
obs[observation_index + mapping[direction]] = 1.0 # One-hot encoding direction
# 0 to 1 distance
distance = pos.distance_to(closest_position)
obs[observation_index + 5] = min(distance / 20.0, 1.0)
# 0 to 1 value (amount of resource, cargo for unit, or fuel for city)
if key == "city":
# City fuel as % of upkeep for 200 turns
c = game.cities[game.map.get_cell_by_pos(closest_position).city_tile.city_id]
obs[observation_index + 6] = min(
c.fuel / (c.get_light_upkeep() * 200.0),
1.0
)
elif key in [Constants.RESOURCE_TYPES.WOOD, Constants.RESOURCE_TYPES.COAL,
Constants.RESOURCE_TYPES.URANIUM]:
# Resource amount
obs[observation_index + 6] = min(
game.map.get_cell_by_pos(closest_position).resource.amount / 500,
1.0
)
else:
# Unit cargo
obs[observation_index + 6] = min(
next(iter(game.map.get_cell_by_pos(
closest_position).units.values())).get_cargo_space_left() / 100,
1.0
)
observation_index += 7
if unit is not None:
# Encode the cargo space
# 1x cargo size
obs[observation_index] = unit.get_cargo_space_left() / GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"][
"WORKER"]
observation_index += 1
else:
observation_index += 1
# Game state observations
# 1x is night
obs[observation_index] = game.is_night()
observation_index += 1
# 1x percent of game done
obs[observation_index] = game.state["turn"] / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"]
observation_index += 1
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
max_count = 30
for key in ["city", str(Constants.UNIT_TYPES.WORKER), str(Constants.UNIT_TYPES.CART)]:
if key in self.object_nodes:
obs[observation_index] = len(self.object_nodes[key]) / max_count
if (key + "_opponent") in self.object_nodes:
obs[observation_index + 1] = len(self.object_nodes[(key + "_opponent")]) / max_count
observation_index += 2
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
obs[observation_index] = game.state["teamStates"][team]["researchPoints"] / 200.0
obs[observation_index+1] = float(game.state["teamStates"][team]["researched"]["coal"])
obs[observation_index+2] = float(game.state["teamStates"][team]["researched"]["uranium"])
return obs
    def action_code_to_action(self, action_code, game, unit=None, city_tile=None, team=None):
        """
        Takes an action in the environment according to actionCode:
            action_code: Index of action to take into the action array.
        Returns: An action, or None if constructing the action failed.
        """
        # Map action_code index into to a constructed Action object
        try:
            x = None
            y = None
            if city_tile is not None:
                x = city_tile.pos.x
                y = city_tile.pos.y
            elif unit is not None:
                x = unit.pos.x
                y = unit.pos.y
            
            if city_tile != None:
                # City tile: wrap the index modulo the city action list, so any
                # action_code from the shared Discrete space is valid here.
                action = self.actions_cities[action_code%len(self.actions_cities)](
                    game=game,
                    unit_id=unit.id if unit else None,
                    unit=unit,
                    city_id=city_tile.city_id if city_tile else None,
                    citytile=city_tile,
                    team=team,
                    x=x,
                    y=y
                )
            else:
                # Unit: same modulo-wrapping against the unit action list.
                action = self.actions_units[action_code%len(self.actions_units)](
                    game=game,
                    unit_id=unit.id if unit else None,
                    unit=unit,
                    city_id=city_tile.city_id if city_tile else None,
                    citytile=city_tile,
                    team=team,
                    x=x,
                    y=y
                )
            
            return action
        except Exception as e:
            # Not a valid action
            print(e)
            return None
    def take_action(self, action_code, game, unit=None, city_tile=None, team=None):
        """
        Takes an action in the environment according to actionCode:
            actionCode: Index of action to take into the action array.
        """
        # Decode the index into a concrete Action (may be None if invalid) and
        # hand it to the match controller for execution.
        action = self.action_code_to_action(action_code, game, unit, city_tile, team)
        self.match_controller.take_action(action)
    def game_start(self, game):
        """
        This function is called at the start of each game. Use this to
        reset and initialize per game. Note that self.team may have
        been changed since last game. The game map has been created
        and starting units placed.

        Args:
            game ([type]): Game.
        """
        # Per-game baselines used by get_reward() to compute delta rewards.
        self.units_last = 0
        self.city_tiles_last = 0
        self.fuel_collected_last = 0
    def get_reward(self, game, is_game_finished, is_new_turn, is_game_error):
        """
        Returns the reward function for this step of the game. Reward should be a
        delta increment to the reward, not the total current reward.
        """
        if is_game_error:
            # Game environment step failed, assign a game lost reward to not incentivise this
            print("Game failed due to error")
            return -1.0

        if not is_new_turn and not is_game_finished:
            # Only apply rewards at the start of each turn or at game end
            return 0

        # Get some basic stats
        # NOTE(review): city_count / city_count_opponent are computed but not
        # used below — presumably kept for future reward shaping.
        unit_count = len(game.state["teamStates"][self.team]["units"])

        city_count = 0
        city_count_opponent = 0
        city_tile_count = 0
        city_tile_count_opponent = 0
        for city in game.cities.values():
            if city.team == self.team:
                city_count += 1
            else:
                city_count_opponent += 1

            for cell in city.city_cells:
                if city.team == self.team:
                    city_tile_count += 1
                else:
                    city_tile_count_opponent += 1
        
        rewards = {}
        
        # Give a reward for unit creation/death. 0.05 reward per unit.
        rewards["rew/r_units"] = (unit_count - self.units_last) * 0.05
        self.units_last = unit_count

        # Give a reward for city creation/death. 0.1 reward per city.
        rewards["rew/r_city_tiles"] = (city_tile_count - self.city_tiles_last) * 0.1
        self.city_tiles_last = city_tile_count

        # Reward collecting fuel
        fuel_collected = game.stats["teamStats"][self.team]["fuelGenerated"]
        rewards["rew/r_fuel_collected"] = ( (fuel_collected - self.fuel_collected_last) / 20000 )
        self.fuel_collected_last = fuel_collected

        # Give a reward of 1.0 per city tile alive at the end of the game
        rewards["rew/r_city_tiles_end"] = 0
        if is_game_finished:
            self.is_last_turn = True
            rewards["rew/r_city_tiles_end"] = city_tile_count

            '''
            # Example of a game win/loss reward instead
            if game.get_winning_team() == self.team:
                rewards["rew/r_game_win"] = 100.0 # Win
            else:
                rewards["rew/r_game_win"] = -100.0 # Loss
            '''
        
        # Sum the named reward components into the scalar step reward.
        reward = 0
        for name, value in rewards.items():
            reward += value

        return reward
    def turn_heurstics(self, game, is_first_turn):
        """
        This is called pre-observation actions to allow for hardcoded heuristics
        to control a subset of units. Any unit or city that gets an action from this
        callback, will not create an observation+action.

        NOTE(review): the name looks like a typo of "turn_heuristics", but it is
        part of the callback interface, so it must not be renamed here.

        Args:
            game ([type]): Game in progress
            is_first_turn (bool): True if it's the first turn of a game.
        """
        # Intentionally a no-op: all units and cities fall through to the model.
        return
| 42.046875 | 132 | 0.516991 | import sys
import time
from functools import partial
import copy
import random
import numpy as np
from gym import spaces
from luxai2021.env.agent import Agent, AgentWithModel
from luxai2021.game.actions import *
from luxai2021.game.game_constants import GAME_CONSTANTS
from luxai2021.game.position import Position
def closest_node(node, nodes):
    """Index of the row of *nodes* nearest to *node* (squared distance)."""
    deltas = nodes - node
    return np.argmin(np.sum(deltas ** 2, axis=1))
def furthest_node(node, nodes):
    """Index of the row of *nodes* furthest from *node* (squared distance)."""
    deltas = nodes - node
    return np.argmax(np.sum(deltas ** 2, axis=1))
def smart_transfer_to_nearby(game, team, unit_id, unit, target_type_restriction=None, **kwarg):
    """
    Smart-transfer from *unit* to the best adjacent friendly unit.

    Transfers the resource type the unit carries most of. Prefers carts over
    workers; among equals, prefers the most-full target that can still accept
    the full amount. Always returns a TransferAction, even when no valid
    target exists — use TransferAction.is_valid() to check validity.
    """
    # Pick the resource type the unit carries the most of.
    resource_type = None
    resource_amount = 0
    target_unit = None
    if unit != None:
        for type, amount in unit.cargo.items():
            if amount > resource_amount:
                resource_type = type
                resource_amount = amount
        # Scan the four adjacent cells for candidate friendly targets.
        unit_cell = game.map.get_cell_by_pos(unit.pos)
        adjacent_cells = game.map.get_adjacent_cells(unit_cell)
        for c in adjacent_cells:
            for id, u in c.units.items():
                if target_type_restriction == None or u.type == target_type_restriction:
                    if u.team == team:
                        if target_unit is None:
                            target_unit = u
                        else:
                            # Compare this unit to the existing target
                            if target_unit.type == u.type:
                                # Transfer to the target with the least capacity, but can accept
                                # all of our resources
                                if( u.get_cargo_space_left() >= resource_amount and
                                    target_unit.get_cargo_space_left() >= resource_amount ):
                                    # Both units can accept all our resources. Prioritize one that is most-full.
                                    if u.get_cargo_space_left() < target_unit.get_cargo_space_left():
                                        # This new target it better, it has less space left and can take all our
                                        # resources
                                        target_unit = u
                                elif( target_unit.get_cargo_space_left() >= resource_amount ):
                                    # Don't change targets. Current one is best since it can take all
                                    # the resources, but the new target can't.
                                    pass
                                elif( u.get_cargo_space_left() > target_unit.get_cargo_space_left() ):
                                    # Change targets, because neither target can accept all our resources and
                                    # this target can take more resources.
                                    target_unit = u
                            elif u.type == Constants.UNIT_TYPES.CART:
                                # Transfer to this cart instead of the current worker target
                                target_unit = u
    # Build the transfer action request; cap the amount at the target's free space.
    target_unit_id = None
    if target_unit is not None:
        target_unit_id = target_unit.id
        # Update the transfer amount based on the room of the target
        if target_unit.get_cargo_space_left() < resource_amount:
            resource_amount = target_unit.get_cargo_space_left()
    return TransferAction(team, unit_id, target_unit_id, resource_type, resource_amount)
########################################################################################################################
# This is the Agent that you need to design for the competition
########################################################################################################################
class AgentPolicy(AgentWithModel):
def __init__(self, mode="train", model=None) -> None:
super().__init__(mode, model)
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
self.actions_units = [
partial(MoveAction, direction=Constants.DIRECTIONS.CENTER), # This is the do-nothing action
partial(MoveAction, direction=Constants.DIRECTIONS.NORTH),
partial(MoveAction, direction=Constants.DIRECTIONS.WEST),
partial(MoveAction, direction=Constants.DIRECTIONS.SOUTH),
partial(MoveAction, direction=Constants.DIRECTIONS.EAST),
partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.CART), # Transfer to nearby cart
partial(smart_transfer_to_nearby, target_type_restriction=Constants.UNIT_TYPES.WORKER), # Transfer to nearby worker
SpawnCityAction,
PillageAction,
]
self.actions_cities = [
SpawnWorkerAction,
SpawnCartAction,
ResearchAction,
]
self.action_space = spaces.Discrete(max(len(self.actions_units), len(self.actions_cities)))
# Observation space: (Basic minimum for a miner agent)
# Object:
# 1x is worker
# 1x is cart
# 1x is citytile
#
# 5x direction_nearest_wood
# 1x distance_nearest_wood
# 1x amount
#
# 5x direction_nearest_coal
# 1x distance_nearest_coal
# 1x amount
#
# 5x direction_nearest_uranium
# 1x distance_nearest_uranium
# 1x amount
#
# 5x direction_nearest_city
# 1x distance_nearest_city
# 1x amount of fuel
#
# 28x (the same as above, but direction, distance, and amount to the furthest of each)
#
# 5x direction_nearest_worker
# 1x distance_nearest_worker
# 1x amount of cargo
# Unit:
# 1x cargo size
# State:
# 1x is night
# 1x percent of game done
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
self.observation_shape = (3 + 7 * 5 * 2 + 1 + 1 + 1 + 2 + 2 + 2 + 3,)
self.observation_space = spaces.Box(low=0, high=1, shape=
self.observation_shape, dtype=np.float16)
self.object_nodes = {}
def get_agent_type(self):
if self.mode == "train":
return Constants.AGENT_TYPE.LEARNING
else:
return Constants.AGENT_TYPE.AGENT
def get_observation(self, game, unit, city_tile, team, is_new_turn):
observation_index = 0
if is_new_turn:
# It's a new turn this event. This flag is set True for only the first observation from each turn.
# Build a list of object nodes by type for quick distance-searches
self.object_nodes = {}
# Add resources
for cell in game.map.resources:
if cell.resource.type not in self.object_nodes:
self.object_nodes[cell.resource.type] = np.array([[cell.pos.x, cell.pos.y]])
else:
self.object_nodes[cell.resource.type] = np.concatenate(
(
self.object_nodes[cell.resource.type],
[[cell.pos.x, cell.pos.y]]
),
axis=0
)
# Add your own and opponent units
for t in [team, (team + 1) % 2]:
for u in game.state["teamStates"][team]["units"].values():
key = str(u.type)
if t != team:
key = str(u.type) + "_opponent"
if key not in self.object_nodes:
self.object_nodes[key] = np.array([[u.pos.x, u.pos.y]])
else:
self.object_nodes[key] = np.concatenate(
(
self.object_nodes[key],
[[u.pos.x, u.pos.y]]
)
, axis=0
)
# Add your own and opponent cities
for city in game.cities.values():
for cells in city.city_cells:
key = "city"
if city.team != team:
key = "city_opponent"
if key not in self.object_nodes:
self.object_nodes[key] = np.array([[cells.pos.x, cells.pos.y]])
else:
self.object_nodes[key] = np.concatenate(
(
self.object_nodes[key],
[[cells.pos.x, cells.pos.y]]
)
, axis=0
)
# Observation space: (Basic minimum for a miner agent)
# Object:
# 1x is worker
# 1x is cart
# 1x is citytile
# 5x direction_nearest_wood
# 1x distance_nearest_wood
# 1x amount
#
# 5x direction_nearest_coal
# 1x distance_nearest_coal
# 1x amount
#
# 5x direction_nearest_uranium
# 1x distance_nearest_uranium
# 1x amount
#
# 5x direction_nearest_city
# 1x distance_nearest_city
# 1x amount of fuel
#
# 5x direction_nearest_worker
# 1x distance_nearest_worker
# 1x amount of cargo
#
# 28x (the same as above, but direction, distance, and amount to the furthest of each)
#
# Unit:
# 1x cargo size
# State:
# 1x is night
# 1x percent of game done
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
obs = np.zeros(self.observation_shape)
# Update the type of this object
# 1x is worker
# 1x is cart
# 1x is citytile
observation_index = 0
if unit is not None:
if unit.type == Constants.UNIT_TYPES.WORKER:
obs[observation_index] = 1.0 # Worker
else:
obs[observation_index+1] = 1.0 # Cart
if city_tile is not None:
obs[observation_index+2] = 1.0 # CityTile
observation_index += 3
pos = None
if unit is not None:
pos = unit.pos
else:
pos = city_tile.pos
if pos is None:
observation_index += 7 * 5 * 2
else:
# Encode the direction to the nearest objects
# 5x direction_nearest
# 1x distance
for distance_function in [closest_node, furthest_node]:
for key in [
Constants.RESOURCE_TYPES.WOOD,
Constants.RESOURCE_TYPES.COAL,
Constants.RESOURCE_TYPES.URANIUM,
"city",
str(Constants.UNIT_TYPES.WORKER)]:
# Process the direction to and distance to this object type
# Encode the direction to the nearest object (excluding itself)
# 5x direction
# 1x distance
if key in self.object_nodes:
if (
(key == "city" and city_tile is not None) or
(unit is not None and str(unit.type) == key and len(game.map.get_cell_by_pos(unit.pos).units) <= 1 )
):
# Filter out the current unit from the closest-search
closest_index = closest_node((pos.x, pos.y), self.object_nodes[key])
filtered_nodes = np.delete(self.object_nodes[key], closest_index, axis=0)
else:
filtered_nodes = self.object_nodes[key]
if len(filtered_nodes) == 0:
# No other object of this type
obs[observation_index + 5] = 1.0
else:
# There is another object of this type
closest_index = distance_function((pos.x, pos.y), filtered_nodes)
if closest_index is not None and closest_index >= 0:
closest = filtered_nodes[closest_index]
closest_position = Position(closest[0], closest[1])
direction = pos.direction_to(closest_position)
mapping = {
Constants.DIRECTIONS.CENTER: 0,
Constants.DIRECTIONS.NORTH: 1,
Constants.DIRECTIONS.WEST: 2,
Constants.DIRECTIONS.SOUTH: 3,
Constants.DIRECTIONS.EAST: 4,
}
obs[observation_index + mapping[direction]] = 1.0 # One-hot encoding direction
# 0 to 1 distance
distance = pos.distance_to(closest_position)
obs[observation_index + 5] = min(distance / 20.0, 1.0)
# 0 to 1 value (amount of resource, cargo for unit, or fuel for city)
if key == "city":
# City fuel as % of upkeep for 200 turns
c = game.cities[game.map.get_cell_by_pos(closest_position).city_tile.city_id]
obs[observation_index + 6] = min(
c.fuel / (c.get_light_upkeep() * 200.0),
1.0
)
elif key in [Constants.RESOURCE_TYPES.WOOD, Constants.RESOURCE_TYPES.COAL,
Constants.RESOURCE_TYPES.URANIUM]:
# Resource amount
obs[observation_index + 6] = min(
game.map.get_cell_by_pos(closest_position).resource.amount / 500,
1.0
)
else:
# Unit cargo
obs[observation_index + 6] = min(
next(iter(game.map.get_cell_by_pos(
closest_position).units.values())).get_cargo_space_left() / 100,
1.0
)
observation_index += 7
if unit is not None:
# Encode the cargo space
# 1x cargo size
obs[observation_index] = unit.get_cargo_space_left() / GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"][
"WORKER"]
observation_index += 1
else:
observation_index += 1
# Game state observations
# 1x is night
obs[observation_index] = game.is_night()
observation_index += 1
# 1x percent of game done
obs[observation_index] = game.state["turn"] / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"]
observation_index += 1
# 2x citytile counts [cur player, opponent]
# 2x worker counts [cur player, opponent]
# 2x cart counts [cur player, opponent]
max_count = 30
for key in ["city", str(Constants.UNIT_TYPES.WORKER), str(Constants.UNIT_TYPES.CART)]:
if key in self.object_nodes:
obs[observation_index] = len(self.object_nodes[key]) / max_count
if (key + "_opponent") in self.object_nodes:
obs[observation_index + 1] = len(self.object_nodes[(key + "_opponent")]) / max_count
observation_index += 2
# 1x research points [cur player]
# 1x researched coal [cur player]
# 1x researched uranium [cur player]
obs[observation_index] = game.state["teamStates"][team]["researchPoints"] / 200.0
obs[observation_index+1] = float(game.state["teamStates"][team]["researched"]["coal"])
obs[observation_index+2] = float(game.state["teamStates"][team]["researched"]["uranium"])
return obs
def action_code_to_action(self, action_code, game, unit=None, city_tile=None, team=None):
# Map action_code index into to a constructed Action object
try:
x = None
y = None
if city_tile is not None:
x = city_tile.pos.x
y = city_tile.pos.y
elif unit is not None:
x = unit.pos.x
y = unit.pos.y
if city_tile != None:
action = self.actions_cities[action_code%len(self.actions_cities)](
game=game,
unit_id=unit.id if unit else None,
unit=unit,
city_id=city_tile.city_id if city_tile else None,
citytile=city_tile,
team=team,
x=x,
y=y
)
else:
action = self.actions_units[action_code%len(self.actions_units)](
game=game,
unit_id=unit.id if unit else None,
unit=unit,
city_id=city_tile.city_id if city_tile else None,
citytile=city_tile,
team=team,
x=x,
y=y
)
return action
except Exception as e:
# Not a valid action
print(e)
return None
def take_action(self, action_code, game, unit=None, city_tile=None, team=None):
action = self.action_code_to_action(action_code, game, unit, city_tile, team)
self.match_controller.take_action(action)
def game_start(self, game):
self.units_last = 0
self.city_tiles_last = 0
self.fuel_collected_last = 0
def get_reward(self, game, is_game_finished, is_new_turn, is_game_error):
if is_game_error:
# Game environment step failed, assign a game lost reward to not incentivise this
print("Game failed due to error")
return -1.0
if not is_new_turn and not is_game_finished:
# Only apply rewards at the start of each turn or at game end
return 0
# Get some basic stats
unit_count = len(game.state["teamStates"][self.team]["units"])
city_count = 0
city_count_opponent = 0
city_tile_count = 0
city_tile_count_opponent = 0
for city in game.cities.values():
if city.team == self.team:
city_count += 1
else:
city_count_opponent += 1
for cell in city.city_cells:
if city.team == self.team:
city_tile_count += 1
else:
city_tile_count_opponent += 1
rewards = {}
# Give a reward for unit creation/death. 0.05 reward per unit.
rewards["rew/r_units"] = (unit_count - self.units_last) * 0.05
self.units_last = unit_count
# Give a reward for city creation/death. 0.1 reward per city.
rewards["rew/r_city_tiles"] = (city_tile_count - self.city_tiles_last) * 0.1
self.city_tiles_last = city_tile_count
# Reward collecting fuel
fuel_collected = game.stats["teamStats"][self.team]["fuelGenerated"]
rewards["rew/r_fuel_collected"] = ( (fuel_collected - self.fuel_collected_last) / 20000 )
self.fuel_collected_last = fuel_collected
# Give a reward of 1.0 per city tile alive at the end of the game
rewards["rew/r_city_tiles_end"] = 0
if is_game_finished:
self.is_last_turn = True
rewards["rew/r_city_tiles_end"] = city_tile_count
reward = 0
for name, value in rewards.items():
reward += value
return reward
    def turn_heurstics(self, game, is_first_turn):
        """Hook for scripted (non-learned) per-turn actions; a no-op here.

        Sub-classes can override this to issue hard-coded actions before
        the learned policy acts.  NOTE(review): the name is presumably a
        typo of "turn_heuristics" but is kept for interface compatibility.
        """
        return
| true | true |
f73ea08d026446221868f6b1e5293fe358e3ce8d | 1,636 | py | Python | gge_proxy_manager/models/common.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | null | null | null | gge_proxy_manager/models/common.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | 1 | 2015-04-09T15:58:19.000Z | 2015-04-14T06:37:02.000Z | gge_proxy_manager/models/common.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from .player import Player
from django.core.urlresolvers import reverse
Q = models.Q
class Game(models.Model):
    """A GGE game product tracked by the proxy manager."""
    # Display name; indexed for lookups by name.
    name = models.CharField(max_length=128, db_index=True)
    # Unique key identifying this game product.
    # NOTE(review): presumably matched against incoming proxy traffic -- confirm.
    product_key = models.CharField(max_length=32, unique=True)
    class Meta:
        app_label = 'gge_proxy_manager'
    def __unicode__(self):
        # Python 2 / legacy Django textual representation.
        return self.name
class Kingdom(models.Model):
    """A kingdom record, optionally attached to a Game."""
    # Optional display name; may remain unset until resolved.
    name = models.CharField(max_length=128, null=True, default=None, blank=True)
    # Numeric kingdom id; indexed.  NOTE(review): presumably the in-game
    # kingdom id -- confirm against the proxy protocol.
    kid = models.SmallIntegerField(db_index=True)
    visual_key = models.CharField(max_length=8, default="-")
    # Owning game; nullable, so kingdoms can exist before being attached.
    game = models.ForeignKey(Game, db_index=True, null=True, default=None, blank=True)
    class Meta:
        app_label = 'gge_proxy_manager'
    def __unicode__(self):
        # Fall back to a placeholder while the name is unknown.
        if not self.name:
            return "(unknown)"
        return self.name
class Confederation(models.Model):
    """A user-owned confederation (grouping of alliances) with a public page."""
    name = models.CharField(max_length=128)
    # The user who created / administers this confederation.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # URL slug used by get_absolute_url.
    slug = models.SlugField(max_length=128)
    logo = models.FileField(null=True, blank=True, default=None, upload_to='confederation_logos/')
    description = models.TextField(null=True, blank=True, default=None)
    class Meta:
        app_label = 'gge_proxy_manager'
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        # Reverse to the internal dashboard page for this confederation.
        return reverse("intern:confederation_dashboard", kwargs={"slug": self.slug})
def get_members(self):
return Player.objects.filter(alliance__confederation=self).order_by('alliance_rank', '-level') | 30.296296 | 102 | 0.714548 | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from .player import Player
from django.core.urlresolvers import reverse
Q = models.Q
class Game(models.Model):
name = models.CharField(max_length=128, db_index=True)
product_key = models.CharField(max_length=32, unique=True)
class Meta:
app_label = 'gge_proxy_manager'
def __unicode__(self):
return self.name
class Kingdom(models.Model):
name = models.CharField(max_length=128, null=True, default=None, blank=True)
kid = models.SmallIntegerField(db_index=True)
visual_key = models.CharField(max_length=8, default="-")
game = models.ForeignKey(Game, db_index=True, null=True, default=None, blank=True)
class Meta:
app_label = 'gge_proxy_manager'
def __unicode__(self):
if not self.name:
return "(unknown)"
return self.name
class Confederation(models.Model):
name = models.CharField(max_length=128)
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
slug = models.SlugField(max_length=128)
logo = models.FileField(null=True, blank=True, default=None, upload_to='confederation_logos/')
description = models.TextField(null=True, blank=True, default=None)
class Meta:
app_label = 'gge_proxy_manager'
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse("intern:confederation_dashboard", kwargs={"slug": self.slug})
def get_members(self):
return Player.objects.filter(alliance__confederation=self).order_by('alliance_rank', '-level') | true | true |
f73ea0f7d09097f61f4b3add5eda83e1a207affc | 962 | py | Python | jsngram/notifications/models.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | null | null | null | jsngram/notifications/models.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | 11 | 2020-06-05T20:06:56.000Z | 2022-02-17T20:23:22.000Z | jsngram/notifications/models.py | jjh0106/jsngram | 74f2fd79ddd6a6975d3c981ca9cb5bbed050f532 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsngram.users import models as user_models
from jsngram.images import models as image_models
class Notification(image_models.TimeStampedModel):
    """An activity notification (like / comment / follow) between users."""
    TYPE_CHOICES = (
        ('like', 'Like'),  # first value is stored in the database, second is the admin-panel label
        ('comment', 'Comment'),
        ('follow', 'Follow')
    )
    # User who triggered the notification.
    creator = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='creator')
    # User who receives the notification.
    to = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='to')
    notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
    # Optional image reference -- presumably set for like/comment notifications.
    image = models.ForeignKey(image_models.Image, on_delete=models.PROTECT, null=True, blank=True)
    # Optional comment text -- presumably set only for comment notifications.
    comment = models.TextField(null=True, blank=True)
    class Meta:
        # Newest first; created_at presumably comes from TimeStampedModel.
        ordering = ['-created_at']
    def __str__(self):
        return 'From: {} - To: {}'.format(self.creator, self.to)
from django.utils.encoding import python_2_unicode_compatible
from jsngram.users import models as user_models
from jsngram.images import models as image_models
class Notification(image_models.TimeStampedModel):
TYPE_CHOICES = (
('like', 'Like'),
('comment', 'Comment'),
('follow', 'Follow')
)
creator = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='creator')
to = models.ForeignKey(user_models.User, on_delete=models.PROTECT, related_name='to')
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
image = models.ForeignKey(image_models.Image, on_delete=models.PROTECT, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
class Meta:
ordering = ['-created_at']
def __str__(self):
return 'From: {} - To: {}'.format(self.creator, self.to) | true | true |
f73ea13bf0258131db11e58907d7c863760cd695 | 155 | py | Python | quik/quik-rts.py | inwise/VolatilityIndexLevy | 30de823f240cbfc026443d0c702981dc16548274 | [
"Apache-2.0"
] | null | null | null | quik/quik-rts.py | inwise/VolatilityIndexLevy | 30de823f240cbfc026443d0c702981dc16548274 | [
"Apache-2.0"
] | null | null | null | quik/quik-rts.py | inwise/VolatilityIndexLevy | 30de823f240cbfc026443d0c702981dc16548274 | [
"Apache-2.0"
] | 2 | 2019-07-04T08:18:07.000Z | 2021-06-03T11:09:35.000Z | # -*- coding: utf-8 -*-
__author__ = 'Александр Гречко'
from quik import quik_dde_server_history
# NOTE(review): presumably starts a DDE server that receives history for
# the 'rts' instrument from the QUIK terminal -- confirm against the quik
# package.
qds=quik_dde_server_history('rts')
qds.start()
| 17.222222 | 41 | 0.703226 |
__author__ = 'Александр Гречко'
from quik import quik_dde_server_history
qds=quik_dde_server_history('rts')
qds.start()
| true | true |
f73ea377b494abcbc73ed5facbbc17c8d55fa9d3 | 1,076 | py | Python | arcusd/daemon/arcusd_signals.py | cuenca-mx/arcusd | dbbe420f98568ddc7cc35431e27b009a990b24c1 | [
"MIT"
] | null | null | null | arcusd/daemon/arcusd_signals.py | cuenca-mx/arcusd | dbbe420f98568ddc7cc35431e27b009a990b24c1 | [
"MIT"
] | 241 | 2018-11-27T22:59:02.000Z | 2022-03-31T11:03:49.000Z | arcusd/daemon/arcusd_signals.py | cuenca-mx/arcusd | dbbe420f98568ddc7cc35431e27b009a990b24c1 | [
"MIT"
] | 2 | 2019-09-22T03:42:32.000Z | 2020-11-02T20:57:13.000Z | from datetime import datetime
from celery.signals import task_postrun, task_prerun
from arcusd.contracts import Contract
from arcusd.data_access.tasks import save_task_info, update_task_info
@task_prerun.connect
def task_before_run(task_id, task, *args, **kwargs):
    """Persist metadata about a Celery task just before it executes.

    The request id defaults to the task id when the caller did not pass
    an explicit ``request_id`` keyword argument.
    """
    request_id = task.request.kwargs.get('request_id', task_id)
    save_task_info({
        'task_id': task_id,
        'task_sender': task.request.origin,
        'task_args': task.request.args,
        'task_kwargs': task.request.kwargs,
        'task_retries': task.request.retries,
        'task_start': datetime.utcnow(),
        'request_id': request_id,
    })
@task_postrun.connect
def task_after_run(task_id, task, retval, state, *args, **kwargs):
    """Record the final state and end time of a Celery task after it runs.

    If the task returned a Contract, its dict form is stored alongside
    the state.
    """
    request_id = task.request.kwargs.get('request_id', task_id)
    task_info = {
        'task_state': state,
        'task_eta': task.request.eta,
        'task_end': datetime.utcnow(),
    }
    if isinstance(retval, Contract):
        task_info['task_retval'] = retval.to_dict()
    update_task_info({'request_id': request_id}, task_info)
| 30.742857 | 69 | 0.711896 | from datetime import datetime
from celery.signals import task_postrun, task_prerun
from arcusd.contracts import Contract
from arcusd.data_access.tasks import save_task_info, update_task_info
@task_prerun.connect
def task_before_run(task_id, task, *args, **kwargs):
request_id = task.request.kwargs.get('request_id', task_id)
task_info = dict(
task_id=task_id,
task_sender=task.request.origin,
task_args=task.request.args,
task_kwargs=task.request.kwargs,
task_retries=task.request.retries,
task_start=datetime.utcnow(),
request_id=request_id,
)
save_task_info(task_info)
@task_postrun.connect
def task_after_run(task_id, task, retval, state, *args, **kwargs):
request_id = task.request.kwargs.get('request_id', task_id)
task_info = dict(
task_state=state,
task_eta=task.request.eta,
task_end=datetime.utcnow(),
)
if isinstance(retval, Contract):
task_info['task_retval'] = retval.to_dict()
update_task_info({'request_id': request_id}, task_info)
| true | true |
f73ea3f50e70aa6f2769bc79957a653e03482c19 | 59 | py | Python | uzkChemTem/__init__.py | DomiDre/uzkChemTem | 8a5d259904465901a7ba2a8e6f6c81dbe322d1f5 | [
"MIT"
] | null | null | null | uzkChemTem/__init__.py | DomiDre/uzkChemTem | 8a5d259904465901a7ba2a8e6f6c81dbe322d1f5 | [
"MIT"
] | null | null | null | uzkChemTem/__init__.py | DomiDre/uzkChemTem | 8a5d259904465901a7ba2a8e6f6c81dbe322d1f5 | [
"MIT"
] | null | null | null | from .tem import load_file, pretty_plot, convert_tiffolder
| 29.5 | 58 | 0.847458 | from .tem import load_file, pretty_plot, convert_tiffolder
| true | true |
f73ea44328adf8e7bd0fede20c5b5ed48b240720 | 2,962 | py | Python | dwi_utilities/comp_high_b.py | pritesh-mehta/dwi-utilities | f1e307fcf51ef4e4cc95ac311f031e3521c1fbbf | [
"Apache-2.0"
] | null | null | null | dwi_utilities/comp_high_b.py | pritesh-mehta/dwi-utilities | f1e307fcf51ef4e4cc95ac311f031e3521c1fbbf | [
"Apache-2.0"
] | 1 | 2022-01-31T23:43:20.000Z | 2022-01-31T23:43:20.000Z | dwi_utilities/comp_high_b.py | pritesh-mehta/dwi-utilities | f1e307fcf51ef4e4cc95ac311f031e3521c1fbbf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
@author: pritesh-mehta
"""
import numpy as np
from scipy.optimize import curve_fit
from pathlib import Path
from argparse import ArgumentParser
from dwi_utilities.monoexponential_decay import log_func, func
import dwi_utilities.nifti_utilities as nutil
def comp_high_b_case(case_dir, target_bval, save_case=False, output_dir=None, extension='.nii.gz'):
    """Synthesize a high b-value DWI volume for a single case.

    Fits a monoexponential decay model voxel-wise to the acquired (low)
    b-value volumes found in ``case_dir`` and extrapolates the signal at
    ``target_bval``.

    Parameters
    ----------
    case_dir : str or Path
        Directory containing one NIfTI per b-value, named like ``b<value>.nii.gz``.
    target_bval : int
        b-value to extrapolate to.
    save_case : bool
        If True, write the computed volume into ``output_dir``.
    output_dir : str or Path, optional
        Destination directory, used when ``save_case`` is True.
    extension : str
        Output filename extension.

    Returns
    -------
    numpy.ndarray
        The computed high b-value volume.
    """
    eps = 1e-8  # avoid log(0) in zero-signal voxels
    data_stack = []
    bval_list = []
    filepaths = nutil.path_generator(case_dir)
    for path in filepaths:
        name, nii, data = nutil.load(path)
        data_stack.append(data)
        # BUG FIX: parse the b-value as a number.  As strings they sorted
        # lexicographically (e.g. "1000" < "50"), mis-ordering the stack,
        # and curve_fit received a non-numeric x array.
        bval_list.append(float(name.replace('.nii.gz', '').replace('b', '')))
    # Order the data stack by ascending b-value (key avoids comparing the
    # numpy volumes themselves when two b-values tie).
    bval_list, data_stack = \
        zip(*sorted(zip(bval_list, data_stack), key=lambda pair: pair[0]))
    # Fit S(b) = S0 * exp(-b * D) voxel-wise in log space and evaluate it
    # at the target b-value.
    x = np.array(bval_list, dtype=float)  # loop-invariant, hoisted
    data = np.array(data_stack)
    shape = np.shape(data[0])
    highb_data = np.zeros(shape)
    for i in range(shape[0]):
        for j in range(shape[1]):
            for k in range(shape[2]):
                y = np.array([vol[i][j][k] for vol in data]) + eps
                z = np.log(y)
                popt, pcov = curve_fit(log_func, x, z)
                if popt[1] < 0:
                    # Negative decay rate is non-physical; zero the voxel.
                    highb_data[i][j][k] = 0
                else:
                    highb_data[i][j][k] = func(target_bval, np.exp(popt[0]), popt[1])
    if save_case:
        case_name = Path(case_dir).parts[-1]
        save_path = Path(output_dir) / (case_name + extension)
        # NOTE: reuses the NIfTI header of the last volume loaded.
        nutil.save(save_path, nii, highb_data)
    return highb_data
def comp_high_b_dir(cases_dir, target_bval, output_dir, extension='.nii.gz'):
    """Generate a high b-value DWI for every case directory under ``cases_dir``."""
    for case_path in Path(cases_dir).iterdir():
        print("Processing:", case_path)
        comp_high_b_case(case_path, target_bval, save_case=True,
                         output_dir=output_dir, extension=extension)
    return None
def process():
    """Command-line entry point.

    Parses the CLI flags and dispatches to single-case (``--case``) or
    whole-directory processing.
    """
    parser = ArgumentParser()
    arg_specs = [
        ('--input_dir', dict(required=True, type=str)),
        ('--target_bval', dict(required=True, type=int)),
        ('--output_dir', dict(required=True, type=str)),
        ('--case', dict(required=False, action="store_true")),
        ('--extension', dict(required=False, type=str, default='.nii.gz')),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    if args.case:
        comp_high_b_case(args.input_dir, args.target_bval, save_case=True,
                         output_dir=args.output_dir, extension=args.extension)
    else:
        comp_high_b_dir(args.input_dir, args.target_bval, args.output_dir,
                        extension=args.extension)
# Run the CLI entry point when executed as a script.
if __name__ == "__main__":
    process()
| 33.659091 | 107 | 0.61445 |
import numpy as np
from scipy.optimize import curve_fit
from pathlib import Path
from argparse import ArgumentParser
from dwi_utilities.monoexponential_decay import log_func, func
import dwi_utilities.nifti_utilities as nutil
def comp_high_b_case(case_dir, target_bval, save_case=False, output_dir=None, extension='.nii.gz'):
eps = 1e-8
data_stack = []
bval_list = []
filepaths = nutil.path_generator(case_dir)
for path in filepaths:
name, nii, data = nutil.load(path)
data_stack.append(data)
bval_list.append(name.replace('.nii.gz','').replace('b',''))
bval_list, data_stack = \
zip(*sorted(zip(bval_list, data_stack)))
bval_list = np.array(bval_list)
data = np.array(data_stack)
shape = np.shape(data[0])
highb_data = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
y = []
for array in data:
y.append(array[i][j][k])
x = bval_list
y = np.array(y) + eps
z = np.log(y)
popt, pcov = curve_fit(log_func, x, z)
if popt[1] < 0:
highb_data[i][j][k] = 0
else:
highb_data[i][j][k] = func(target_bval, np.exp(popt[0]), popt[1])
if save_case:
case_name = Path(case_dir).parts[-1]
save_path = Path(output_dir) / (case_name + extension)
nutil.save(save_path, nii, highb_data)
return highb_data
def comp_high_b_dir(cases_dir, target_bval, output_dir, extension='.nii.gz'):
for case_dir in Path(cases_dir).iterdir():
print("Processing:", case_dir)
comp_high_b_case(case_dir, target_bval, save_case=True, output_dir=output_dir, extension=extension)
return None
def process():
parser = ArgumentParser()
parser.add_argument('--input_dir', required=True, type=str)
parser.add_argument('--target_bval', required=True, type=int)
parser.add_argument('--output_dir', required=True, type=str)
parser.add_argument('--case', required=False, action="store_true")
parser.add_argument('--extension', required=False, type=str, default='.nii.gz')
args = parser.parse_args()
if args.case:
comp_high_b_case(args.input_dir, args.target_bval, save_case=True, output_dir=args.output_dir,
extension=args.extension)
else:
comp_high_b_dir(args.input_dir, args.target_bval, args.output_dir,
extension=args.extension)
if __name__ == "__main__":
process()
| true | true |
f73ea519f8832efdc9b25199967e85cf5f3a6553 | 4,885 | py | Python | storm_control/hal4000/testing/testing.py | emanuega/storm-5 | f41dba34d1ea219d80954f8b32f0c25b9e7a876c | [
"MIT"
] | null | null | null | storm_control/hal4000/testing/testing.py | emanuega/storm-5 | f41dba34d1ea219d80954f8b32f0c25b9e7a876c | [
"MIT"
] | null | null | null | storm_control/hal4000/testing/testing.py | emanuega/storm-5 | f41dba34d1ea219d80954f8b32f0c25b9e7a876c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
The HAL testing module, basically this just sends messages
to HAL and verifies that response / behavior is correct.
Testing is done by sub-classing this module and providing
it with a series of test actions, a little bit like what
Dave does when controlling HAL.
Hazen 04/17
"""
import storm_control.sc_library.tcpClient as tcpClient
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.testing.testActionsTCP as testActionsTCP
class Testing(halModule.HalModule):
    """
    Drives HAL through a scripted sequence of test actions.

    Sub-classes populate self.test_actions.  Each action emits one HAL
    message; the next action starts when the current one signals
    actionDone.  When the queue is empty a "tests done" message is
    emitted, which HAL should respond to by closing.
    """
    def __init__(self, module_params = None, qt_settings = None, **kwds):
        super().__init__(**kwds)
        self.all_modules = None      # Name -> module map, filled in at "configure1".
        self.current_action = None   # Action currently executing, if any.
        self.test_actions = []       # Actions still to run; set by sub-classes.
        # This message type is just a place holder.
        halMessage.addMessage("na",
                              validator = {"data" : None, "resp" : None})
        # This message is sent but does not do anything.
        halMessage.addMessage("noop",
                              validator = {"data" : None, "resp" : None})
        # This message is sent when all the tests finish. HAL should
        # close when it gets this message.
        halMessage.addMessage("tests done",
                              validator = {"data" : None, "resp" : None})
    def handleActionDone(self):
        """Start the next queued action, or emit "tests done" when finished."""
        #
        # If there are no more actions, send the 'tests done' message
        # which will cause HAL to close.
        #
        if (len(self.test_actions) == 0):
            self.newMessage.emit(halMessage.HalMessage(source = self,
                                                       m_type = "tests done"))
        #
        # Otherwise start the next action.
        #
        else:
            if self.current_action is not None:
                self.current_action.actionDone.disconnect()
            self.current_action = self.test_actions[0]
            self.test_actions = self.test_actions[1:]
            self.current_action.start()
            self.current_action.actionDone.connect(self.handleActionDone)
            message = halMessage.HalMessage(source = self.all_modules[self.current_action.getSourceName()],
                                            m_type = self.current_action.getMessageType(),
                                            data = self.current_action.getMessageData(),
                                            finalizer = self.current_action.finalizer)
            self.current_action.setMessage(message)
            self.newMessage.emit(message)
    def handleResponses(self, message):
        """Forward message responses to the current action when they match its filter."""
        if message.hasResponses():
            if message.isType(self.current_action.getResponseFilter()):
                self.current_action.handleResponses(message)
    def processMessage(self, message):
        """Handle HAL messages; the first action is kicked off at "start"."""
        if message.isType("configure1"):
            self.all_modules = message.getData()["all_modules"]
        elif message.isType("start"):
            self.handleActionDone()
        # Runs for every message, so the current action also sees the
        # message that started it, if its filter matches.
        if self.current_action is not None:
            if message.isType(self.current_action.getMessageFilter()):
                self.current_action.handleMessage(message)
class TestingTCP(Testing):
    """
    This adds the ability to test HAL's handling of TCP commands.
    """
    def __init__(self, **kwds):
        super().__init__(**kwds)
        self.hal_client = None  # TCP client connected to HAL; created at "start".
    def handleActionDone(self):
        """Advance to the next action, sending its TCP message if it has one."""
        # This is little fiddly as it needs to handle both
        # TestAction and TestActionTCP actions.
        # If there are no more actions, close the TCP connection to HAL.
        done = False
        if (len(self.test_actions) == 0):
            self.hal_client.stopCommunication()
            self.hal_client.close()
            done = True
        # Super class handles TestActions. Note that for TestActionTCP
        # this will send a "noop" message through HAL's queue.
        super().handleActionDone()
        # Check if this TestActionTCP and we need to send a TCPMessage.
        if not done and isinstance(self.current_action, testActionsTCP.TestActionTCP):
            self.hal_client.sendMessage(self.current_action.tcp_message)
    def handleMessageReceived(self, tcp_message):
        """
        Handle a TCP (response) message from HAL.
        """
        self.current_action.handleMessageReceived(tcp_message)
    def processMessage(self, message):
        """Open the TCP connection to HAL at "start", then defer to the base class."""
        if message.isType("start"):
            self.hal_client = tcpClient.TCPClient(port = 9000,
                                                  server_name = "HAL",
                                                  verbose = False)
            self.hal_client.messageReceived.connect(self.handleMessageReceived)
            self.hal_client.startCommunication()
        super().processMessage(message)
| 36.455224 | 107 | 0.605937 |
import storm_control.sc_library.tcpClient as tcpClient
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.testing.testActionsTCP as testActionsTCP
class Testing(halModule.HalModule):
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.all_modules = None
self.current_action = None
self.test_actions = []
halMessage.addMessage("na",
validator = {"data" : None, "resp" : None})
halMessage.addMessage("noop",
validator = {"data" : None, "resp" : None})
halMessage.addMessage("tests done",
validator = {"data" : None, "resp" : None})
def handleActionDone(self):
if (len(self.test_actions) == 0):
self.newMessage.emit(halMessage.HalMessage(source = self,
m_type = "tests done"))
else:
if self.current_action is not None:
self.current_action.actionDone.disconnect()
self.current_action = self.test_actions[0]
self.test_actions = self.test_actions[1:]
self.current_action.start()
self.current_action.actionDone.connect(self.handleActionDone)
message = halMessage.HalMessage(source = self.all_modules[self.current_action.getSourceName()],
m_type = self.current_action.getMessageType(),
data = self.current_action.getMessageData(),
finalizer = self.current_action.finalizer)
self.current_action.setMessage(message)
self.newMessage.emit(message)
def handleResponses(self, message):
if message.hasResponses():
if message.isType(self.current_action.getResponseFilter()):
self.current_action.handleResponses(message)
def processMessage(self, message):
if message.isType("configure1"):
self.all_modules = message.getData()["all_modules"]
elif message.isType("start"):
self.handleActionDone()
if self.current_action is not None:
if message.isType(self.current_action.getMessageFilter()):
self.current_action.handleMessage(message)
class TestingTCP(Testing):
def __init__(self, **kwds):
super().__init__(**kwds)
self.hal_client = None
def handleActionDone(self):
done = False
if (len(self.test_actions) == 0):
self.hal_client.stopCommunication()
self.hal_client.close()
done = True
super().handleActionDone()
# Check if this TestActionTCP and we need to send a TCPMessage.
if not done and isinstance(self.current_action, testActionsTCP.TestActionTCP):
self.hal_client.sendMessage(self.current_action.tcp_message)
def handleMessageReceived(self, tcp_message):
self.current_action.handleMessageReceived(tcp_message)
def processMessage(self, message):
if message.isType("start"):
self.hal_client = tcpClient.TCPClient(port = 9000,
server_name = "HAL",
verbose = False)
self.hal_client.messageReceived.connect(self.handleMessageReceived)
self.hal_client.startCommunication()
super().processMessage(message)
| true | true |
f73ea5c89a9fc888776bfb6480169cb0334eea34 | 2,357 | py | Python | connectomics/model/utils/monitor.py | donglaiw/pytorch_connectomics | c79a3cc82f853a86e98930475f6355d0022916dd | [
"MIT"
] | 1 | 2020-05-17T08:01:56.000Z | 2020-05-17T08:01:56.000Z | connectomics/model/utils/monitor.py | donglaiw/pytorch_connectomics | c79a3cc82f853a86e98930475f6355d0022916dd | [
"MIT"
] | null | null | null | connectomics/model/utils/monitor.py | donglaiw/pytorch_connectomics | c79a3cc82f853a86e98930475f6355d0022916dd | [
"MIT"
] | 3 | 2020-03-31T21:40:12.000Z | 2021-06-09T02:26:43.000Z | import os,sys
import numpy as np
# tensorboardX
from tensorboardX import SummaryWriter
from .visualizer import Visualizer
class Logger(object):
    """Accumulates a running training-loss average and reports it.

    Depending on ``log_opt`` the average is printed to stdout, written to
    TensorBoard (tensorboardX) and/or appended to ``log.txt``.

    Args:
        log_path: Directory for TensorBoard event files and log.txt.
        log_opt: Three flags (do_print, do_tensorboard, do_txt).
        batch_size: Number of samples each recorded value represents.
    """
    def __init__(self, log_path='', log_opt=(1, 1, 0), batch_size=1):
        # NOTE: default changed from a mutable list to a tuple -- shared
        # mutable defaults are a classic pitfall; indexing is unchanged,
        # so existing callers are unaffected.
        self.n = batch_size
        self.reset()
        # tensorboardX writer (None when disabled).
        self.log_tb = None
        self.do_print = log_opt[0] == 1
        if log_opt[1] > 0:
            self.log_tb = SummaryWriter(log_path)
        # Plain-text log (None when disabled).  Writes are buffered;
        # output() flushes after each line.  (The old comment claiming
        # "unbuffered" was wrong -- text files cannot be opened
        # unbuffered in Python 3.)
        self.log_txt = None
        if log_opt[2] > 0:
            self.log_txt = open(log_path+'/log.txt', 'w')
    def reset(self):
        """Clear the running statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
    def update(self, val):
        """Record a new loss value, weighted by the batch size."""
        self.val = val
        self.sum += val * self.n
        self.count += self.n
    def output(self, iter_total, lr):
        """Report the current average loss to the enabled sinks and return it.

        Args:
            iter_total: Global iteration number used for labeling.
            lr: Current learning rate, included in the report.

        Returns:
            The average loss accumulated since the last reset().
        """
        avg = self.sum / self.count
        if self.do_print:
            print('[Iteration %d] train_loss=%0.4f lr=%.5f' % (iter_total, avg, lr))
        if self.log_tb is not None:
            self.log_tb.add_scalar('Loss', avg, iter_total)
        if self.log_txt is not None:
            self.log_txt.write("[Volume %d] train_loss=%0.4f lr=%.5f\n" % (iter_total, avg, lr))
            self.log_txt.flush()
        return avg
class Monitor(object):
    """Computes and stores the average and current value.

    Couples a Logger (scalar logging) with a Visualizer (image logging)
    and decides, per iteration, when to log and when to visualize.

    Args:
        log_path: output directory passed to the Logger.
        log_opt: (do_print, do_tensorboard, do_txt, batch_size).
        vis_opt: (visualization type, number of images shown).
        iter_num: (logging interval, visualization interval) in iterations.
    """

    def __init__(self, log_path='', log_opt=(1, 1, 0, 1), vis_opt=(0, 8),
                 iter_num=(10, 100)):
        # Tuple defaults: mutable list defaults would be shared between
        # instances.
        self.logger = Logger(log_path, log_opt[:3], log_opt[3])
        self.vis = Visualizer(vis_opt[0], vis_opt[1])
        self.log_iter, self.vis_iter = iter_num
        # Visualization needs a tensorboard writer to draw into.
        self.do_vis = self.logger.log_tb is not None

    def update(self, scheduler, iter_total, loss, lr=0.1):
        """Record a loss value; every log_iter iterations emit the average
        (and every vis_iter iterations also step the LR scheduler).

        Returns:
            True when the caller should visualize this iteration.
        """
        do_vis = False
        self.logger.update(loss)
        if (iter_total + 1) % self.log_iter == 0:
            avg = self.logger.output(iter_total, lr)
            self.logger.reset()
            if (iter_total + 1) % self.vis_iter == 0:
                scheduler.step(avg)
                do_vis = self.do_vis
        return do_vis

    def visualize(self, volume, label, output, iter_total):
        """Forward one sample to the Visualizer (tensorboard images)."""
        self.vis.visualize(volume, label, output, iter_total, self.logger.log_tb)

    def reset(self):
        """Reset the underlying Logger statistics."""
        self.logger.reset()
| 34.661765 | 96 | 0.593127 | import os,sys
import numpy as np
from tensorboardX import SummaryWriter
from .visualizer import Visualizer
class Logger(object):
    """Running-average loss logger writing to stdout, tensorboardX and/or a text file."""
    def __init__(self, log_path='', log_opt=[1,1,0], batch_size=1):
        # log_opt: (print-to-stdout, tensorboard, text-file) switches.
        self.n = batch_size
        self.reset()
        # tensorboardX writer, created only when requested.
        self.log_tb = None
        self.do_print = log_opt[0]==1
        if log_opt[1] > 0:
            self.log_tb = SummaryWriter(log_path)
        # Plain-text log file; output() flushes it after every write.
        self.log_txt = None
        if log_opt[2] > 0:
            self.log_txt = open(log_path+'/log.txt','w')
    def reset(self):
        """Clear the running statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
    def update(self, val):
        """Record one loss value, weighted by the batch size."""
        self.val = val
        self.sum += val * self.n
        self.count += self.n
    def output(self,iter_total, lr):
        """Emit the average loss to every enabled sink; returns the average."""
        avg = self.sum / self.count
        if self.do_print:
            print('[Iteration %d] train_loss=%0.4f lr=%.5f' % (iter_total, avg, lr))
        if self.log_tb is not None:
            self.log_tb.add_scalar('Loss', avg, iter_total)
        if self.log_txt is not None:
            self.log_txt.write("[Volume %d] train_loss=%0.4f lr=%.5f\n" % (iter_total, avg, lr))
            self.log_txt.flush()
        return avg
class Monitor(object):
    """Couples a Logger and a Visualizer and schedules logging/visualization."""
    def __init__(self, log_path='', log_opt=[1,1,0,1], vis_opt=[0,8], iter_num=[10,100]):
        # log_opt: (do_print, do_tensorboard, do_txt, batch_size);
        # vis_opt: (vis type, number of images); iter_num: (log, vis) intervals.
        self.logger = Logger(log_path, log_opt[:3], log_opt[3])
        self.vis = Visualizer(vis_opt[0], vis_opt[1])
        self.log_iter, self.vis_iter = iter_num
        # Visualization requires a tensorboard writer.
        self.do_vis = False if self.logger.log_tb is None else True
    def update(self, scheduler, iter_total, loss, lr=0.1):
        """Record a loss; periodically log the average, step the scheduler,
        and report whether the caller should visualize this iteration."""
        do_vis = False
        self.logger.update(loss)
        if (iter_total+1) % self.log_iter == 0:
            avg = self.logger.output(iter_total, lr)
            self.logger.reset()
            if (iter_total+1) % self.vis_iter == 0:
                scheduler.step(avg)
                do_vis = self.do_vis
        return do_vis
    def visualize(self, volume, label, output, iter_total):
        """Send one sample to the Visualizer (drawn into the tb writer)."""
        self.vis.visualize(volume, label, output, iter_total, self.logger.log_tb)
    def reset(self):
        """Reset the Logger statistics."""
        self.logger.reset()
f73ea5d5f0cf546eadffbe00ea6e8a62346d1682 | 3,672 | py | Python | GoToMeeting/GoToMeetingURLProvider.py | KWik/homebysix-recipes | 5f5447bfba771633a605be2468f17d8bcf429eac | [
"Apache-2.0"
] | null | null | null | GoToMeeting/GoToMeetingURLProvider.py | KWik/homebysix-recipes | 5f5447bfba771633a605be2468f17d8bcf429eac | [
"Apache-2.0"
] | null | null | null | GoToMeeting/GoToMeetingURLProvider.py | KWik/homebysix-recipes | 5f5447bfba771633a605be2468f17d8bcf429eac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2015-2020 Elliot Jordan
# Based on original processor by Nick Gamewell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import json
import os
import platform
import socket
from distutils.version import LooseVersion
# pylint: disable=unused-import
from autopkglib import Processor, ProcessorError, URLGetter # noqa: F401
__all__ = ["GoToMeetingURLProvider"]
HOSTNAME = "builds.cdn.getgo.com"
# workaround for 10.12.x SNI issue: on macOS below 10.13 resolve the CDN
# hostname up front so the request does not rely on SNI.
if LooseVersion(platform.mac_ver()[0]) < LooseVersion("10.13.0"):
    # pylint: disable=no-member
    HOSTNAME = socket.gethostbyname_ex("builds.cdn.getgo.com")[0]
# Default feed listing the currently active GoToMeeting builds.
BASE_URL = "https://" + HOSTNAME + "/g2mupdater/live/config.json"
class GoToMeetingURLProvider(URLGetter):
    """Provides a download URL and build number for the latest GoToMeeting
    release."""

    input_variables = {
        "base_url": {
            "required": False,
            "description": "URL for the GoToMeeting "
            "releases JSON feed. Default is %s" % BASE_URL,
        }
    }
    output_variables = {
        "url": {"description": "URL to the latest GoToMeeting release."},
        "build": {"description": "Build number of the latest GoToMeeting release."},
    }
    description = __doc__

    def get_g2m_info(self, base_url):
        """Download the release feed and return (download_url, build_number).

        The feed is sometimes served gzip-compressed and sometimes as plain
        JSON, so both encodings are tried.

        Raises:
            ProcessorError: if the feed contains no Mac download URL.
        """
        # Prepare download file path.
        download_dir = os.path.join(self.env["RECIPE_CACHE_DIR"], "downloads")
        meta_path = os.path.join(download_dir, "meta")
        # exist_ok tolerates an existing directory but still surfaces real
        # failures (the old try/except os.error swallowed those too).
        os.makedirs(download_dir, exist_ok=True)

        # Download JSON feed (or gzipped JSON feed) to a file.
        meta_file = self.download_to_file(base_url, meta_path)

        # Sometimes the base URL is compressed as gzip, sometimes it's not.
        try:
            with open(meta_file, "rb") as f:
                jsondata = json.loads(f.read())
            self.output("Encoding: json")
        except ValueError:
            with gzip.open(meta_file, "rb") as f:
                jsondata = json.loads(f.read())
            self.output("Encoding: gzip")

        # The active build with the highest build number is the latest one.
        max_build = max(jsondata["activeBuilds"], key=lambda x: int(x["buildNumber"]))
        g2m_url = max_build.get("macDownloadUrl")
        if not g2m_url:
            raise ProcessorError(
                "No download URL for the latest release "
                "found in the base_url JSON feed."
            )
        # Normalize the final path component to a stable file name.
        url_parts = g2m_url.split("/")
        url_parts[-1] = "GoToMeeting.dmg"
        g2m_url = "/".join(url_parts)
        g2m_build = str(max_build["buildNumber"])
        return g2m_url, g2m_build

    def main(self):
        """Main process: resolve the feed and publish url/build variables."""
        base_url = self.env.get("base_url", BASE_URL)
        g2m_url, g2m_build = self.get_g2m_info(base_url)
        self.env["url"] = g2m_url
        self.output("Found URL: %s" % self.env["url"])
        self.env["build"] = g2m_build
        self.output("Build number: %s" % self.env["build"])
if __name__ == "__main__":
    # Allow running the processor standalone from the command line.
    PROCESSOR = GoToMeetingURLProvider()
    PROCESSOR.execute_shell()
| 33.081081 | 86 | 0.639434 |
import gzip
import json
import os
import platform
import socket
from distutils.version import LooseVersion
from autopkglib import Processor, ProcessorError, URLGetter
__all__ = ["GoToMeetingURLProvider"]
HOSTNAME = "builds.cdn.getgo.com"
# On macOS below 10.13 resolve the CDN host up front (SNI workaround).
if LooseVersion(platform.mac_ver()[0]) < LooseVersion("10.13.0"):
    HOSTNAME = socket.gethostbyname_ex("builds.cdn.getgo.com")[0]
# Default feed listing the currently active GoToMeeting builds.
BASE_URL = "https://" + HOSTNAME + "/g2mupdater/live/config.json"
# AutoPkg processor: finds the download URL and build number of the latest
# GoToMeeting release. (No class docstring here: ``description = __doc__``
# below reads it, and adding one would change the processor description.)
class GoToMeetingURLProvider(URLGetter):
    input_variables = {
        "base_url": {
            "required": False,
            "description": "URL for the GoToMeeting "
            "releases JSON feed. Default is %s" % BASE_URL,
        }
    }
    output_variables = {
        "url": {"description": "URL to the latest GoToMeeting release."},
        "build": {"description": "Build number of the latest GoToMeeting release."},
    }
    description = __doc__
    def get_g2m_info(self, base_url):
        """Download the release feed and return (download_url, build_number)."""
        download_dir = os.path.join(self.env["RECIPE_CACHE_DIR"], "downloads")
        meta_path = os.path.join(download_dir, "meta")
        try:
            os.makedirs(download_dir)
        except os.error:
            # Directory already exists.
            pass
        meta_file = self.download_to_file(base_url, meta_path)
        # The feed may be served as plain JSON or gzip-compressed.
        try:
            with open(meta_file, "rb") as f:
                jsondata = json.loads(f.read())
            self.output("Encoding: json")
        except ValueError:
            with gzip.open(meta_file, "rb") as f:
                jsondata = json.loads(f.read())
            self.output("Encoding: gzip")
        # Latest release = active build with the highest build number.
        max_build = max(jsondata["activeBuilds"], key=lambda x: int(x["buildNumber"]))
        g2m_url = max_build.get("macDownloadUrl")
        if not g2m_url:
            raise ProcessorError(
                "No download URL for the latest release "
                "found in the base_url JSON feed."
            )
        # Normalize the final path component to a stable file name.
        url_parts = g2m_url.split("/")
        url_parts[-1] = "GoToMeeting.dmg"
        g2m_url = "/".join(url_parts)
        g2m_build = str(max_build["buildNumber"])
        return g2m_url, g2m_build
    def main(self):
        """Resolve the feed and publish the url/build output variables."""
        base_url = self.env.get("base_url", BASE_URL)
        g2m_url, g2m_build = self.get_g2m_info(base_url)
        self.env["url"] = g2m_url
        self.output("Found URL: %s" % self.env["url"])
        self.env["build"] = g2m_build
        self.output("Build number: %s" % self.env["build"])
if __name__ == "__main__":
    # Allow running the processor standalone from the command line.
    PROCESSOR = GoToMeetingURLProvider()
    PROCESSOR.execute_shell()
| true | true |
f73ea67366e870025b47fe0b105ad3c759233ac0 | 39,760 | py | Python | google_appengine/google/appengine/api/system/system_service_pb.py | iTrollYou/WEB_Spotify_Youtube | 5315cdf78361942bba0b52daa8b65d74998d2db5 | [
"MIT"
] | 26 | 2015-01-20T08:02:38.000Z | 2020-06-10T04:57:41.000Z | google_appengine/google/appengine/api/system/system_service_pb.py | iTrollYou/WEB_Spotify_Youtube | 5315cdf78361942bba0b52daa8b65d74998d2db5 | [
"MIT"
] | 4 | 2016-02-28T05:53:54.000Z | 2017-01-03T07:39:50.000Z | google_appengine/google/appengine/api/system/system_service_pb.py | iTrollYou/WEB_Spotify_Youtube | 5315cdf78361942bba0b52daa8b65d74998d2db5 | [
"MIT"
] | 13 | 2016-02-28T00:14:23.000Z | 2021-05-03T15:47:36.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import abc
import array
import base64
# Py2/Py3 compatibility: the low-level "thread" module was renamed.
try:
  from thread import allocate_lock as _Lock
except ImportError:
  from threading import Lock as _Lock
# Optional C-accelerated (de)serializer; absent in the open-source SDK,
# in which case the pure-Python codepaths below are used.
try:
  from google3.net.proto import _net_proto___parse__python
except ImportError:
  _net_proto___parse__python = None
import sys
# Optional RPC client stub runtime (Google-internal only); fall back to a
# plain object base class when unavailable.
try:
  __import__('google.net.rpc.python.proto_python_api_1_stub')
  __import__('google.net.rpc.python.pywraprpc')
  proto_python_api_1_stub = sys.modules.get('google.net.rpc.python.proto_python_api_1_stub')
  pywraprpc = sys.modules.get('google.net.rpc.python.pywraprpc')
  _client_stub_base_class = proto_python_api_1_stub.Stub
except ImportError:
  _client_stub_base_class = object
# Optional RPC server runtime, same fallback strategy.
try:
  __import__('google.net.rpc.python.rpcserver')
  rpcserver = sys.modules.get('google.net.rpc.python.rpcserver')
  _server_stub_base_class = rpcserver.BaseRpcServer
except ImportError:
  _server_stub_base_class = object
# Python 2: make "range" lazy like xrange.
if hasattr(__builtins__, 'xrange'): range = xrange
# Pick the message base class depending on extension support in the
# installed ProtocolBuffer runtime.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class SystemServiceError(ProtocolBuffer.ProtocolMessage):
  """Generated message carrying the SystemService ErrorCode enum.

  The message itself has no fields; it only namespaces the enum values.
  """

  # ErrorCode enum values.
  OK = 0
  INTERNAL_ERROR = 1
  BACKEND_REQUIRED = 2
  LIMIT_REACHED = 3

  _ErrorCode_NAMES = {
    0: "OK",
    1: "INTERNAL_ERROR",
    2: "BACKEND_REQUIRED",
    3: "LIMIT_REACHED",
  }

  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  # C-accelerated codec entry points, installed only when the native
  # parser extension is available.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.SystemServiceError', s)

  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.SystemServiceError')

  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.SystemServiceError')

  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.SystemServiceError', output_format)

  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.SystemServiceError', s)

  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.SystemServiceError', s)

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No own fields: skip every tag in the wire data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemServiceError'
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KHWFwcGhvc3RpbmcuU3lzdGVtU2VydmljZUVycm9yc3oJRXJyb3JDb2RliwGSAQJPS5gBAIwBiwGSAQ5JTlRFUk5BTF9FUlJPUpgBAYwBiwGSARBCQUNLRU5EX1JFUVVJUkVEmAECjAGLAZIBDUxJTUlUX1JFQUNIRUSYAQOMAXS6AYUGCiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8SCmFwcGhvc3RpbmciZgoSU3lzdGVtU2VydmljZUVycm9yIlAKCUVycm9yQ29kZRIGCgJPSxAAEhIKDklOVEVSTkFMX0VSUk9SEAESFAoQQkFDS0VORF9SRVFVSVJFRBACEhEKDUxJTUlUX1JFQUNIRUQQAyJ0CgpTeXN0ZW1TdGF0Eg8KB2N1cnJlbnQYASABKAESEQoJYXZlcmFnZTFtGAMgASgBEhIKCmF2ZXJhZ2UxMG0YBCABKAESDQoFdG90YWwYAiABKAESDgoGcmF0ZTFtGAUgASgBEg8KB3JhdGUxMG0YBiABKAEiFwoVR2V0U3lzdGVtU3RhdHNSZXF1ZXN0ImUKFkdldFN5c3RlbVN0YXRzUmVzcG9uc2USIwoDY3B1GAEgASgLMhYuYXBwaG9zdGluZy5TeXN0ZW1TdGF0EiYKBm1lbW9yeRgCIAEoCzIWLmFwcGhvc3RpbmcuU3lzdGVtU3RhdCIfCh1TdGFydEJhY2tncm91bmRSZXF1ZXN0UmVxdWVzdCI0Ch5TdGFydEJhY2tncm91bmRSZXF1ZXN0UmVzcG9uc2USEgoKcmVxdWVzdF9pZBgBIAEoCTLdAQoNU3lzdGVtU2VydmljZRJZCg5HZXRTeXN0ZW1TdGF0cxIhLmFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXF1ZXN0GiIuYXBwaG9zdGluZy5HZXRTeXN0ZW1TdGF0c1Jlc3BvbnNlIgAScQoWU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdBIpLmFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlcXVlc3QaKi5hcHBob3N0aW5nLlN0YXJ0QmFja2dyb3VuZFJlcXVlc3RSZXNwb25zZSIAQjYKH2NvbS5nb29nbGUuYXBwZW5naW5lLmFwaS5zeXN0ZW0QAigCQg9TeXN0ZW1TZXJ2aWNlUGI="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class SystemStat(ProtocolBuffer.ProtocolMessage):
  """Generated message: one system statistic (e.g. CPU or memory) with a
  current value, a running total, and 1-minute/10-minute averages and rates.

  Each optional double field keeps a has_<field>_ presence flag next to the
  value, following the classic generated-code layout.
  """

  # Presence flags and default values for the six double fields.
  has_current_ = 0
  current_ = 0.0
  has_average1m_ = 0
  average1m_ = 0.0
  has_average10m_ = 0
  average10m_ = 0.0
  has_total_ = 0
  total_ = 0.0
  has_rate1m_ = 0
  rate1m_ = 0.0
  has_rate10m_ = 0
  rate10m_ = 0.0

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # Accessors for "current" (field 1).
  def current(self): return self.current_

  def set_current(self, x):
    self.has_current_ = 1
    self.current_ = x

  def clear_current(self):
    if self.has_current_:
      self.has_current_ = 0
      self.current_ = 0.0

  def has_current(self): return self.has_current_

  # Accessors for "average1m" (field 3).
  def average1m(self): return self.average1m_

  def set_average1m(self, x):
    self.has_average1m_ = 1
    self.average1m_ = x

  def clear_average1m(self):
    if self.has_average1m_:
      self.has_average1m_ = 0
      self.average1m_ = 0.0

  def has_average1m(self): return self.has_average1m_

  # Accessors for "average10m" (field 4).
  def average10m(self): return self.average10m_

  def set_average10m(self, x):
    self.has_average10m_ = 1
    self.average10m_ = x

  def clear_average10m(self):
    if self.has_average10m_:
      self.has_average10m_ = 0
      self.average10m_ = 0.0

  def has_average10m(self): return self.has_average10m_

  # Accessors for "total" (field 2).
  def total(self): return self.total_

  def set_total(self, x):
    self.has_total_ = 1
    self.total_ = x

  def clear_total(self):
    if self.has_total_:
      self.has_total_ = 0
      self.total_ = 0.0

  def has_total(self): return self.has_total_

  # Accessors for "rate1m" (field 5).
  def rate1m(self): return self.rate1m_

  def set_rate1m(self, x):
    self.has_rate1m_ = 1
    self.rate1m_ = x

  def clear_rate1m(self):
    if self.has_rate1m_:
      self.has_rate1m_ = 0
      self.rate1m_ = 0.0

  def has_rate1m(self): return self.has_rate1m_

  # Accessors for "rate10m" (field 6).
  def rate10m(self): return self.rate10m_

  def set_rate10m(self, x):
    self.has_rate10m_ = 1
    self.rate10m_ = x

  def clear_rate10m(self):
    if self.has_rate10m_:
      self.has_rate10m_ = 0
      self.rate10m_ = 0.0

  def has_rate10m(self): return self.has_rate10m_

  def MergeFrom(self, x):
    # Copy every set field from x into self.
    assert x is not self
    if (x.has_current()): self.set_current(x.current())
    if (x.has_average1m()): self.set_average1m(x.average1m())
    if (x.has_average10m()): self.set_average10m(x.average10m())
    if (x.has_total()): self.set_total(x.total())
    if (x.has_rate1m()): self.set_rate1m(x.rate1m())
    if (x.has_rate10m()): self.set_rate10m(x.rate10m())

  # C-accelerated codec entry points, installed only when available.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.SystemStat', s)

  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.SystemStat')

  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.SystemStat')

  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.SystemStat', output_format)

  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.SystemStat', s)

  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.SystemStat', s)

  def Equals(self, x):
    # Field-by-field equality including presence flags.
    if x is self: return 1
    if self.has_current_ != x.has_current_: return 0
    if self.has_current_ and self.current_ != x.current_: return 0
    if self.has_average1m_ != x.has_average1m_: return 0
    if self.has_average1m_ and self.average1m_ != x.average1m_: return 0
    if self.has_average10m_ != x.has_average10m_: return 0
    if self.has_average10m_ and self.average10m_ != x.average10m_: return 0
    if self.has_total_ != x.has_total_: return 0
    if self.has_total_ and self.total_ != x.total_: return 0
    if self.has_rate1m_ != x.has_rate1m_: return 0
    if self.has_rate1m_ and self.rate1m_ != x.rate1m_: return 0
    if self.has_rate10m_ != x.has_rate10m_: return 0
    if self.has_rate10m_ and self.rate10m_ != x.rate10m_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields are optional, so the message is always initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    # Each set double costs 9 bytes on the wire: 1 tag byte + 8 value bytes.
    n = 0
    if (self.has_current_): n += 9
    if (self.has_average1m_): n += 9
    if (self.has_average10m_): n += 9
    if (self.has_total_): n += 9
    if (self.has_rate1m_): n += 9
    if (self.has_rate10m_): n += 9
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_current_): n += 9
    if (self.has_average1m_): n += 9
    if (self.has_average10m_): n += 9
    if (self.has_total_): n += 9
    if (self.has_rate1m_): n += 9
    if (self.has_rate10m_): n += 9
    return n

  def Clear(self):
    self.clear_current()
    self.clear_average1m()
    self.clear_average10m()
    self.clear_total()
    self.clear_rate1m()
    self.clear_rate10m()

  def OutputUnchecked(self, out):
    # Fields are written in tag order (1, 2, 3, 4, 5, 6); the tag byte is
    # (field_number << 3) | wire_type, wire type 1 = 64-bit.
    if (self.has_current_):
      out.putVarInt32(9)
      out.putDouble(self.current_)
    if (self.has_total_):
      out.putVarInt32(17)
      out.putDouble(self.total_)
    if (self.has_average1m_):
      out.putVarInt32(25)
      out.putDouble(self.average1m_)
    if (self.has_average10m_):
      out.putVarInt32(33)
      out.putDouble(self.average10m_)
    if (self.has_rate1m_):
      out.putVarInt32(41)
      out.putDouble(self.rate1m_)
    if (self.has_rate10m_):
      out.putVarInt32(49)
      out.putDouble(self.rate10m_)

  def OutputPartial(self, out):
    if (self.has_current_):
      out.putVarInt32(9)
      out.putDouble(self.current_)
    if (self.has_total_):
      out.putVarInt32(17)
      out.putDouble(self.total_)
    if (self.has_average1m_):
      out.putVarInt32(25)
      out.putDouble(self.average1m_)
    if (self.has_average10m_):
      out.putVarInt32(33)
      out.putDouble(self.average10m_)
    if (self.has_rate1m_):
      out.putVarInt32(41)
      out.putDouble(self.rate1m_)
    if (self.has_rate10m_):
      out.putVarInt32(49)
      out.putDouble(self.rate10m_)

  def TryMerge(self, d):
    # Decode known tags, skip unknown ones; tag 0 is malformed input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 9:
        self.set_current(d.getDouble())
        continue
      if tt == 17:
        self.set_total(d.getDouble())
        continue
      if tt == 25:
        self.set_average1m(d.getDouble())
        continue
      if tt == 33:
        self.set_average10m(d.getDouble())
        continue
      if tt == 41:
        self.set_rate1m(d.getDouble())
        continue
      if tt == 49:
        self.set_rate10m(d.getDouble())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_current_: res+=prefix+("current: %s\n" % self.DebugFormat(self.current_))
    if self.has_average1m_: res+=prefix+("average1m: %s\n" % self.DebugFormat(self.average1m_))
    if self.has_average10m_: res+=prefix+("average10m: %s\n" % self.DebugFormat(self.average10m_))
    if self.has_total_: res+=prefix+("total: %s\n" % self.DebugFormat(self.total_))
    if self.has_rate1m_: res+=prefix+("rate1m: %s\n" % self.DebugFormat(self.rate1m_))
    if self.has_rate10m_: res+=prefix+("rate10m: %s\n" % self.DebugFormat(self.rate10m_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field-number constants.
  kcurrent = 1
  kaverage1m = 3
  kaverage10m = 4
  ktotal = 2
  krate1m = 5
  krate10m = 6

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "current",
    2: "total",
    3: "average1m",
    4: "average10m",
    5: "rate1m",
    6: "rate10m",
  }, 6)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.DOUBLE,
    2: ProtocolBuffer.Encoder.DOUBLE,
    3: ProtocolBuffer.Encoder.DOUBLE,
    4: ProtocolBuffer.Encoder.DOUBLE,
    5: ProtocolBuffer.Encoder.DOUBLE,
    6: ProtocolBuffer.Encoder.DOUBLE,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemStat'
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KFWFwcGhvc3RpbmcuU3lzdGVtU3RhdBMaB2N1cnJlbnQgASgBMAE4ARQTGglhdmVyYWdlMW0gAygBMAE4ARQTGgphdmVyYWdlMTBtIAQoATABOAEUExoFdG90YWwgAigBMAE4ARQTGgZyYXRlMW0gBSgBMAE4ARQTGgdyYXRlMTBtIAYoATABOAEUwgEdYXBwaG9zdGluZy5TeXN0ZW1TZXJ2aWNlRXJyb3I="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class GetSystemStatsRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message: empty request for SystemService.GetSystemStats."""

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  # C-accelerated codec entry points, installed only when available.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.GetSystemStatsRequest', s)

  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.GetSystemStatsRequest')

  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetSystemStatsRequest')

  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.GetSystemStatsRequest', output_format)

  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.GetSystemStatsRequest', s)

  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetSystemStatsRequest', s)

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No own fields: skip every tag in the wire data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsRequest'
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KIGFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXF1ZXN0wgEdYXBwaG9zdGluZy5TeXN0ZW1TZXJ2aWNlRXJyb3I="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class GetSystemStatsResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message: response with optional cpu/memory SystemStat
  submessages, which are created lazily under a per-instance lock."""

  # Presence flags and lazily-created submessage slots.
  has_cpu_ = 0
  cpu_ = None
  has_memory_ = 0
  memory_ = None

  def __init__(self, contents=None):
    # Lock guarding lazy creation of the cpu_/memory_ submessages.
    self.lazy_init_lock_ = _Lock()
    if contents is not None: self.MergeFromString(contents)

  def cpu(self):
    # Double-checked lazy initialization under lazy_init_lock_.
    if self.cpu_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cpu_ is None: self.cpu_ = SystemStat()
      finally:
        self.lazy_init_lock_.release()
    return self.cpu_

  def mutable_cpu(self): self.has_cpu_ = 1; return self.cpu()

  def clear_cpu(self):
    # Warning: this method does not acquire the lock.
    if self.has_cpu_:
      self.has_cpu_ = 0;
      if self.cpu_ is not None: self.cpu_.Clear()

  def has_cpu(self): return self.has_cpu_

  def memory(self):
    # Double-checked lazy initialization under lazy_init_lock_.
    if self.memory_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.memory_ is None: self.memory_ = SystemStat()
      finally:
        self.lazy_init_lock_.release()
    return self.memory_

  def mutable_memory(self): self.has_memory_ = 1; return self.memory()

  def clear_memory(self):
    # Warning: this method does not acquire the lock.
    if self.has_memory_:
      self.has_memory_ = 0;
      if self.memory_ is not None: self.memory_.Clear()

  def has_memory(self): return self.has_memory_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_cpu()): self.mutable_cpu().MergeFrom(x.cpu())
    if (x.has_memory()): self.mutable_memory().MergeFrom(x.memory())

  # C-accelerated codec entry points, installed only when available.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.GetSystemStatsResponse', s)

  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.GetSystemStatsResponse')

  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetSystemStatsResponse')

  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.GetSystemStatsResponse', output_format)

  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.GetSystemStatsResponse', s)

  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetSystemStatsResponse', s)

  def Equals(self, x):
    if x is self: return 1
    if self.has_cpu_ != x.has_cpu_: return 0
    if self.has_cpu_ and self.cpu_ != x.cpu_: return 0
    if self.has_memory_ != x.has_memory_: return 0
    if self.has_memory_ and self.memory_ != x.memory_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Delegate to the set submessages.
    initialized = 1
    if (self.has_cpu_ and not self.cpu_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_memory_ and not self.memory_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # 1 tag byte + length-prefixed submessage for each set field.
    n = 0
    if (self.has_cpu_): n += 1 + self.lengthString(self.cpu_.ByteSize())
    if (self.has_memory_): n += 1 + self.lengthString(self.memory_.ByteSize())
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_cpu_): n += 1 + self.lengthString(self.cpu_.ByteSizePartial())
    if (self.has_memory_): n += 1 + self.lengthString(self.memory_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_cpu()
    self.clear_memory()

  def OutputUnchecked(self, out):
    # Tags 10 and 18: (field << 3) | wire type 2 (length-delimited).
    if (self.has_cpu_):
      out.putVarInt32(10)
      out.putVarInt32(self.cpu_.ByteSize())
      self.cpu_.OutputUnchecked(out)
    if (self.has_memory_):
      out.putVarInt32(18)
      out.putVarInt32(self.memory_.ByteSize())
      self.memory_.OutputUnchecked(out)

  def OutputPartial(self, out):
    if (self.has_cpu_):
      out.putVarInt32(10)
      out.putVarInt32(self.cpu_.ByteSizePartial())
      self.cpu_.OutputPartial(out)
    if (self.has_memory_):
      out.putVarInt32(18)
      out.putVarInt32(self.memory_.ByteSizePartial())
      self.memory_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode the two submessage tags, skip unknown fields.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cpu().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_memory().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_cpu_:
      res+=prefix+"cpu <\n"
      res+=self.cpu_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_memory_:
      res+=prefix+"memory <\n"
      res+=self.memory_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field-number constants.
  kcpu = 1
  kmemory = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cpu",
    2: "memory",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsResponse'
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KIWFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXNwb25zZRMaA2NwdSABKAIwCzgBShVhcHBob3N0aW5nLlN5c3RlbVN0YXSjAaoBBWN0eXBlsgEGcHJvdG8ypAEUExoGbWVtb3J5IAIoAjALOAFKFWFwcGhvc3RpbmcuU3lzdGVtU3RhdKMBqgEFY3R5cGWyAQZwcm90bzKkARTCAR1hcHBob3N0aW5nLlN5c3RlbVNlcnZpY2VFcnJvcg=="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class StartBackgroundRequestRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message: empty request for SystemService.StartBackgroundRequest."""

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  # C-accelerated codec entry points, installed only when available.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.StartBackgroundRequestRequest', s)

  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.StartBackgroundRequestRequest')

  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.StartBackgroundRequestRequest')

  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.StartBackgroundRequestRequest', output_format)

  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.StartBackgroundRequestRequest', s)

  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.StartBackgroundRequestRequest', s)

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No own fields: skip every tag in the wire data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestRequest'
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KKGFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlcXVlc3TCAR1hcHBob3N0aW5nLlN5c3RlbVNlcnZpY2VFcnJvcg=="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class StartBackgroundRequestResponse(ProtocolBuffer.ProtocolMessage):
  """Generated response message for SystemService.StartBackgroundRequest.

  Single optional field: request_id (string, field number 1) — the id of
  the background request that was started.
  """
  # Presence flag and default value for the request_id field.
  has_request_id_ = 0
  request_id_ = ""
  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
  def request_id(self): return self.request_id_
  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x
  def clear_request_id(self):
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""
  def has_request_id(self): return self.has_request_id_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_request_id()): self.set_request_id(x.request_id())
  # C++ proto accelerator hooks; only defined when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.StartBackgroundRequestResponse', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.StartBackgroundRequestResponse')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.StartBackgroundRequestResponse')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.StartBackgroundRequestResponse', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.StartBackgroundRequestResponse', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.StartBackgroundRequestResponse', s)
  def Equals(self, x):
    # Equal iff both presence flags match and, when set, values match.
    if x is self: return 1
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # request_id is optional, so the message is always initialized.
    initialized = 1
    return initialized
  def ByteSize(self):
    # 1 byte for the tag plus the length-prefixed string payload.
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    return n
  def ByteSizePartial(self):
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    return n
  def Clear(self):
    self.clear_request_id()
  def OutputUnchecked(self, out):
    # Tag 10 == (field 1 << 3) | wire type 2 (length-delimited).
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
  def OutputPartial(self, out):
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_request_id(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
    return res
  # Evaluated at class-definition time: dense tuple from sparse tag dict.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  krequest_id = 1
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "request_id",
  }, 1)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestResponse'
  # Serialized descriptor registered with the C accelerator (Python 2 APIs).
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KKWFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlc3BvbnNlExoKcmVxdWVzdF9pZCABKAIwCTgBFMIBHWFwcGhvc3RpbmcuU3lzdGVtU2VydmljZUVycm9y"))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class SystemServiceStub(object):
  """Abstract client-side interface for the SystemService Stubby service.

  Concrete transports (see the _SystemService_*ClientStub classes) supply
  the actual RPC plumbing; this class only fixes the method contracts.
  """
  __metaclass__ = abc.ABCMeta
  __slots__ = ()

  @abc.abstractmethod
  def GetSystemStats(self, request, rpc=None, callback=None, response=None):
    """Issue a GetSystemStats RPC.

    Args:
      request: a GetSystemStatsRequest instance.
      rpc: optional RPC object to run the call on.
      callback: optional completion callback, invoked as
        callback(rpc, result) once the call finishes; when omitted the
        call blocks until done.
      response: optional ProtocolMessage that receives the reply.

    Returns:
      The GetSystemStatsResponse when no callback was given, else None.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def StartBackgroundRequest(self, request, rpc=None, callback=None, response=None):
    """Issue a StartBackgroundRequest RPC.

    Args:
      request: a StartBackgroundRequestRequest instance.
      rpc: optional RPC object to run the call on.
      callback: optional completion callback, invoked as
        callback(rpc, result) once the call finishes; when omitted the
        call blocks until done.
      response: optional ProtocolMessage that receives the reply.

    Returns:
      The StartBackgroundRequestResponse when no callback was given,
      else None.
    """
    raise NotImplementedError()
class _SystemService_ClientBaseStub(
    SystemServiceStub, _client_stub_base_class):
  """Makes Stubby RPC calls to a SystemService server."""

  # Per-method RPC prototype objects and fully-qualified method names,
  # resolved once in __init__ and reused for every call.
  __slots__ = (
      '_protorpc_GetSystemStats', '_full_name_GetSystemStats',
      '_protorpc_StartBackgroundRequest', '_full_name_StartBackgroundRequest',
  )

  def __init__(self, rpc_stub, rpc_factory=None):
    super(_SystemService_ClientBaseStub, self).__init__(
        None, inject_stub=rpc_stub, rpc_factory=rpc_factory)

    self._protorpc_GetSystemStats = pywraprpc.RPC()
    self._full_name_GetSystemStats = self._stub.GetFullMethodName(
        'GetSystemStats')

    self._protorpc_StartBackgroundRequest = pywraprpc.RPC()
    self._full_name_StartBackgroundRequest = self._stub.GetFullMethodName(
        'StartBackgroundRequest')

  def GetSystemStats(self, request, rpc=None, callback=None, response=None):
    """Make a GetSystemStats RPC call.

    Args:
      request: a GetSystemStatsRequest instance.
      rpc: Optional RPC instance to use for the call.
      callback: Optional final callback. Will be called as
          callback(rpc, result) when the rpc completes. If None, the
          call is synchronous.
      response: Optional ProtocolMessage to be filled in with response.

    Returns:
      The GetSystemStatsResponse if callback is None. Otherwise, returns None.
    """

    if response is None:
      # NOTE(review): the response *class* (not an instance) is passed when
      # none is supplied; presumably _MakeCall (from the
      # proto_python_api_1_stub base class) instantiates it — confirm there.
      response = GetSystemStatsResponse
    return self._MakeCall(rpc,
                          self._full_name_GetSystemStats,
                          'GetSystemStats',
                          request,
                          response,
                          callback,
                          self._protorpc_GetSystemStats,
                          package_name='apphosting')

  def StartBackgroundRequest(self, request, rpc=None, callback=None, response=None):
    """Make a StartBackgroundRequest RPC call.

    Args:
      request: a StartBackgroundRequestRequest instance.
      rpc: Optional RPC instance to use for the call.
      callback: Optional final callback. Will be called as
          callback(rpc, result) when the rpc completes. If None, the
          call is synchronous.
      response: Optional ProtocolMessage to be filled in with response.

    Returns:
      The StartBackgroundRequestResponse if callback is None. Otherwise, returns None.
    """

    if response is None:
      # See note in GetSystemStats: class pass-through is intentional.
      response = StartBackgroundRequestResponse
    return self._MakeCall(rpc,
                          self._full_name_StartBackgroundRequest,
                          'StartBackgroundRequest',
                          request,
                          response,
                          callback,
                          self._protorpc_StartBackgroundRequest,
                          package_name='apphosting')
class _SystemService_ClientStub(_SystemService_ClientBaseStub):
  """Stubby-1 client stub: builds its own RPC_GenericStub from parameters."""
  # Keeps the stub parameters alive alongside the inherited slots.
  __slots__ = ('_params',)
  def __init__(self, rpc_stub_parameters, service_name, rpc_factory=None):
    # Default the service name; an explicit name (even "") is respected.
    if service_name is None:
      service_name = 'SystemService'
    stub = pywraprpc.RPC_GenericStub(service_name, rpc_stub_parameters)
    super(_SystemService_ClientStub, self).__init__(stub, rpc_factory=rpc_factory)
    self._params = rpc_stub_parameters
class _SystemService_RPC2ClientStub(_SystemService_ClientBaseStub):
  """Stubby-2 client stub: wraps an existing RPC2 channel (or creates one)."""
  __slots__ = ()
  def __init__(self, server, channel, service_name, rpc_factory=None):
    if service_name is None:
      service_name = 'SystemService'
    if channel is None:
      # No channel supplied: fall back to creating one from the server
      # address; at least one of (server, channel) must be given.
      if server is None:
        raise RuntimeError('Invalid argument combination to create a stub')
      channel = pywraprpc.NewClientChannel(server)
    elif channel.version() == 1:
      # Version-1 channels belong to the legacy stub; reject them here.
      raise RuntimeError('Expecting an RPC2 channel to create the stub')
    stub = pywraprpc.RPC_GenericStub(service_name, channel)
    super(_SystemService_RPC2ClientStub, self).__init__(stub, rpc_factory=rpc_factory)
class SystemService(_server_stub_base_class):
  """Base class for SystemService Stubby servers."""

  @classmethod
  def _MethodSignatures(cls):
    """Returns a dict of {<method-name>: (<request-type>, <response-type>)}."""
    return {
        'GetSystemStats': (GetSystemStatsRequest, GetSystemStatsResponse),
        'StartBackgroundRequest': (StartBackgroundRequestRequest, StartBackgroundRequestResponse),
        }

  @classmethod
  def _StreamMethodSignatures(cls):
    """Returns a dict of {<method-name>: (<request-type>, <stream-type>, <response-type>)}."""
    # This service defines no streaming methods.
    return {
        }

  def __init__(self, *args, **kwargs):
    """Creates a Stubby RPC server.

    The arguments to this constructor are the same as the arguments to
    BaseRpcServer.__init__ in rpcserver.py *MINUS* export_name.  This
    constructor passes its own value for export_name to
    BaseRpcServer.__init__, so callers of this constructor should only
    pass to this constructor values corresponding to
    BaseRpcServer.__init__'s remaining arguments.
    """
    if _server_stub_base_class is object:
      raise NotImplementedError('Add //net/rpc/python:rpcserver as a '
                                'dependency for Stubby server support.')
    _server_stub_base_class.__init__(self, 'apphosting.SystemService', *args, **kwargs)

  @staticmethod
  def NewStub(rpc_stub_parameters, service_name=None, rpc_factory=None):
    """USE NewRPC2Stub INSTEAD."""
    if _client_stub_base_class is object:
      raise RuntimeError('Add //net/rpc/python as a dependency to use Stubby')
    return _SystemService_ClientStub(
        rpc_stub_parameters, service_name, rpc_factory=rpc_factory)

  @staticmethod
  def NewRPC2Stub(
      server=None, channel=None, service_name=None, rpc_factory=None):
    """Creates a new SystemService Stubby2 client stub.

    Args:
      server: host:port or bns address (favor passing a channel instead).
      channel: directly use a channel to create a stub. Will ignore server
          argument if this is specified.
      service_name: the service name used by the Stubby server.
      rpc_factory: the rpc factory to use if no rpc argument is specified.

    Returns:
      A SystemServiceStub to be used to invoke RPCs.
    """
    if _client_stub_base_class is object:
      raise RuntimeError('Add //net/rpc/python:proto_python_api_2_stub (or maybe //net/rpc/python:proto_python_api_1_stub, but eww and b/67959631) as a dependency to create Stubby stubs')
    return _SystemService_RPC2ClientStub(
        server, channel, service_name, rpc_factory=rpc_factory)

  def GetSystemStats(self, rpc, request, response):
    """Handles a GetSystemStats RPC call. You should override this.

    Args:
      rpc: a Stubby RPC object
      request: a GetSystemStatsRequest that contains the client request
      response: a GetSystemStatsResponse that should be modified to send the response
    """
    raise NotImplementedError()


  def StartBackgroundRequest(self, rpc, request, response):
    """Handles a StartBackgroundRequest RPC call. You should override this.

    Args:
      rpc: a Stubby RPC object
      request: a StartBackgroundRequestRequest that contains the client request
      response: a StartBackgroundRequestResponse that should be modified to send the response
    """
    raise NotImplementedError()

  def _AddMethodAttributes(self):
    """Sets attributes on Python RPC handlers.

    See BaseRpcServer in rpcserver.py for details.
    """
    # 'INTEGRITY' is the RPC security level required of each handler —
    # NOTE(review): semantics defined by rpcserver, not visible here.
    rpcserver._GetHandlerDecorator(
        getattr(self.GetSystemStats, '__func__'),
        GetSystemStatsRequest,
        GetSystemStatsResponse,
        None,
        'INTEGRITY')
    rpcserver._GetHandlerDecorator(
        getattr(self.StartBackgroundRequest, '__func__'),
        StartBackgroundRequestRequest,
        StartBackgroundRequestResponse,
        None,
        'INTEGRITY')
# Placeholder for extension-runtime-specific registration; nothing is
# required for this service.
if _extension_runtime:
  pass

# Public API of this generated module.
__all__ = ['SystemServiceError','SystemStat','GetSystemStatsRequest','GetSystemStatsResponse','StartBackgroundRequestRequest','StartBackgroundRequestResponse','SystemService']
| 32.220421 | 1,328 | 0.723164 |
from google.net.proto import ProtocolBuffer
import abc
import array
import base64
# Python 2 exposes allocate_lock in `thread`; fall back to threading.Lock
# on interpreters without it (same acquire/release interface).
try:
  from thread import allocate_lock as _Lock
except ImportError:
  from threading import Lock as _Lock
# Optional C++ proto-parsing accelerator; None when unavailable, in which
# case the pure-Python code paths are used throughout this module.
try:
  from google3.net.proto import _net_proto___parse__python
except ImportError:
  _net_proto___parse__python = None
import sys
# Optional Stubby (RPC) client support: resolve the API-1 stub base class
# when the RPC packages are importable, else degrade to plain `object`.
try:
  __import__('google.net.rpc.python.proto_python_api_1_stub')
  __import__('google.net.rpc.python.pywraprpc')
  proto_python_api_1_stub = sys.modules.get('google.net.rpc.python.proto_python_api_1_stub')
  pywraprpc = sys.modules.get('google.net.rpc.python.pywraprpc')
  _client_stub_base_class = proto_python_api_1_stub.Stub
except ImportError:
  _client_stub_base_class = object
# Same pattern for the server-side base class.
try:
  __import__('google.net.rpc.python.rpcserver')
  rpcserver = sys.modules.get('google.net.rpc.python.rpcserver')
  _server_stub_base_class = rpcserver.BaseRpcServer
except ImportError:
  _server_stub_base_class = object
# On Python 2, rebind `range` to the lazy xrange for the table builders.
if hasattr(__builtins__, 'xrange'): range = xrange
# Detect whether this runtime's ProtocolBuffer supports proto extensions.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class SystemServiceError(ProtocolBuffer.ProtocolMessage):
  """Generated error container for SystemService RPCs.

  The message itself carries no fields; it exists to define the ErrorCode
  enumeration used by SystemService calls.
  """
  # ErrorCode enumeration values.
  OK = 0
  INTERNAL_ERROR = 1
  BACKEND_REQUIRED = 2
  LIMIT_REACHED = 3
  _ErrorCode_NAMES = {
    0: "OK",
    1: "INTERNAL_ERROR",
    2: "BACKEND_REQUIRED",
    3: "LIMIT_REACHED",
  }
  # Maps a numeric ErrorCode to its symbolic name ("" for unknown codes).
  # Pre-decorator classmethod idiom from the Python 2 generator.
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)
  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)
  def MergeFrom(self, x):
    # Nothing to copy: this message has no fields.
    assert x is not self
  # C++ proto accelerator hooks; only defined when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.SystemServiceError', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.SystemServiceError')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.SystemServiceError')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.SystemServiceError', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.SystemServiceError', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.SystemServiceError', s)
  def Equals(self, x):
    # Two field-less messages always compare equal.
    if x is self: return 1
    return 1
  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized
  def ByteSize(self):
    n = 0
    return n
  def ByteSizePartial(self):
    n = 0
    return n
  def Clear(self):
    pass
  def OutputUnchecked(self, out):
    pass
  def OutputPartial(self, out):
    pass
  def TryMerge(self, d):
    # Skip every (unknown) tag; tag 0 is a wire-format error.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res
  # Evaluated at class-definition time: dense tuple from sparse tag dict.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemServiceError'
  # Serialized descriptor registered with the C accelerator (Python 2 APIs:
  # base64.decodestring / array.fromstring are removed in Python 3).
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KHWFwcGhvc3RpbmcuU3lzdGVtU2VydmljZUVycm9yc3oJRXJyb3JDb2RliwGSAQJPS5gBAIwBiwGSAQ5JTlRFUk5BTF9FUlJPUpgBAYwBiwGSARBCQUNLRU5EX1JFUVVJUkVEmAECjAGLAZIBDUxJTUlUX1JFQUNIRUSYAQOMAXS6AYUGCiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8SCmFwcGhvc3RpbmciZgoSU3lzdGVtU2VydmljZUVycm9yIlAKCUVycm9yQ29kZRIGCgJPSxAAEhIKDklOVEVSTkFMX0VSUk9SEAESFAoQQkFDS0VORF9SRVFVSVJFRBACEhEKDUxJTUlUX1JFQUNIRUQQAyJ0CgpTeXN0ZW1TdGF0Eg8KB2N1cnJlbnQYASABKAESEQoJYXZlcmFnZTFtGAMgASgBEhIKCmF2ZXJhZ2UxMG0YBCABKAESDQoFdG90YWwYAiABKAESDgoGcmF0ZTFtGAUgASgBEg8KB3JhdGUxMG0YBiABKAEiFwoVR2V0U3lzdGVtU3RhdHNSZXF1ZXN0ImUKFkdldFN5c3RlbVN0YXRzUmVzcG9uc2USIwoDY3B1GAEgASgLMhYuYXBwaG9zdGluZy5TeXN0ZW1TdGF0EiYKBm1lbW9yeRgCIAEoCzIWLmFwcGhvc3RpbmcuU3lzdGVtU3RhdCIfCh1TdGFydEJhY2tncm91bmRSZXF1ZXN0UmVxdWVzdCI0Ch5TdGFydEJhY2tncm91bmRSZXF1ZXN0UmVzcG9uc2USEgoKcmVxdWVzdF9pZBgBIAEoCTLdAQoNU3lzdGVtU2VydmljZRJZCg5HZXRTeXN0ZW1TdGF0cxIhLmFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXF1ZXN0GiIuYXBwaG9zdGluZy5HZXRTeXN0ZW1TdGF0c1Jlc3BvbnNlIgAScQoWU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdBIpLmFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlcXVlc3QaKi5hcHBob3N0aW5nLlN0YXJ0QmFja2dyb3VuZFJlcXVlc3RSZXNwb25zZSIAQjYKH2NvbS5nb29nbGUuYXBwZW5naW5lLmFwaS5zeXN0ZW0QAigCQg9TeXN0ZW1TZXJ2aWNlUGI="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class SystemStat(ProtocolBuffer.ProtocolMessage):
  """Generated message holding one system statistic (e.g. cpu or memory).

  Optional double fields (field numbers in parentheses): current (1),
  total (2), average1m (3), average10m (4), rate1m (5), rate10m (6).
  Each field has a has_* presence flag and a 0.0 default.
  """
  has_current_ = 0
  current_ = 0.0
  has_average1m_ = 0
  average1m_ = 0.0
  has_average10m_ = 0
  average10m_ = 0.0
  has_total_ = 0
  total_ = 0.0
  has_rate1m_ = 0
  rate1m_ = 0.0
  has_rate10m_ = 0
  rate10m_ = 0.0

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # Generated accessor quartet per field: getter, setter (marks presence),
  # clearer (resets to default), and presence test.
  def current(self): return self.current_

  def set_current(self, x):
    self.has_current_ = 1
    self.current_ = x

  def clear_current(self):
    if self.has_current_:
      self.has_current_ = 0
      self.current_ = 0.0

  def has_current(self): return self.has_current_

  def average1m(self): return self.average1m_

  def set_average1m(self, x):
    self.has_average1m_ = 1
    self.average1m_ = x

  def clear_average1m(self):
    if self.has_average1m_:
      self.has_average1m_ = 0
      self.average1m_ = 0.0

  def has_average1m(self): return self.has_average1m_

  def average10m(self): return self.average10m_

  def set_average10m(self, x):
    self.has_average10m_ = 1
    self.average10m_ = x

  def clear_average10m(self):
    if self.has_average10m_:
      self.has_average10m_ = 0
      self.average10m_ = 0.0

  def has_average10m(self): return self.has_average10m_

  def total(self): return self.total_

  def set_total(self, x):
    self.has_total_ = 1
    self.total_ = x

  def clear_total(self):
    if self.has_total_:
      self.has_total_ = 0
      self.total_ = 0.0

  def has_total(self): return self.has_total_

  def rate1m(self): return self.rate1m_

  def set_rate1m(self, x):
    self.has_rate1m_ = 1
    self.rate1m_ = x

  def clear_rate1m(self):
    if self.has_rate1m_:
      self.has_rate1m_ = 0
      self.rate1m_ = 0.0

  def has_rate1m(self): return self.has_rate1m_

  def rate10m(self): return self.rate10m_

  def set_rate10m(self, x):
    self.has_rate10m_ = 1
    self.rate10m_ = x

  def clear_rate10m(self):
    if self.has_rate10m_:
      self.has_rate10m_ = 0
      self.rate10m_ = 0.0

  def has_rate10m(self): return self.has_rate10m_


  def MergeFrom(self, x):
    # Field-wise merge: copy only fields that are present on x.
    assert x is not self
    if (x.has_current()): self.set_current(x.current())
    if (x.has_average1m()): self.set_average1m(x.average1m())
    if (x.has_average10m()): self.set_average10m(x.average10m())
    if (x.has_total()): self.set_total(x.total())
    if (x.has_rate1m()): self.set_rate1m(x.rate1m())
    if (x.has_rate10m()): self.set_rate10m(x.rate10m())

  # C++ proto accelerator hooks; only defined when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.SystemStat', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.SystemStat')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.SystemStat')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.SystemStat', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.SystemStat', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.SystemStat', s)

  def Equals(self, x):
    # Equal iff every presence flag matches and each present value matches.
    if x is self: return 1
    if self.has_current_ != x.has_current_: return 0
    if self.has_current_ and self.current_ != x.current_: return 0
    if self.has_average1m_ != x.has_average1m_: return 0
    if self.has_average1m_ and self.average1m_ != x.average1m_: return 0
    if self.has_average10m_ != x.has_average10m_: return 0
    if self.has_average10m_ and self.average10m_ != x.average10m_: return 0
    if self.has_total_ != x.has_total_: return 0
    if self.has_total_ and self.total_ != x.total_: return 0
    if self.has_rate1m_ != x.has_rate1m_: return 0
    if self.has_rate1m_ and self.rate1m_ != x.rate1m_: return 0
    if self.has_rate10m_ != x.has_rate10m_: return 0
    if self.has_rate10m_ and self.rate10m_ != x.rate10m_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields are optional; always initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    # Each present double costs 9 bytes: 1 tag byte + 8 payload bytes.
    n = 0
    if (self.has_current_): n += 9
    if (self.has_average1m_): n += 9
    if (self.has_average10m_): n += 9
    if (self.has_total_): n += 9
    if (self.has_rate1m_): n += 9
    if (self.has_rate10m_): n += 9
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_current_): n += 9
    if (self.has_average1m_): n += 9
    if (self.has_average10m_): n += 9
    if (self.has_total_): n += 9
    if (self.has_rate1m_): n += 9
    if (self.has_rate10m_): n += 9
    return n

  def Clear(self):
    self.clear_current()
    self.clear_average1m()
    self.clear_average10m()
    self.clear_total()
    self.clear_rate1m()
    self.clear_rate10m()

  def OutputUnchecked(self, out):
    # Tags are (field_number << 3) | 1 (64-bit wire type): 9, 17, 25, 33,
    # 41, 49.  Note fields are emitted in field-number order (total is
    # field 2, so it is written between current and average1m).
    if (self.has_current_):
      out.putVarInt32(9)
      out.putDouble(self.current_)
    if (self.has_total_):
      out.putVarInt32(17)
      out.putDouble(self.total_)
    if (self.has_average1m_):
      out.putVarInt32(25)
      out.putDouble(self.average1m_)
    if (self.has_average10m_):
      out.putVarInt32(33)
      out.putDouble(self.average10m_)
    if (self.has_rate1m_):
      out.putVarInt32(41)
      out.putDouble(self.rate1m_)
    if (self.has_rate10m_):
      out.putVarInt32(49)
      out.putDouble(self.rate10m_)

  def OutputPartial(self, out):
    if (self.has_current_):
      out.putVarInt32(9)
      out.putDouble(self.current_)
    if (self.has_total_):
      out.putVarInt32(17)
      out.putDouble(self.total_)
    if (self.has_average1m_):
      out.putVarInt32(25)
      out.putDouble(self.average1m_)
    if (self.has_average10m_):
      out.putVarInt32(33)
      out.putDouble(self.average10m_)
    if (self.has_rate1m_):
      out.putVarInt32(41)
      out.putDouble(self.rate1m_)
    if (self.has_rate10m_):
      out.putVarInt32(49)
      out.putDouble(self.rate10m_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 9:
        self.set_current(d.getDouble())
        continue
      if tt == 17:
        self.set_total(d.getDouble())
        continue
      if tt == 25:
        self.set_average1m(d.getDouble())
        continue
      if tt == 33:
        self.set_average10m(d.getDouble())
        continue
      if tt == 41:
        self.set_rate1m(d.getDouble())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_current_: res+=prefix+("current: %s\n" % self.DebugFormat(self.current_))
    if self.has_average1m_: res+=prefix+("average1m: %s\n" % self.DebugFormat(self.average1m_))
    if self.has_average10m_: res+=prefix+("average10m: %s\n" % self.DebugFormat(self.average10m_))
    if self.has_total_: res+=prefix+("total: %s\n" % self.DebugFormat(self.total_))
    if self.has_rate1m_: res+=prefix+("rate1m: %s\n" % self.DebugFormat(self.rate1m_))
    if self.has_rate10m_: res+=prefix+("rate10m: %s\n" % self.DebugFormat(self.rate10m_))
    return res

  # Evaluated at class-definition time: dense tuple from sparse tag dict.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field-number constants.
  kcurrent = 1
  kaverage1m = 3
  kaverage10m = 4
  ktotal = 2
  krate1m = 5
  krate10m = 6

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "current",
    2: "total",
    3: "average1m",
    4: "average10m",
    5: "rate1m",
    6: "rate10m",
  }, 6)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.DOUBLE,
    2: ProtocolBuffer.Encoder.DOUBLE,
    3: ProtocolBuffer.Encoder.DOUBLE,
    4: ProtocolBuffer.Encoder.DOUBLE,
    5: ProtocolBuffer.Encoder.DOUBLE,
    6: ProtocolBuffer.Encoder.DOUBLE,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemStat'
  # Serialized descriptor registered with the C accelerator (Python 2 APIs).
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KFWFwcGhvc3RpbmcuU3lzdGVtU3RhdBMaB2N1cnJlbnQgASgBMAE4ARQTGglhdmVyYWdlMW0gAygBMAE4ARQTGgphdmVyYWdlMTBtIAQoATABOAEUExoFdG90YWwgAigBMAE4ARQTGgZyYXRlMW0gBSgBMAE4ARQTGgdyYXRlMTBtIAYoATABOAEUwgEdYXBwaG9zdGluZy5TeXN0ZW1TZXJ2aWNlRXJyb3I="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class GetSystemStatsRequest(ProtocolBuffer.ProtocolMessage):
  """Generated request message for SystemService.GetSystemStats.

  The message declares no fields; every serialization method below is a
  no-op apart from skipping unknown tags on merge.
  """
  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)
  def MergeFrom(self, x):
    # Nothing to copy: this message has no fields.
    assert x is not self
  # C++ proto accelerator hooks; only defined when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.GetSystemStatsRequest', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.GetSystemStatsRequest')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetSystemStatsRequest')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.GetSystemStatsRequest', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.GetSystemStatsRequest', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetSystemStatsRequest', s)
  def Equals(self, x):
    # Two field-less messages always compare equal.
    if x is self: return 1
    return 1
  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized
  def ByteSize(self):
    n = 0
    return n
  def ByteSizePartial(self):
    n = 0
    return n
  def Clear(self):
    pass
  def OutputUnchecked(self, out):
    pass
  def OutputPartial(self, out):
    pass
  def TryMerge(self, d):
    # Skip every (unknown) tag; tag 0 is a wire-format error.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res
  # Evaluated at class-definition time: dense tuple from sparse tag dict.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsRequest'
  # Serialized descriptor registered with the C accelerator (Python 2 APIs).
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KIGFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXF1ZXN0wgEdYXBwaG9zdGluZy5TeXN0ZW1TZXJ2aWNlRXJyb3I="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class GetSystemStatsResponse(ProtocolBuffer.ProtocolMessage):
has_cpu_ = 0
cpu_ = None
has_memory_ = 0
memory_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = _Lock()
if contents is not None: self.MergeFromString(contents)
def cpu(self):
if self.cpu_ is None:
self.lazy_init_lock_.acquire()
try:
if self.cpu_ is None: self.cpu_ = SystemStat()
finally:
self.lazy_init_lock_.release()
return self.cpu_
def mutable_cpu(self): self.has_cpu_ = 1; return self.cpu()
def clear_cpu(self):
if self.has_cpu_:
self.has_cpu_ = 0;
if self.cpu_ is not None: self.cpu_.Clear()
def has_cpu(self): return self.has_cpu_
def memory(self):
if self.memory_ is None:
self.lazy_init_lock_.acquire()
try:
if self.memory_ is None: self.memory_ = SystemStat()
finally:
self.lazy_init_lock_.release()
return self.memory_
def mutable_memory(self): self.has_memory_ = 1; return self.memory()
def clear_memory(self):
if self.has_memory_:
self.has_memory_ = 0;
if self.memory_ is not None: self.memory_.Clear()
def has_memory(self): return self.has_memory_
def MergeFrom(self, x):
assert x is not self
if (x.has_cpu()): self.mutable_cpu().MergeFrom(x.cpu())
if (x.has_memory()): self.mutable_memory().MergeFrom(x.memory())
if _net_proto___parse__python is not None:
def _CMergeFromString(self, s):
_net_proto___parse__python.MergeFromString(self, 'apphosting.GetSystemStatsResponse', s)
if _net_proto___parse__python is not None:
def _CEncode(self):
return _net_proto___parse__python.Encode(self, 'apphosting.GetSystemStatsResponse')
if _net_proto___parse__python is not None:
def _CEncodePartial(self):
return _net_proto___parse__python.EncodePartial(self, 'apphosting.GetSystemStatsResponse')
if _net_proto___parse__python is not None:
def _CToASCII(self, output_format):
return _net_proto___parse__python.ToASCII(self, 'apphosting.GetSystemStatsResponse', output_format)
if _net_proto___parse__python is not None:
def ParseASCII(self, s):
_net_proto___parse__python.ParseASCII(self, 'apphosting.GetSystemStatsResponse', s)
if _net_proto___parse__python is not None:
def ParseASCIIIgnoreUnknown(self, s):
_net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.GetSystemStatsResponse', s)
def Equals(self, x):
if x is self: return 1
if self.has_cpu_ != x.has_cpu_: return 0
if self.has_cpu_ and self.cpu_ != x.cpu_: return 0
if self.has_memory_ != x.has_memory_: return 0
if self.has_memory_ and self.memory_ != x.memory_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_cpu_ and not self.cpu_.IsInitialized(debug_strs)): initialized = 0
if (self.has_memory_ and not self.memory_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_cpu_): n += 1 + self.lengthString(self.cpu_.ByteSize())
if (self.has_memory_): n += 1 + self.lengthString(self.memory_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_cpu_): n += 1 + self.lengthString(self.cpu_.ByteSizePartial())
if (self.has_memory_): n += 1 + self.lengthString(self.memory_.ByteSizePartial())
return n
def Clear(self):
self.clear_cpu()
self.clear_memory()
def OutputUnchecked(self, out):
if (self.has_cpu_):
out.putVarInt32(10)
out.putVarInt32(self.cpu_.ByteSize())
self.cpu_.OutputUnchecked(out)
if (self.has_memory_):
out.putVarInt32(18)
out.putVarInt32(self.memory_.ByteSize())
self.memory_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_cpu_):
out.putVarInt32(10)
out.putVarInt32(self.cpu_.ByteSizePartial())
self.cpu_.OutputPartial(out)
if (self.has_memory_):
out.putVarInt32(18)
out.putVarInt32(self.memory_.ByteSizePartial())
self.memory_.OutputPartial(out)
  def TryMerge(self, d):
    # Decode fields from decoder `d`, merging into this message.
    # Unknown tags are skipped; tag 0 is malformed input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        # Field 1 (cpu): length-delimited submessage.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_cpu().TryMerge(tmp)
        continue
      if tt == 18:
        # Field 2 (memory): length-delimited submessage.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_memory().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    # Text-format debug dump; present submessages are rendered indented
    # inside angle brackets.
    res=""
    if self.has_cpu_:
      res+=prefix+"cpu <\n"
      res+=self.cpu_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_memory_:
      res+=prefix+"memory <\n"
      res+=self.memory_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} mapping into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  # Field-number constants for the message's two submessage fields.
  kcpu = 1
  kmemory = 2
  # Dense tag -> field-name table used by the text formatter.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "cpu",
    2: "memory",
  }, 2)
  # Dense tag -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsResponse'
  # Base64-encoded serialized descriptor, registered with the optional
  # C++ parser extension so the accelerated codec knows this type.
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KIWFwcGhvc3RpbmcuR2V0U3lzdGVtU3RhdHNSZXNwb25zZRMaA2NwdSABKAIwCzgBShVhcHBob3N0aW5nLlN5c3RlbVN0YXSjAaoBBWN0eXBlsgEGcHJvdG8ypAEUExoGbWVtb3J5IAIoAjALOAFKFWFwcGhvc3RpbmcuU3lzdGVtU3RhdKMBqgEFY3R5cGWyAQZwcm90bzKkARTCAR1hcHBob3N0aW5nLlN5c3RlbVNlcnZpY2VFcnJvcg=="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class StartBackgroundRequestRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message for apphosting.StartBackgroundRequestRequest.

  The request carries no fields; it only triggers the RPC. All the size,
  output and clear methods are therefore trivial no-ops.
  """
  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)
  def MergeFrom(self, x):
    # Nothing to merge for a field-less message; only sanity-check aliasing.
    assert x is not self
  # C++-accelerated codec hooks, defined only when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.StartBackgroundRequestRequest', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.StartBackgroundRequestRequest')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.StartBackgroundRequestRequest')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.StartBackgroundRequestRequest', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.StartBackgroundRequestRequest', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.StartBackgroundRequestRequest', s)
  def Equals(self, x):
    # All instances are equal since there are no fields.
    if x is self: return 1
    return 1
  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized
  def ByteSize(self):
    n = 0
    return n
  def ByteSizePartial(self):
    n = 0
    return n
  def Clear(self):
    pass
  def OutputUnchecked(self, out):
    pass
  def OutputPartial(self, out):
    pass
  def TryMerge(self, d):
    # Skip any unknown fields; tag 0 is malformed input.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} mapping into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestRequest'
  # Serialized descriptor registered with the optional C++ extension.
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KKGFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlcXVlc3TCAR1hcHBob3N0aW5nLlN5c3RlbVNlcnZpY2VFcnJvcg=="))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class StartBackgroundRequestResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message for apphosting.StartBackgroundRequestResponse.

  Single optional string field: request_id (field number 1), the id of the
  background request that was started.
  """
  # Class-level defaults; instances shadow them when a field is set.
  has_request_id_ = 0
  request_id_ = ""
  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
  def request_id(self): return self.request_id_
  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x
  def clear_request_id(self):
    # Guard so clearing an unset field leaves the class default untouched.
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""
  def has_request_id(self): return self.has_request_id_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_request_id()): self.set_request_id(x.request_id())
  # C++-accelerated codec hooks, defined only when the extension loaded.
  if _net_proto___parse__python is not None:
    def _CMergeFromString(self, s):
      _net_proto___parse__python.MergeFromString(self, 'apphosting.StartBackgroundRequestResponse', s)
  if _net_proto___parse__python is not None:
    def _CEncode(self):
      return _net_proto___parse__python.Encode(self, 'apphosting.StartBackgroundRequestResponse')
  if _net_proto___parse__python is not None:
    def _CEncodePartial(self):
      return _net_proto___parse__python.EncodePartial(self, 'apphosting.StartBackgroundRequestResponse')
  if _net_proto___parse__python is not None:
    def _CToASCII(self, output_format):
      return _net_proto___parse__python.ToASCII(self, 'apphosting.StartBackgroundRequestResponse', output_format)
  if _net_proto___parse__python is not None:
    def ParseASCII(self, s):
      _net_proto___parse__python.ParseASCII(self, 'apphosting.StartBackgroundRequestResponse', s)
  if _net_proto___parse__python is not None:
    def ParseASCIIIgnoreUnknown(self, s):
      _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'apphosting.StartBackgroundRequestResponse', s)
  def Equals(self, x):
    # Presence bit and value must both match.
    if x is self: return 1
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # No required fields, so always initialized.
    initialized = 1
    return initialized
  def ByteSize(self):
    # One tag byte plus the length-prefixed string when present.
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    return n
  def ByteSizePartial(self):
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    return n
  def Clear(self):
    self.clear_request_id()
  def OutputUnchecked(self, out):
    # Tag 10 == (field 1 << 3) | 2 (length-delimited).
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
  def OutputPartial(self, out):
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_request_id(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} mapping into a tuple indexed 0..maxtag.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  krequest_id = 1
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "request_id",
  }, 1)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestResponse'
  # Serialized descriptor registered with the optional C++ extension.
  _SERIALIZED_DESCRIPTOR = array.array('B')
  _SERIALIZED_DESCRIPTOR.fromstring(base64.decodestring("WiphcHBob3N0aW5nL2FwaS9zeXN0ZW0vc3lzdGVtX3NlcnZpY2UucHJvdG8KKWFwcGhvc3RpbmcuU3RhcnRCYWNrZ3JvdW5kUmVxdWVzdFJlc3BvbnNlExoKcmVxdWVzdF9pZCABKAIwCTgBFMIBHWFwcGhvc3RpbmcuU3lzdGVtU2VydmljZUVycm9y"))
  if _net_proto___parse__python is not None:
    _net_proto___parse__python.RegisterType(
        _SERIALIZED_DESCRIPTOR.tostring())
class SystemServiceStub(object):
  """Abstract client-side stub interface for apphosting.SystemService.

  Concrete stubs (see _SystemService_ClientBaseStub) implement the two
  RPC methods; this class only declares them.
  """
  __metaclass__ = abc.ABCMeta  # Python 2 metaclass declaration (generated code).
  __slots__ = ()
  @abc.abstractmethod
  def GetSystemStats(self, request, rpc=None, callback=None, response=None):
    raise NotImplementedError()
  @abc.abstractmethod
  def StartBackgroundRequest(self, request, rpc=None, callback=None, response=None):
    raise NotImplementedError()
class _SystemService_ClientBaseStub(
    SystemServiceStub, _client_stub_base_class):
  """Base client stub: wires each RPC method to the underlying RPC channel.

  Caches per-method RPC objects and fully-qualified method names at
  construction time so each call is a single _MakeCall dispatch.
  """
  __slots__ = (
      '_protorpc_GetSystemStats', '_full_name_GetSystemStats',
      '_protorpc_StartBackgroundRequest', '_full_name_StartBackgroundRequest',
  )
  def __init__(self, rpc_stub, rpc_factory=None):
    super(_SystemService_ClientBaseStub, self).__init__(
        None, inject_stub=rpc_stub, rpc_factory=rpc_factory)
    self._protorpc_GetSystemStats = pywraprpc.RPC()
    self._full_name_GetSystemStats = self._stub.GetFullMethodName(
        'GetSystemStats')
    self._protorpc_StartBackgroundRequest = pywraprpc.RPC()
    self._full_name_StartBackgroundRequest = self._stub.GetFullMethodName(
        'StartBackgroundRequest')
  def GetSystemStats(self, request, rpc=None, callback=None, response=None):
    # When no response object is supplied, pass the response *class*;
    # _MakeCall is responsible for instantiating it.
    if response is None:
      response = GetSystemStatsResponse
    return self._MakeCall(rpc,
                          self._full_name_GetSystemStats,
                          'GetSystemStats',
                          request,
                          response,
                          callback,
                          self._protorpc_GetSystemStats,
                          package_name='apphosting')
  def StartBackgroundRequest(self, request, rpc=None, callback=None, response=None):
    if response is None:
      response = StartBackgroundRequestResponse
    return self._MakeCall(rpc,
                          self._full_name_StartBackgroundRequest,
                          'StartBackgroundRequest',
                          request,
                          response,
                          callback,
                          self._protorpc_StartBackgroundRequest,
                          package_name='apphosting')
class _SystemService_ClientStub(_SystemService_ClientBaseStub):
  """Client stub built from raw RPC stub parameters (RPC v1 path)."""
  __slots__ = ('_params',)
  def __init__(self, rpc_stub_parameters, service_name, rpc_factory=None):
    # Default the service name when the caller does not override it.
    if service_name is None:
      service_name = 'SystemService'
    stub = pywraprpc.RPC_GenericStub(service_name, rpc_stub_parameters)
    super(_SystemService_ClientStub, self).__init__(stub, rpc_factory=rpc_factory)
    self._params = rpc_stub_parameters
class _SystemService_RPC2ClientStub(_SystemService_ClientBaseStub):
  """Client stub built on an RPC2 channel (or a server address)."""
  __slots__ = ()
  def __init__(self, server, channel, service_name, rpc_factory=None):
    if service_name is None:
      service_name = 'SystemService'
    # Either an existing RPC2 channel or a server address must be given;
    # a v1 channel is explicitly rejected.
    if channel is None:
      if server is None:
        raise RuntimeError('Invalid argument combination to create a stub')
      channel = pywraprpc.NewClientChannel(server)
    elif channel.version() == 1:
      raise RuntimeError('Expecting an RPC2 channel to create the stub')
    stub = pywraprpc.RPC_GenericStub(service_name, channel)
    super(_SystemService_RPC2ClientStub, self).__init__(stub, rpc_factory=rpc_factory)
class SystemService(_server_stub_base_class):
  """Server-side base class for apphosting.SystemService.

  Subclasses override GetSystemStats/StartBackgroundRequest. Stub factory
  helpers raise at runtime unless the optional Stubby support modules were
  importable (in which case the *_base_class names are not `object`).
  """
  @classmethod
  def _MethodSignatures(cls):
    # Map of method name -> (request class, response class).
    return {
      'GetSystemStats': (GetSystemStatsRequest, GetSystemStatsResponse),
      'StartBackgroundRequest': (StartBackgroundRequestRequest, StartBackgroundRequestResponse),
      }
  @classmethod
  def _StreamMethodSignatures(cls):
    # This service defines no streaming methods.
    return {
      }
  def __init__(self, *args, **kwargs):
    if _server_stub_base_class is object:
      raise NotImplementedError('Add //net/rpc/python:rpcserver as a '
                                'dependency for Stubby server support.')
    _server_stub_base_class.__init__(self, 'apphosting.SystemService', *args, **kwargs)
  @staticmethod
  def NewStub(rpc_stub_parameters, service_name=None, rpc_factory=None):
    """Create a v1 client stub for this service."""
    if _client_stub_base_class is object:
      raise RuntimeError('Add //net/rpc/python as a dependency to use Stubby')
    return _SystemService_ClientStub(
        rpc_stub_parameters, service_name, rpc_factory=rpc_factory)
  @staticmethod
  def NewRPC2Stub(
      server=None, channel=None, service_name=None, rpc_factory=None):
    """Create an RPC2 client stub from a channel or server address."""
    if _client_stub_base_class is object:
      raise RuntimeError('Add //net/rpc/python:proto_python_api_2_stub (or maybe //net/rpc/python:proto_python_api_1_stub, but eww and b/67959631) as a dependency to create Stubby stubs')
    return _SystemService_RPC2ClientStub(
        server, channel, service_name, rpc_factory=rpc_factory)
  def GetSystemStats(self, rpc, request, response):
    # Server handler; must be overridden by a concrete service.
    raise NotImplementedError()
  def StartBackgroundRequest(self, rpc, request, response):
    # Server handler; must be overridden by a concrete service.
    raise NotImplementedError()
  def _AddMethodAttributes(self):
    # Register each handler with the RPC server decorator machinery.
    rpcserver._GetHandlerDecorator(
        getattr(self.GetSystemStats, '__func__'),
        GetSystemStatsRequest,
        GetSystemStatsResponse,
        None,
        'INTEGRITY')
    rpcserver._GetHandlerDecorator(
        getattr(self.StartBackgroundRequest, '__func__'),
        StartBackgroundRequestRequest,
        StartBackgroundRequestResponse,
        None,
        'INTEGRITY')
# Hook point for runtime-specific extensions; currently a no-op.
if _extension_runtime:
  pass
# Public API of this generated module.
__all__ = ['SystemServiceError','SystemStat','GetSystemStatsRequest','GetSystemStatsResponse','StartBackgroundRequestRequest','StartBackgroundRequestResponse','SystemService']
| true | true |
f73ea6b48f02b966ed801c86743b03f96e4e362e | 18,171 | py | Python | countcps.py | nthall/Twilio-Tools-2 | 26605c97bd462b5e6a74fa79f3f9ffb0b2217af6 | [
"MIT"
] | 5 | 2020-11-03T14:37:19.000Z | 2021-02-09T19:45:25.000Z | countcps.py | nthall/Twilio-Tools-2 | 26605c97bd462b5e6a74fa79f3f9ffb0b2217af6 | [
"MIT"
] | 1 | 2021-12-11T04:51:25.000Z | 2021-12-11T04:51:25.000Z | countcps.py | nthall/Twilio-Tools-2 | 26605c97bd462b5e6a74fa79f3f9ffb0b2217af6 | [
"MIT"
] | 3 | 2021-10-30T04:08:58.000Z | 2022-02-17T01:34:42.000Z | #!/usr/bin/env python
"""Program that takes a CSV file of CDRs and produces a list of one-second intervals
with call counts, again as a CSV file. Optionally, the program will display the
spread of CPS values.
usage: countcps.py [-h] [-s START] [-e END] [--tz TZ]
[-t {auto,header,positional}] [-c COLUMN] [--spread]
[--queue] [--version] [--log {debug,info,warning}]
cdr_file cps_file
Create a calls-per-second CSV file from a CDR file.
positional arguments:
cdr_file input CSV file containing call detail records
cps_file output CSV file containing CPS counts
optional arguments:
-h, --help show this help message and exit
-s START, --start START ignore records before this date/time
(YYYY-MM-DD [[HH:MM:SS]±HH:MM])
-e END, --end END ignore records after this date/time
(YYYY-MM-DD [[HH:MM:SS]±HH:MM])
--tz TZ timezone as ±HHMM offset from UTC (default: timezone
of local machine)
-t {auto,header,positional}, --type {auto,header,positional}
specify format of CDR file (auto: autodetect; header:
has a header row; positional: no header row)
-c COLUMN, --column COLUMN column name or number containing call start date/time
--spread display CPS spread
--queue display queue time estimates from CDRs
--version show program's version number and exit
--log {debug,info,warning} set logging level
The program will by default attempt to auto-detect the format of the CDR file. Twilio
Console, Looker and Monkey download formats are recognized. Otherwise, it looks for the
first column that is formatted as an ISO 8601 date. If the above conditions are not true,
then you should specify the name (if there is a header row) or number (if no header) of
the column that contains the date/time the call was made.
Note that the program will automatically filter out non-Outgoing API calls for Console,
Looker and Monkey CDRs; for other sources, you should make sure that the only calls
included in the CDR file are outbound calls.
"""
import sys
import argparse
from datetime import datetime, timedelta
import csv
import logging
from decimal import Decimal
__version__ = "1.0"
# Positional field names used when the CDR file has no header row; the
# -c/--column option can then select a column by its 1-based number.
DEFAULT_FIELDNAMES = \
    ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'] # Used to select a CDR field by position
# Known CDR timestamp layouts, tried in order; a value of None means
# ISO 8601 (parsed with datetime.fromisoformat).
DATETIME_FORMATS = {
    'Monkey': "%a, %d %b %Y %H:%M:%S %z", # e.g. "Sat, 12 Sep 2020 10:30:05 -0700"
    'Console': "%H:%M:%S %Z %Y-%m-%d", # e.g. "14:52:06 EDT 2020-09-10"
    'ISO': None # e.g. "2020-09-10 14:52:06.000"
}
# Module-level logger; handlers are attached by configure_logging().
logger = logging.getLogger(__name__)
def configure_logging(level=logging.INFO):
    """Attach a timestamped stream handler to the module logger at `level`."""
    logger.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s.%(msecs)03d: %(message)s', datefmt='%H:%M:%S'))
    logger.addHandler(stream_handler)
def get_args():
    """Parse and validate the command line, returning the argparse namespace.

    Exits via parser.error() when --type and --column are inconsistent.
    """
    col_num = None

    # Validate a -c/--column value: an integer selects a positional column
    # (must be within DEFAULT_FIELDNAMES); anything else is a column name.
    # NOTE: parameter renamed from `str`, which shadowed the builtin.
    def column_id(value):
        try:
            nonlocal col_num
            col_num = int(value)
            if col_num < 1 or col_num > len(DEFAULT_FIELDNAMES):
                raise argparse.ArgumentTypeError("Column number is out of range")
            else:
                return value
        except ValueError:
            # Not an integer: treat it as a column name.
            return value

    # Parse a ±HHMM timezone offset and return a tzinfo object.
    # NOTE: parameter renamed from `str`, which shadowed the builtin.
    def tzinfo(offset):
        try:
            dt = datetime.strptime(offset, '%z')
            return dt.tzinfo
        except ValueError:
            raise argparse.ArgumentTypeError(
                "Timezone offset should be a signed value in the form ±HHMM")

    # Default --tz to the timezone of the local machine.
    now = datetime.now()
    local_timezone = now.astimezone().tzinfo
    parser = argparse.ArgumentParser(
        description="Create a calls-per-second CSV file from a CDR file.",
        epilog=(
            "We recommend defaulting the CDR file type to 'auto', unless the start "
            "date/time is not the first date/time column in the file, in which "
            "case you should specify 'column', which is the name (type='header') "
            "or number (type='positional') of the start date/time column. "
            "Add a filename to the command line prefixed by '@' if you wish to place "
            "parameters in a file, one parameter per line."),
        fromfile_prefix_chars='@')
    parser.add_argument(
        'cdr_file', type=argparse.FileType('r'),
        help="input CSV file containing call detail records")
    parser.add_argument(
        'cps_file', type=argparse.FileType('w'),
        help="output CSV file containing CPS counts")
    parser.add_argument(
        '-s', '--start', type=datetime.fromisoformat,
        help="ignore records before this date/time (YYYY-MM-DD [[HH:MM:SS]±HHMM])")
    parser.add_argument(
        '-e', '--end', type=datetime.fromisoformat,
        help="ignore records after this date/time (YYYY-MM-DD [[HH:MM:SS]±HHMM])")
    parser.add_argument(
        '--tz', default=local_timezone, type=tzinfo,
        help="timezone as ±HHMM offset from UTC (default: timezone of local machine)")
    parser.add_argument(
        '-t', '--type', choices=['auto', 'header', 'positional'], default='auto',
        help=("specify format of CDR file (auto: autodetect; "
              "header: has a header row; positional: no header row)"))
    parser.add_argument(
        '-c', '--column', type=column_id,
        help="column name or number containing call start date/time")
    parser.add_argument(
        '--spread', action='store_true',
        help="display CPS spread")
    parser.add_argument(
        '--queue', action='store_true',
        help="display queue time estimates from CDRs")
    parser.add_argument(
        '--version', action='version', version=__version__)
    parser.add_argument(
        '--log', choices=['debug', 'info', 'warning'], default='info',
        help="set logging level")
    args = parser.parse_args()
    # Cross-validate --type against the kind of --column value supplied.
    if args.type == 'positional' and not col_num:
        parser.error("Start date/time field specified by position, but no column number specified")
    if args.type == 'header' and not args.column:
        parser.error("Start date/time field specified by column name, but none specified")
    return args
def look_for_datetime(columns):
    """Scan one CSV row for values formatted as date/times.

    Each format in DATETIME_FORMATS is tried in turn; scanning stops at the
    first format that matches at least one column. Returns a tuple of
    (matching column numbers indexed from 1, the matching strptime format
    string or None for ISO, tzinfo of the last matching value).
    """
    dt_cols = []
    tzinfo = None
    for fmt_name, fmt_string in DATETIME_FORMATS.items():
        logger.debug('Trying %s datetime format', fmt_name)
        for position, cell in enumerate(columns, start=1):
            try:
                parsed = (datetime.strptime(cell, fmt_string) if fmt_string
                          else datetime.fromisoformat(cell))
            except ValueError:
                continue
            dt_cols.append(position)
            tzinfo = parsed.tzinfo
        if dt_cols:
            break
    if dt_cols:
        logger.debug("Columns formatted as date/time values: %s", dt_cols)
        logger.debug("Datetime format is %s", fmt_name)
        logger.debug("Timezone in CDR file is %s", tzinfo)
    else:
        logger.debug("No datetime items found in row")
    return (dt_cols, fmt_string, tzinfo)
def look_for_header(columns, candidates):
    """Return the first candidate name present in columns, or None."""
    return next((name for name in candidates if name in columns), None)
class CDRinfo:
    """Detected layout of a CDR file.

    Records whether the file has a header row, which columns hold the start
    time / flags / direction / queue time, and the timestamp format and
    timezone used by the records. All fields start out unknown.
    """
    def __init__(self):
        self.has_header = False
        # Column identifiers (name or 1-based position as a string) and
        # timestamp metadata are unknown until detection runs.
        for attr in ('start_col_id', 'flags_col_id', 'direction_col_id',
                     'queuetime_col_id', 'datetime_format', 'tzinfo'):
            setattr(self, attr, None)
# Returns a CDRinfo containing details of the name or position of Flags
# and DateCreated/StartTime columns, and the date/time format.
def detect_cdr_type(args):
    """Inspect the CDR file and work out its layout.

    Determines header presence, the start date/time column, and (for known
    Twilio export formats) the Flags/Direction/QueueTime columns. Exits via
    sys.exit() with a message on any unusable file. Rewinds args.cdr_file
    before returning so the caller can re-read it.
    """
    # Let's initially assume the CDR file has a header, and get the field names.
    cdr_info = CDRinfo()
    reader = csv.DictReader(args.cdr_file)
    fieldnames = reader.fieldnames
    if fieldnames is None:
        sys.exit("Error: CDR file is empty!")
    logger.debug("Header fieldnames: %s", fieldnames)
    # See whether this is a real header by determining whether any of the
    # field names are actually datetimes.
    dt_cols, cdr_info.datetime_format, cdr_info.tzinfo = look_for_datetime(fieldnames)
    cdr_info.has_header = False if dt_cols else True
    # Next, do a little more validation.
    if args.type == 'positional' and cdr_info.has_header:
        sys.exit("Error: CDR file has header row, but start date/time was specified by position")
    if args.type == 'header' and not cdr_info.has_header:
        sys.exit("Error: CDR file has no header row, but start date/time was specified by column name")
    # If there's a header, get the next row to use as a sample.
    if cdr_info.has_header:
        try:
            sample_row = next(reader).values()
            logger.debug("Sample row: %s", sample_row)
        except StopIteration:
            sys.exit("Error: CDR file contains no call records!")
        dt_cols, cdr_info.datetime_format, cdr_info.tzinfo = look_for_datetime(sample_row)
        if not dt_cols:
            sys.exit("Error: CDR file contains no recognizable call records!")
    # If the start date/time column is positional, check against the header row.
    if args.type == 'positional':
        if int(args.column) in dt_cols:
            cdr_info.start_col_id = args.column
            logger.info("CDR file confirmed as type 'positional'")
        else:
            sys.exit(f"Column {args.column} does not contain date/time values")
    # If the start date/time column was specified by name, check against the sample row.
    elif args.type == 'header':
        try:
            column_num = fieldnames.index(args.column) + 1 # Remember, indexed from 1
        except ValueError:
            sys.exit(f"No such column name '{args.column}' in header row")
        if column_num in dt_cols:
            cdr_info.start_col_id = args.column
            logger.info("CDR file confirmed as type 'header'")
        else:
            sys.exit(f"Column {args.column} does not contain date/time values")
    # Autodetect: look for Monkey/Looker/Console headers. If we can't find a recognized
    # start date/time header, we'll pick the first column with a datetime.
    elif args.type == 'auto':
        if cdr_info.has_header:
            # Determine whether any of the standard headers are present.
            cdr_info.flags_col_id = look_for_header(fieldnames, ['Flags', 'flags'])
            cdr_info.direction_col_id = look_for_header(fieldnames, ['Direction', 'direction'])
            cdr_info.queuetime_col_id = look_for_header(fieldnames, ['QueueTime', 'queue_time'])
            cdr_info.start_col_id = look_for_header(
                fieldnames, ['DateCreated', 'date_created', 'StartTime', 'start_time'])
            if cdr_info.flags_col_id:
                logger.info("CDR file autodetected as likely from Monkey or Looker")
            elif cdr_info.direction_col_id:
                logger.info("CDR file autodetected as likely from Console or getcdrs.py")
            # If there's a defined start date/time header, make sure the column is a datetime.
            if cdr_info.start_col_id:
                col_num = fieldnames.index(cdr_info.start_col_id) + 1 # Indexed from 1
                if col_num not in dt_cols:
                    # BUGFIX: report the detected start column, not args.column
                    # (which is normally None in 'auto' mode).
                    sys.exit(f"Column {cdr_info.start_col_id} does not contain date/time values")
            # Otherwise pick the first column with a datetime.
            else:
                cdr_info.start_col_id = fieldnames[dt_cols[0] - 1]
                logger.info("CDR file autodetected as type 'header'")
        else:
            # No headers, so pick the first datetime column.
            cdr_info.start_col_id = str(dt_cols[0])
            logger.info("CDR file autodetected as type 'positional'")
    logger.debug("Start column is '%s'", cdr_info.start_col_id)
    logger.debug("Flags column is '%s'", cdr_info.flags_col_id)
    logger.debug("Direction column is '%s'", cdr_info.direction_col_id)
    args.cdr_file.seek(0) # Reset reader to beginning of file again.
    return cdr_info
def adjust_start_and_end_times(start, end, cdr_tz, given_tz):
    """Normalize the --start/--end filter bounds against the CDR timestamps.

    Aware and naive datetimes cannot be compared, so: when the CDRs carry
    timezone info (cdr_tz truthy), naive bounds are localized to given_tz;
    when the CDRs are naive, any timezone info on the bounds is stripped.
    Returns the (start, end) pair, either of which may be None.
    """
    if start: logger.debug("Start date/time parsed as %r", start)
    if end: logger.debug("End date/time parsed as %r", end)
    logger.debug("Timezone adjustment if needed: %r", given_tz)

    def conform(bound):
        # None passes through untouched; datetimes are always truthy.
        if not bound:
            return bound
        if cdr_tz:
            return bound.replace(tzinfo=given_tz) if bound.tzinfo is None else bound
        return bound.replace(tzinfo=None) if bound.tzinfo else bound

    start = conform(start)
    end = conform(end)
    if start: logger.debug("Adjusted start date/time: %r", start)
    if end: logger.debug("Adjusted end date/time: %r", end)
    return start, end
def calculate_spread(intervals):
    """Build a histogram mapping each CPS value to how often it occurred."""
    logger.debug("Calculating spread...")
    spread = {}
    for cps in intervals.values():
        spread[cps] = spread.get(cps, 0) + 1
    return spread
def print_spread(spread):
    """Print the CPS histogram, one line per distinct CPS value, ascending."""
    lines = ["", "Spread", "------"]
    for cps in sorted(spread):
        lines.append(f'{cps:4d} CPS: x {spread[cps]}')
    lines.append("")
    print("\n".join(lines))
def print_queue_times(queue_times):
    """Print the queue-time histogram, or a notice when none were recorded."""
    print()
    if not queue_times:
        print("No queue times were recorded")
    else:
        print("Queue Time Estimates")
        print("--------------------")
        for qt in sorted(queue_times):
            print(f'{qt:6.2f} secs: x {queue_times[qt]}')
    print()
def main(args):
    """Read the CDR file, tally outbound calls per start timestamp, and
    write the counts to the CPS file; optionally print spread/queue stats.

    NOTE(review): intervals are keyed by the full parsed datetime. For the
    Console/Monkey formats this is second precision, but ISO timestamps with
    fractional seconds would split a one-second interval — confirm inputs.
    """
    configure_logging(level=getattr(logging, args.log.upper()))
    cdr_info = detect_cdr_type(args)
    start, end = adjust_start_and_end_times(args.start, args.end, cdr_info.tzinfo, args.tz)
    logger.debug("Reading CSV file...")
    intervals = {}      # call start datetime -> number of calls
    queue_times = {}    # queue time (Decimal seconds) -> number of calls
    num_read = 0
    num_counted = 0
    num_written = 0
    with args.cdr_file as cdr_file:
        cdrs = csv.DictReader(cdr_file, fieldnames=None if cdr_info.has_header else DEFAULT_FIELDNAMES)
        for cdr in cdrs:
            try:
                num_read += 1
                # Filter all but Outgoing API calls, if the CDRs were exported from Monkey, Looker or
                # Twilio Console. If not from these sources, the CDR file should be pre-filtered.
                # Flags definition can be found here: https://wiki.hq.twilio.com/display/RT/Call (Twilions only).
                if cdr_info.flags_col_id and (int(cdr[cdr_info.flags_col_id]) & 0x0002 != 2):
                    continue
                if cdr_info.direction_col_id and (cdr[cdr_info.direction_col_id] not in ['Outgoing API', 'outbound-api']):
                    continue
                # Get the call start date/time, according to the format of the source.
                if cdr_info.datetime_format is None:
                    call_start = datetime.fromisoformat(cdr[cdr_info.start_col_id])
                else:
                    call_start = datetime.strptime(
                        cdr[cdr_info.start_col_id],
                        cdr_info.datetime_format)
                # If the call was queued, add it to a tally for the queue length, and adjust the start time.
                if cdr_info.queuetime_col_id:
                    queue_time = Decimal(cdr[cdr_info.queuetime_col_id]) / 1000 # Result in seconds
                    if queue_time in queue_times.keys():
                        queue_times[queue_time] += 1
                    else:
                        queue_times[queue_time] = 1
                    if queue_time > 0:
                        # NOTE(review): int() truncates fractional queue seconds,
                        # so the adjustment is accurate to whole seconds only.
                        call_start -= timedelta(seconds=int(queue_time))
                # Filter records outside of the chosen period.
                if start and call_start < start: continue
                if end and call_start >= end: continue
                # Count the call against its CPS interval.
                num_counted += 1
                if call_start in intervals.keys():
                    intervals[call_start] += 1
                else:
                    intervals[call_start] = 1
            except Exception as err:
                # Any parsing problem aborts the whole run after logging the offending row.
                logger.error("Line: %s", cdr)
                sys.exit(f"Problem parsing CDR file: {str(err)}")
    logger.debug("%s records read, %s records counted", num_read, num_counted)
    logger.debug("Writing CPS file...")
    with args.cps_file as cps_file:
        # One "timestamp,count" row per interval, in encounter order.
        for key, value in intervals.items():
            num_written += 1
            print(f'{key},{value}', file=cps_file)
    logger.debug("%s records written", num_written)
    if args.spread:
        print_spread(calculate_spread(intervals))
    if args.queue:
        print_queue_times(queue_times)
if __name__ == "__main__":
main(get_args()) | 40.024229 | 123 | 0.614441 |
import sys
import argparse
from datetime import datetime, timedelta
import csv
import logging
from decimal import Decimal
__version__ = "1.0"
# Positional field names used when the CDR file has no header row.
DEFAULT_FIELDNAMES = \
    ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
# Known CDR timestamp layouts, tried in order; None means ISO 8601.
DATETIME_FORMATS = {
    'Monkey': "%a, %d %b %Y %H:%M:%S %z",
    'Console': "%H:%M:%S %Z %Y-%m-%d",
    'ISO': None
}
# Module-level logger; handlers are attached by configure_logging().
logger = logging.getLogger(__name__)
def configure_logging(level=logging.INFO):
    """Attach a timestamped stream handler to the module logger at `level`."""
    logger.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s.%(msecs)03d: %(message)s', datefmt='%H:%M:%S'))
    logger.addHandler(stream_handler)
def get_args():
    """Parse and validate the command line, returning the argparse namespace.

    Exits via parser.error() when --type and --column are inconsistent.
    """
    col_num = None

    # Validate a -c/--column value: an integer selects a positional column
    # (must be within DEFAULT_FIELDNAMES); anything else is a column name.
    # NOTE: parameter renamed from `str`, which shadowed the builtin.
    def column_id(value):
        try:
            nonlocal col_num
            col_num = int(value)
            if col_num < 1 or col_num > len(DEFAULT_FIELDNAMES):
                raise argparse.ArgumentTypeError("Column number is out of range")
            else:
                return value
        except ValueError:
            return value

    # Parse a ±HHMM timezone offset and return a tzinfo object.
    # NOTE: parameter renamed from `str`, which shadowed the builtin.
    def tzinfo(offset):
        try:
            dt = datetime.strptime(offset, '%z')
            return dt.tzinfo
        except ValueError:
            raise argparse.ArgumentTypeError(
                "Timezone offset should be a signed value in the form ±HHMM")

    # Default --tz to the timezone of the local machine.
    now = datetime.now()
    local_timezone = now.astimezone().tzinfo
    parser = argparse.ArgumentParser(
        description="Create a calls-per-second CSV file from a CDR file.",
        epilog=(
            "We recommend defaulting the CDR file type to 'auto', unless the start "
            "date/time is not the first date/time column in the file, in which "
            "case you should specify 'column', which is the name (type='header') "
            "or number (type='positional') of the start date/time column. "
            "Add a filename to the command line prefixed by '@' if you wish to place "
            "parameters in a file, one parameter per line."),
        fromfile_prefix_chars='@')
    parser.add_argument(
        'cdr_file', type=argparse.FileType('r'),
        help="input CSV file containing call detail records")
    parser.add_argument(
        'cps_file', type=argparse.FileType('w'),
        help="output CSV file containing CPS counts")
    parser.add_argument(
        '-s', '--start', type=datetime.fromisoformat,
        help="ignore records before this date/time (YYYY-MM-DD [[HH:MM:SS]±HHMM])")
    parser.add_argument(
        '-e', '--end', type=datetime.fromisoformat,
        help="ignore records after this date/time (YYYY-MM-DD [[HH:MM:SS]±HHMM])")
    parser.add_argument(
        '--tz', default=local_timezone, type=tzinfo,
        help="timezone as ±HHMM offset from UTC (default: timezone of local machine)")
    parser.add_argument(
        '-t', '--type', choices=['auto', 'header', 'positional'], default='auto',
        help=("specify format of CDR file (auto: autodetect; "
              "header: has a header row; positional: no header row)"))
    parser.add_argument(
        '-c', '--column', type=column_id,
        help="column name or number containing call start date/time")
    parser.add_argument(
        '--spread', action='store_true',
        help="display CPS spread")
    parser.add_argument(
        '--queue', action='store_true',
        help="display queue time estimates from CDRs")
    parser.add_argument(
        '--version', action='version', version=__version__)
    parser.add_argument(
        '--log', choices=['debug', 'info', 'warning'], default='info',
        help="set logging level")
    args = parser.parse_args()
    # Cross-validate --type against the kind of --column value supplied.
    if args.type == 'positional' and not col_num:
        parser.error("Start date/time field specified by position, but no column number specified")
    if args.type == 'header' and not args.column:
        parser.error("Start date/time field specified by column name, but none specified")
    return args
def look_for_datetime(columns):
    """Scan one CSV row for values formatted as date/times.

    Each format in DATETIME_FORMATS is tried in turn; scanning stops at the
    first format that matches at least one column. Returns a tuple of
    (matching column numbers indexed from 1, the matching strptime format
    string or None for ISO, tzinfo of the last matching value).
    """
    dt_cols = []
    tzinfo = None
    for fmt_name, fmt_string in DATETIME_FORMATS.items():
        logger.debug('Trying %s datetime format', fmt_name)
        for position, cell in enumerate(columns, start=1):
            try:
                parsed = (datetime.strptime(cell, fmt_string) if fmt_string
                          else datetime.fromisoformat(cell))
            except ValueError:
                continue
            dt_cols.append(position)
            tzinfo = parsed.tzinfo
        if dt_cols:
            break
    if dt_cols:
        logger.debug("Columns formatted as date/time values: %s", dt_cols)
        logger.debug("Datetime format is %s", fmt_name)
        logger.debug("Timezone in CDR file is %s", tzinfo)
    else:
        logger.debug("No datetime items found in row")
    return (dt_cols, fmt_string, tzinfo)
def look_for_header(columns, candidates):
    """Return the first candidate name present in columns, or None."""
    return next((name for name in candidates if name in columns), None)
# Structure containing header row and date/time format information.
class CDRinfo:
    """Plain record describing how a CDR file should be read.

    Populated by ``detect_cdr_type``:
    has_header       -- whether the CSV file starts with a header row
    start_col_id     -- column name (header files) or 1-based column number
                        as a string (positional files) of the call start time
    flags_col_id     -- name of the Flags column, if present
    direction_col_id -- name of the Direction column, if present
    queuetime_col_id -- name of the queue-time column, if present
    datetime_format  -- strptime format of the start column (None = ISO-8601)
    tzinfo           -- timezone attached to the CDR timestamps, or None
    """

    def __init__(self):
        self.has_header = False
        self.start_col_id = None
        self.flags_col_id = None
        self.direction_col_id = None
        self.queuetime_col_id = None
        self.datetime_format = None
        self.tzinfo = None
# Returns a CDRinfo containing details of the name or position of Flags
# and DateCreated/StartTime columns, and the date/time format.
def detect_cdr_type(args):
    """Sniff the CDR file layout and return a populated CDRinfo.

    Determines whether the file has a header row, which column holds the
    call start date/time (validating any user-specified column against the
    data), and which optional columns (Flags, Direction, QueueTime) are
    present. Exits the process with an error message on any inconsistency.
    Rewinds ``args.cdr_file`` before returning so the caller can re-read it.
    """
    # Let's initially assume the CDR file has a header, and get the field names.
    cdr_info = CDRinfo()
    reader = csv.DictReader(args.cdr_file)
    fieldnames = reader.fieldnames
    if fieldnames is None:
        sys.exit("Error: CDR file is empty!")
    logger.debug("Header fieldnames: %s", fieldnames)
    dt_cols, cdr_info.datetime_format, cdr_info.tzinfo = look_for_datetime(fieldnames)
    # If the first row itself parses as date/times, it is data, not a header.
    cdr_info.has_header = False if dt_cols else True
    if args.type == 'positional' and cdr_info.has_header:
        sys.exit("Error: CDR file has header row, but start date/time was specified by position")
    if args.type == 'header' and not cdr_info.has_header:
        sys.exit("Error: CDR file has no header row, but start date/time was specified by column name")
    if cdr_info.has_header:
        # The header row told us nothing about formats; sample the first data row.
        try:
            sample_row = next(reader).values()
            logger.debug("Sample row: %s", sample_row)
        except StopIteration:
            sys.exit("Error: CDR file contains no call records!")
        dt_cols, cdr_info.datetime_format, cdr_info.tzinfo = look_for_datetime(sample_row)
        if not dt_cols:
            sys.exit("Error: CDR file contains no recognizable call records!")
    # If the start date/time column is positional, check against the detected columns.
    if args.type == 'positional':
        if int(args.column) in dt_cols:
            cdr_info.start_col_id = args.column
            logger.info("CDR file confirmed as type 'positional'")
        else:
            sys.exit(f"Column {args.column} does not contain date/time values")
    # If the start date/time column was specified by name, check against the sample row.
    elif args.type == 'header':
        try:
            column_num = fieldnames.index(args.column) + 1  # Remember, indexed from 1
        except ValueError:
            sys.exit(f"No such column name '{args.column}' in header row")
        if column_num in dt_cols:
            cdr_info.start_col_id = args.column
            logger.info("CDR file confirmed as type 'header'")
        else:
            sys.exit(f"Column {args.column} does not contain date/time values")
    # Autodetect: look for Monkey/Looker/Console headers; otherwise fall back
    # to the first column that parsed as a date/time value.
    elif args.type == 'auto':
        if cdr_info.has_header:
            # Determine whether any of the standard headers are present.
            cdr_info.flags_col_id = look_for_header(fieldnames, ['Flags', 'flags'])
            cdr_info.direction_col_id = look_for_header(fieldnames, ['Direction', 'direction'])
            cdr_info.queuetime_col_id = look_for_header(fieldnames, ['QueueTime', 'queue_time'])
            cdr_info.start_col_id = look_for_header(
                fieldnames, ['DateCreated', 'date_created', 'StartTime', 'start_time'])
            if cdr_info.flags_col_id:
                logger.info("CDR file autodetected as likely from Monkey or Looker")
            elif cdr_info.direction_col_id:
                logger.info("CDR file autodetected as likely from Console or getcdrs.py")
            # If there's a defined start date/time header, make sure the column is a datetime.
            if cdr_info.start_col_id:
                col_num = fieldnames.index(cdr_info.start_col_id) + 1
                if col_num not in dt_cols:
                    # NOTE(review): args.column may be None in auto mode, so this
                    # message can read oddly; cdr_info.start_col_id would be clearer.
                    sys.exit(f"Column {args.column} does not contain date/time values")
            else:
                cdr_info.start_col_id = fieldnames[dt_cols[0] - 1]
            logger.info("CDR file autodetected as type 'header'")
        else:
            cdr_info.start_col_id = str(dt_cols[0])
            logger.info("CDR file autodetected as type 'positional'")
    logger.debug("Start column is '%s'", cdr_info.start_col_id)
    logger.debug("Flags column is '%s'", cdr_info.flags_col_id)
    logger.debug("Direction column is '%s'", cdr_info.direction_col_id)
    # Rewind so the caller can iterate the file from the beginning.
    args.cdr_file.seek(0)
    return cdr_info
def adjust_start_and_end_times(start, end, cdr_tz, given_tz):
    """Make the --start/--end bounds comparable with the CDR timestamps.

    When the CDR file carries timezone information (*cdr_tz* truthy), naive
    bounds get *given_tz* attached; when it does not, aware bounds are
    stripped of their timezone. Returns the adjusted ``(start, end)`` pair.
    """
    if start: logger.debug("Start date/time parsed as %r", start)
    if end: logger.debug("End date/time parsed as %r", end)
    logger.debug("Timezone adjustment if needed: %r", given_tz)

    def _align(bound):
        # Leave missing bounds untouched.
        if not bound:
            return bound
        if cdr_tz:
            # CDR timestamps are aware: attach given_tz to naive bounds.
            return bound if bound.tzinfo else bound.replace(tzinfo=given_tz)
        # CDR timestamps are naive: drop tzinfo from aware bounds.
        return bound.replace(tzinfo=None) if bound.tzinfo else bound

    start, end = _align(start), _align(end)
    if start: logger.debug("Adjusted start date/time: %r", start)
    if end: logger.debug("Adjusted end date/time: %r", end)
    return start, end
def calculate_spread(intervals):
    """Tally how many one-second intervals share each calls-per-second count.

    *intervals* maps a start timestamp to the number of calls that began in
    that second; the result maps each such count to its frequency.
    """
    logger.debug("Calculating spread...")
    spread = {}
    for cps in intervals.values():
        spread[cps] = spread.get(cps, 0) + 1
    return spread
def print_spread(spread):
    """Print the CPS spread histogram, one line per distinct CPS value."""
    print()
    print("Spread")
    print("------")
    for cps, count in sorted(spread.items()):
        print(f'{cps:4d} CPS: x {count}')
    print()
def print_queue_times(queue_times):
    """Print a histogram of per-call queue-time estimates, in seconds."""
    print()
    if not queue_times:
        print("No queue times were recorded")
    else:
        print("Queue Time Estimates")
        print("--------------------")
        for secs, count in sorted(queue_times.items()):
            print(f'{secs:6.2f} secs: x {count}')
    print()
def main(args):
    """Convert a CDR file into per-second call counts (CPS).

    Reads call records from ``args.cdr_file``, skips records that fail the
    Flags/Direction filters (when those columns are known), optionally backs
    queue time out of the start timestamp, restricts records to the
    ``[start, end)`` window, and writes one ``timestamp,count`` line per
    distinct start second to ``args.cps_file``. Prints spread and queue-time
    summaries when requested via the corresponding flags.
    """
    configure_logging(level=getattr(logging, args.log.upper()))
    cdr_info = detect_cdr_type(args)
    start, end = adjust_start_and_end_times(args.start, args.end, cdr_info.tzinfo, args.tz)
    logger.debug("Reading CSV file...")
    intervals = {}      # call start second -> number of calls
    queue_times = {}    # queue time (Decimal seconds) -> number of calls
    num_read = 0
    num_counted = 0
    num_written = 0
    with args.cdr_file as cdr_file:
        cdrs = csv.DictReader(cdr_file, fieldnames=None if cdr_info.has_header else DEFAULT_FIELDNAMES)
        for cdr in cdrs:
            try:
                num_read += 1
                # Keep only records with flag bit 0x0002 set
                # (presumably "call completed" -- TODO confirm).
                if cdr_info.flags_col_id and (int(cdr[cdr_info.flags_col_id]) & 0x0002 != 2):
                    continue
                # When a Direction column exists, count only outbound API calls.
                if cdr_info.direction_col_id and (cdr[cdr_info.direction_col_id] not in ['Outgoing API', 'outbound-api']):
                    continue
                # datetime_format None means the start column is ISO-8601.
                if cdr_info.datetime_format is None:
                    call_start = datetime.fromisoformat(cdr[cdr_info.start_col_id])
                else:
                    call_start = datetime.strptime(
                        cdr[cdr_info.start_col_id],
                        cdr_info.datetime_format)
                if cdr_info.queuetime_col_id:
                    # Queue time is recorded in milliseconds; convert to seconds.
                    queue_time = Decimal(cdr[cdr_info.queuetime_col_id]) / 1000
                    if queue_time in queue_times.keys():
                        queue_times[queue_time] += 1
                    else:
                        queue_times[queue_time] = 1
                    if queue_time > 0:
                        # Back the (whole-second) queue wait out of the start time.
                        call_start -= timedelta(seconds=int(queue_time))
                # Drop records outside the requested [start, end) window.
                if start and call_start < start: continue
                if end and call_start >= end: continue
                num_counted += 1
                if call_start in intervals.keys():
                    intervals[call_start] += 1
                else:
                    intervals[call_start] = 1
            except Exception as err:
                logger.error("Line: %s", cdr)
                sys.exit(f"Problem parsing CDR file: {str(err)}")
    logger.debug("%s records read, %s records counted", num_read, num_counted)
    logger.debug("Writing CPS file...")
    with args.cps_file as cps_file:
        for key, value in intervals.items():
            num_written += 1
            print(f'{key},{value}', file=cps_file)
    logger.debug("%s records written", num_written)
    if args.spread:
        print_spread(calculate_spread(intervals))
    if args.queue:
        print_queue_times(queue_times)
# Script entry point: parse CLI arguments and run the conversion.
if __name__ == "__main__":
    main(get_args())
f73ea73828b8a8b0eb57d0c377e6a31b63b3f12b | 3,678 | py | Python | kivy/input/providers/androidjoystick.py | Zen-CODE/kivy | 020cd4ae930f99cab432f6f3746b0566cdc96860 | [
"MIT"
] | null | null | null | kivy/input/providers/androidjoystick.py | Zen-CODE/kivy | 020cd4ae930f99cab432f6f3746b0566cdc96860 | [
"MIT"
] | null | null | null | kivy/input/providers/androidjoystick.py | Zen-CODE/kivy | 020cd4ae930f99cab432f6f3746b0566cdc96860 | [
"MIT"
] | 1 | 2019-04-16T22:35:58.000Z | 2019-04-16T22:35:58.000Z | # pylint: disable=W0611
'''
Android Joystick Input Provider
===============================
This module is based on the PyGame JoyStick Input Provider. For more
information, please refer to
`<http://www.pygame.org/docs/ref/joystick.html>`_
'''
__all__ = ('AndroidMotionEventProvider', )
import os
try:
import android # NOQA
except ImportError:
if 'KIVY_DOC' not in os.environ:
raise Exception('android lib not found.')
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.shape import ShapeRect
from kivy.input.motionevent import MotionEvent
import pygame.joystick
class AndroidMotionEvent(MotionEvent):
    """Touch-style motion event built from an Android joystick reading."""

    def depack(self, args):
        """Unpack ``(sx, sy, pressure, radius)`` into event attributes."""
        self.is_touch = True
        self.profile = ['pos', 'pressure', 'shape']
        sx, sy, pressure, radius = args
        self.sx = sx
        self.sy = sy
        self.pressure = pressure
        rect = ShapeRect()
        rect.width = radius
        rect.height = radius
        self.shape = rect
        super(AndroidMotionEvent, self).depack(args)
class AndroidMotionEventProvider(MotionEventProvider):
    """Polls pygame joysticks (python-for-android's touch transport) and
    translates their axis/button state into Kivy motion events.
    """

    def __init__(self, device, args):
        super(AndroidMotionEventProvider, self).__init__(device, args)
        self.joysticks = []  # active pygame joystick objects
        self.touches = {}    # joystick id -> in-progress AndroidMotionEvent
        self.uid = 0         # monotonically increasing event uid
        self.window = None   # resolved lazily in update()

    def create_joystick(self, index):
        """Initialize joystick *index*, keeping it only if it has buttons."""
        Logger.info('Android: create joystick <%d>' % index)
        js = pygame.joystick.Joystick(index)
        js.init()
        if js.get_numbuttons() == 0:
            Logger.info('Android: discard joystick <%d> cause no button' %
                        index)
            return
        self.joysticks.append(js)

    def start(self):
        """Enumerate all available joysticks and register the usable ones."""
        pygame.joystick.init()
        Logger.info('Android: found %d joystick' % pygame.joystick.get_count())
        for i in range(pygame.joystick.get_count()):
            self.create_joystick(i)

    def stop(self):
        """Drop all joystick references."""
        self.joysticks = []

    def update(self, dispatch_fn):
        """Poll every joystick and dispatch begin/update/end touch events."""
        if not self.window:
            # Imported lazily so the window is not created at import time.
            from kivy.core.window import Window
            self.window = Window
        w, h = self.window.system_size
        touches = self.touches
        for joy in self.joysticks:
            jid = joy.get_id()
            pressed = joy.get_button(0)
            if pressed or jid in touches:
                # Normalize to 0..1 window coordinates, y flipped. Assumes
                # axes 0/1 carry pixel coordinates divided by 32768 -- TODO
                # confirm against python-for-android's joystick encoding.
                x = joy.get_axis(0) * 32768. / w
                y = 1. - (joy.get_axis(1) * 32768. / h)
                # python-for-android multiplies these values by 1000.
                pressure = joy.get_axis(2) / 1000.
                radius = joy.get_axis(3) / 1000.
                # New touch: button just went down for this joystick.
                if pressed and jid not in touches:
                    self.uid += 1
                    touch = AndroidMotionEvent(self.device, self.uid,
                                               [x, y, pressure, radius])
                    touches[jid] = touch
                    dispatch_fn('begin', touch)
                # Existing touch still pressed: report movement.
                elif pressed:
                    touch = touches[jid]
                    # Skip no-op moves at the same position and pressure.
                    if touch.sx == x and touch.sy == y \
                            and touch.pressure == pressure:
                        continue
                    touch.move([x, y, pressure, radius])
                    dispatch_fn('update', touch)
                # Button released: the touch has ended.
                elif not pressed and jid in touches:
                    touch = touches[jid]
                    touch.move([x, y, pressure, radius])
                    touch.update_time_end()
                    dispatch_fn('end', touch)
                    touches.pop(jid)
# Expose this provider under the 'android' input provider name.
MotionEventFactory.register('android', AndroidMotionEventProvider)
| 32.839286 | 79 | 0.558728 |
__all__ = ('AndroidMotionEventProvider', )
import os
try:
import android
except ImportError:
if 'KIVY_DOC' not in os.environ:
raise Exception('android lib not found.')
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.shape import ShapeRect
from kivy.input.motionevent import MotionEvent
import pygame.joystick
class AndroidMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.profile = ['pos', 'pressure', 'shape']
self.sx, self.sy, self.pressure, radius = args
self.shape = ShapeRect()
self.shape.width = radius
self.shape.height = radius
super(AndroidMotionEvent, self).depack(args)
class AndroidMotionEventProvider(MotionEventProvider):
def __init__(self, device, args):
super(AndroidMotionEventProvider, self).__init__(device, args)
self.joysticks = []
self.touches = {}
self.uid = 0
self.window = None
def create_joystick(self, index):
Logger.info('Android: create joystick <%d>' % index)
js = pygame.joystick.Joystick(index)
js.init()
if js.get_numbuttons() == 0:
Logger.info('Android: discard joystick <%d> cause no button' %
index)
return
self.joysticks.append(js)
def start(self):
pygame.joystick.init()
Logger.info('Android: found %d joystick' % pygame.joystick.get_count())
for i in range(pygame.joystick.get_count()):
self.create_joystick(i)
def stop(self):
self.joysticks = []
def update(self, dispatch_fn):
if not self.window:
from kivy.core.window import Window
self.window = Window
w, h = self.window.system_size
touches = self.touches
for joy in self.joysticks:
jid = joy.get_id()
pressed = joy.get_button(0)
if pressed or jid in touches:
x = joy.get_axis(0) * 32768. / w
y = 1. - (joy.get_axis(1) * 32768. / h)
pressure = joy.get_axis(2) / 1000.
radius = joy.get_axis(3) / 1000.
if pressed and jid not in touches:
self.uid += 1
touch = AndroidMotionEvent(self.device, self.uid,
[x, y, pressure, radius])
touches[jid] = touch
dispatch_fn('begin', touch)
elif pressed:
touch = touches[jid]
if touch.sx == x and touch.sy == y \
and touch.pressure == pressure:
continue
touch.move([x, y, pressure, radius])
dispatch_fn('update', touch)
elif not pressed and jid in touches:
touch = touches[jid]
touch.move([x, y, pressure, radius])
touch.update_time_end()
dispatch_fn('end', touch)
touches.pop(jid)
MotionEventFactory.register('android', AndroidMotionEventProvider)
| true | true |
f73ea882b3c478b64d849ace9aad77a4fd64c642 | 504 | py | Python | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | trees.py | dmancevo/trees | a76a8d9c8e11c67042e3d947d58a84fee83ad6b5 | [
"Apache-2.0"
] | null | null | null | from ctypes import *
# ctypes binding to the native decision-tree library (trees.so).
class Node(Structure):
    """Mirror of the C `Node` struct defined by the native library."""
    pass


# Fields are declared after the class body so Node can reference itself
# in the left/right child pointers.
Node._fields_ = [
    ("leaf", c_int),
    ("g", c_float),
    ("min_samples", c_int),
    ("split_ind", c_int),
    ("split", c_float),
    ("left", POINTER(Node)),
    ("right", POINTER(Node))]

# Load the shared library from the working directory and declare the
# signature of get_root so ctypes marshals arguments correctly.
trees = CDLL("./trees.so")
trees.get_root.argtypes = (c_int, )
trees.get_root.restype = POINTER(Node)


class Tree(object):
    """Thin Python wrapper holding the root node returned by the C library."""

    def __init__(self, min_samples=1):
        # min_samples is forwarded directly to the native get_root().
        self.root = trees.get_root(min_samples)


if __name__ == '__main__':
    tree = Tree()
| 21 | 47 | 0.621032 | from ctypes import *
class Node(Structure): pass
Node._fields_ = [
("leaf", c_int),
("g", c_float),
("min_samples", c_int),
("split_ind", c_int),
("split", c_float),
("left", POINTER(Node)),
("right", POINTER(Node))]
trees = CDLL("./trees.so")
trees.get_root.argtypes = (c_int, )
trees.get_root.restype = POINTER(Node)
class Tree(object):
def __init__(self, min_samples=1):
self.root = trees.get_root(min_samples)
if __name__ == '__main__':
tree = Tree()
| true | true |
f73ea89050f8e5f31753f343554a19afdf4716d5 | 10,395 | py | Python | histomicstk/deeplab/utils/get_dataset_colormap.py | SarderLab/HistomicsTK_PodoSighter | 9a75302f645bfb3dfd9688d247388c9948f4eadb | [
"Apache-2.0"
] | null | null | null | histomicstk/deeplab/utils/get_dataset_colormap.py | SarderLab/HistomicsTK_PodoSighter | 9a75302f645bfb3dfd9688d247388c9948f4eadb | [
"Apache-2.0"
] | null | null | null | histomicstk/deeplab/utils/get_dataset_colormap.py | SarderLab/HistomicsTK_PodoSighter | 9a75302f645bfb3dfd9688d247388c9948f4eadb | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visualizes the segmentation results via specified color map.
Visualizes the semantic segmentation results by the color map
defined by the different datasets. Supported colormaps are:
* ADE20K (http://groups.csail.mit.edu/vision/datasets/ADE20K/).
* Cityscapes dataset (https://www.cityscapes-dataset.com).
* Mapillary Vistas (https://research.mapillary.com).
* PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
# Dataset names (used as keys throughout this module).
_ADE20K = 'ade20k'
_CITYSCAPES = 'cityscapes'
_MAPILLARY_VISTAS = 'mapillary_vistas'
_PASCAL = 'pascal'
_PC1 = 'PC1'

# Max number of entries in the colormap for each dataset, i.e. the number
# of label ids that dataset's colormap can represent.
_DATASET_MAX_ENTRIES = {
    _ADE20K: 151,
    _CITYSCAPES: 256,
    _MAPILLARY_VISTAS: 66,
    _PASCAL: 512,
    _PC1: 256,
}
def create_pc1_label_colormap():
  """Creates a label colormap used in PC1 segmentation benchmark.

  Returns:
    A 256x3 uint8 colormap; labels 0-3 get distinct colors, the rest black.
  """
  colormap = np.zeros((256, 3), dtype=np.uint8)
  colormap[:4] = [
      [128, 64, 128],
      [244, 35, 232],
      [70, 70, 70],
      [102, 102, 156],
  ]
  return colormap
def create_ade20k_label_colormap():
  """Creates a label colormap used in ADE20K segmentation benchmark.

  Returns:
    A colormap for visualizing segmentation results. One RGB triple per
    ADE20K label id (row index = label id); 151 rows, matching
    _DATASET_MAX_ENTRIES[_ADE20K].
  """
  return np.asarray([
      [0, 0, 0],
      [120, 120, 120],
      [180, 120, 120],
      [6, 230, 230],
      [80, 50, 50],
      [4, 200, 3],
      [120, 120, 80],
      [140, 140, 140],
      [204, 5, 255],
      [230, 230, 230],
      [4, 250, 7],
      [224, 5, 255],
      [235, 255, 7],
      [150, 5, 61],
      [120, 120, 70],
      [8, 255, 51],
      [255, 6, 82],
      [143, 255, 140],
      [204, 255, 4],
      [255, 51, 7],
      [204, 70, 3],
      [0, 102, 200],
      [61, 230, 250],
      [255, 6, 51],
      [11, 102, 255],
      [255, 7, 71],
      [255, 9, 224],
      [9, 7, 230],
      [220, 220, 220],
      [255, 9, 92],
      [112, 9, 255],
      [8, 255, 214],
      [7, 255, 224],
      [255, 184, 6],
      [10, 255, 71],
      [255, 41, 10],
      [7, 255, 255],
      [224, 255, 8],
      [102, 8, 255],
      [255, 61, 6],
      [255, 194, 7],
      [255, 122, 8],
      [0, 255, 20],
      [255, 8, 41],
      [255, 5, 153],
      [6, 51, 255],
      [235, 12, 255],
      [160, 150, 20],
      [0, 163, 255],
      [140, 140, 140],
      [250, 10, 15],
      [20, 255, 0],
      [31, 255, 0],
      [255, 31, 0],
      [255, 224, 0],
      [153, 255, 0],
      [0, 0, 255],
      [255, 71, 0],
      [0, 235, 255],
      [0, 173, 255],
      [31, 0, 255],
      [11, 200, 200],
      [255, 82, 0],
      [0, 255, 245],
      [0, 61, 255],
      [0, 255, 112],
      [0, 255, 133],
      [255, 0, 0],
      [255, 163, 0],
      [255, 102, 0],
      [194, 255, 0],
      [0, 143, 255],
      [51, 255, 0],
      [0, 82, 255],
      [0, 255, 41],
      [0, 255, 173],
      [10, 0, 255],
      [173, 255, 0],
      [0, 255, 153],
      [255, 92, 0],
      [255, 0, 255],
      [255, 0, 245],
      [255, 0, 102],
      [255, 173, 0],
      [255, 0, 20],
      [255, 184, 184],
      [0, 31, 255],
      [0, 255, 61],
      [0, 71, 255],
      [255, 0, 204],
      [0, 255, 194],
      [0, 255, 82],
      [0, 10, 255],
      [0, 112, 255],
      [51, 0, 255],
      [0, 194, 255],
      [0, 122, 255],
      [0, 255, 163],
      [255, 153, 0],
      [0, 255, 10],
      [255, 112, 0],
      [143, 255, 0],
      [82, 0, 255],
      [163, 255, 0],
      [255, 235, 0],
      [8, 184, 170],
      [133, 0, 255],
      [0, 255, 92],
      [184, 0, 255],
      [255, 0, 31],
      [0, 184, 255],
      [0, 214, 255],
      [255, 0, 112],
      [92, 255, 0],
      [0, 224, 255],
      [112, 224, 255],
      [70, 184, 160],
      [163, 0, 255],
      [153, 0, 255],
      [71, 255, 0],
      [255, 0, 163],
      [255, 204, 0],
      [255, 0, 143],
      [0, 255, 235],
      [133, 255, 0],
      [255, 0, 235],
      [245, 0, 255],
      [255, 0, 122],
      [255, 245, 0],
      [10, 190, 212],
      [214, 255, 0],
      [0, 204, 255],
      [20, 0, 255],
      [255, 255, 0],
      [0, 153, 255],
      [0, 41, 255],
      [0, 255, 204],
      [41, 0, 255],
      [41, 255, 0],
      [173, 0, 255],
      [0, 245, 255],
      [71, 0, 255],
      [122, 0, 255],
      [0, 255, 184],
      [0, 92, 255],
      [184, 255, 0],
      [0, 133, 255],
      [255, 214, 0],
      [25, 194, 194],
      [102, 255, 0],
      [92, 0, 255],
  ])
def create_cityscapes_label_colormap():
  """Creates a label colormap used in CITYSCAPES segmentation benchmark.

  Returns:
    A 256x3 uint8 colormap; the 19 Cityscapes train ids get distinct colors,
    all remaining entries stay black.
  """
  class_colors = [
      [128, 64, 128],   # road
      [244, 35, 232],   # sidewalk
      [70, 70, 70],     # building
      [102, 102, 156],  # wall
      [190, 153, 153],  # fence
      [153, 153, 153],  # pole
      [250, 170, 30],   # traffic light
      [220, 220, 0],    # traffic sign
      [107, 142, 35],   # vegetation
      [152, 251, 152],  # terrain
      [70, 130, 180],   # sky
      [220, 20, 60],    # person
      [255, 0, 0],      # rider
      [0, 0, 142],      # car
      [0, 0, 70],       # truck
      [0, 60, 100],     # bus
      [0, 80, 100],     # train
      [0, 0, 230],      # motorcycle
      [119, 11, 32],    # bicycle
  ]
  colormap = np.zeros((256, 3), dtype=np.uint8)
  colormap[:len(class_colors)] = class_colors
  return colormap
def create_mapillary_vistas_label_colormap():
  """Creates a label colormap used in Mapillary Vistas segmentation benchmark.

  Returns:
    A colormap for visualizing segmentation results. One RGB triple per
    label id (row index = label id); 66 rows, matching
    _DATASET_MAX_ENTRIES[_MAPILLARY_VISTAS]. Dtype is the platform default
    integer (np.asarray of Python ints), unlike the uint8 maps above.
  """
  return np.asarray([
      [165, 42, 42],
      [0, 192, 0],
      [196, 196, 196],
      [190, 153, 153],
      [180, 165, 180],
      [102, 102, 156],
      [102, 102, 156],
      [128, 64, 255],
      [140, 140, 200],
      [170, 170, 170],
      [250, 170, 160],
      [96, 96, 96],
      [230, 150, 140],
      [128, 64, 128],
      [110, 110, 110],
      [244, 35, 232],
      [150, 100, 100],
      [70, 70, 70],
      [150, 120, 90],
      [220, 20, 60],
      [255, 0, 0],
      [255, 0, 0],
      [255, 0, 0],
      [200, 128, 128],
      [255, 255, 255],
      [64, 170, 64],
      [128, 64, 64],
      [70, 130, 180],
      [255, 255, 255],
      [152, 251, 152],
      [107, 142, 35],
      [0, 170, 30],
      [255, 255, 128],
      [250, 0, 30],
      [0, 0, 0],
      [220, 220, 220],
      [170, 170, 170],
      [222, 40, 40],
      [100, 170, 30],
      [40, 40, 40],
      [33, 33, 33],
      [170, 170, 170],
      [0, 0, 142],
      [170, 170, 170],
      [210, 170, 100],
      [153, 153, 153],
      [128, 128, 128],
      [0, 0, 142],
      [250, 170, 30],
      [192, 192, 192],
      [220, 220, 0],
      [180, 165, 180],
      [119, 11, 32],
      [0, 0, 142],
      [0, 60, 100],
      [0, 0, 142],
      [0, 0, 90],
      [0, 0, 230],
      [0, 80, 100],
      [128, 64, 64],
      [0, 0, 110],
      [0, 0, 70],
      [0, 0, 192],
      [32, 32, 32],
      [0, 0, 0],
      [0, 0, 0],
  ])
def create_pascal_label_colormap():
  """Creates a label colormap used in PASCAL VOC segmentation benchmark.

  Returns:
    A colormap for visualizing segmentation results.
  """
  num_entries = _DATASET_MAX_ENTRIES[_PASCAL]
  colormap = np.zeros((num_entries, 3), dtype=int)
  ind = np.arange(num_entries, dtype=int)
  # Consume the label bits three at a time (one per channel), depositing
  # them from the most significant output bit down to the least.
  for shift in range(7, -1, -1):
    for channel in range(3):
      colormap[:, channel] |= bit_get(ind, channel) << shift
    ind >>= 3
  return colormap
# Accessors returning the module-level dataset-name constants.
def get_ade20k_name():
  return _ADE20K


def get_cityscapes_name():
  return _CITYSCAPES


def get_mapillary_vistas_name():
  return _MAPILLARY_VISTAS


def get_pascal_name():
  return _PASCAL


def get_pc1_name():
  return _PC1
def bit_get(val, idx):
  """Gets the bit value.

  Args:
    val: Input value, int or numpy int array.
    idx: Which bit of the input val.

  Returns:
    The "idx"-th bit of input val.
  """
  shifted = val >> idx
  return shifted & 1
def create_label_colormap(dataset=_PC1):
  """Creates a label colormap for the specified dataset.

  Args:
    dataset: The colormap used in the dataset.

  Returns:
    A numpy array of the dataset colormap.

  Raises:
    ValueError: If the dataset is not supported.
  """
  builders = {
      _ADE20K: create_ade20k_label_colormap,
      _CITYSCAPES: create_cityscapes_label_colormap,
      _MAPILLARY_VISTAS: create_mapillary_vistas_label_colormap,
      _PASCAL: create_pascal_label_colormap,
      _PC1: create_pc1_label_colormap,
  }
  if dataset not in builders:
    raise ValueError('Unsupported dataset.')
  return builders[dataset]()
def label_to_color_image(label, dataset=_PC1):
  """Adds color defined by the dataset colormap to the label.

  Args:
    label: A 2D array with integer type, storing the segmentation label.
    dataset: The colormap used in the dataset.

  Returns:
    result: A 2D array with floating type. The element of the array
      is the color indexed by the corresponding element in the input label
      to the dataset color map.

  Raises:
    ValueError: If label is not of rank 2 or its value is larger than color
      map maximum entry.
  """
  if label.ndim != 2:
    raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
  max_entries = _DATASET_MAX_ENTRIES[dataset]
  peak = np.max(label)
  if peak >= max_entries:
    raise ValueError(
        'label value too large: {} >= {}.'.format(peak, max_entries))
  # Fancy-index the colormap: each label id picks up its RGB triple.
  return create_label_colormap(dataset)[label]
def get_dataset_colormap_max_entries(dataset):
  """Return the number of colormap entries defined for *dataset*."""
  return _DATASET_MAX_ENTRIES[dataset]
| 23.841743 | 80 | 0.537085 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
_ADE20K = 'ade20k'
_CITYSCAPES = 'cityscapes'
_MAPILLARY_VISTAS = 'mapillary_vistas'
_PASCAL = 'pascal'
_PC1 = 'PC1'
_DATASET_MAX_ENTRIES = {
_ADE20K: 151,
_CITYSCAPES: 256,
_MAPILLARY_VISTAS: 66,
_PASCAL: 512,
_PC1: 256,
}
def create_pc1_label_colormap():
colormap = np.zeros((256, 3), dtype=np.uint8)
colormap[0] = [128, 64, 128]
colormap[1] = [244, 35, 232]
colormap[2] = [70, 70, 70]
colormap[3] = [102, 102, 156]
return colormap
def create_ade20k_label_colormap():
return np.asarray([
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
])
def create_cityscapes_label_colormap():
colormap = np.zeros((256, 3), dtype=np.uint8)
colormap[0] = [128, 64, 128]
colormap[1] = [244, 35, 232]
colormap[2] = [70, 70, 70]
colormap[3] = [102, 102, 156]
colormap[4] = [190, 153, 153]
colormap[5] = [153, 153, 153]
colormap[6] = [250, 170, 30]
colormap[7] = [220, 220, 0]
colormap[8] = [107, 142, 35]
colormap[9] = [152, 251, 152]
colormap[10] = [70, 130, 180]
colormap[11] = [220, 20, 60]
colormap[12] = [255, 0, 0]
colormap[13] = [0, 0, 142]
colormap[14] = [0, 0, 70]
colormap[15] = [0, 60, 100]
colormap[16] = [0, 80, 100]
colormap[17] = [0, 0, 230]
colormap[18] = [119, 11, 32]
return colormap
def create_mapillary_vistas_label_colormap():
return np.asarray([
[165, 42, 42],
[0, 192, 0],
[196, 196, 196],
[190, 153, 153],
[180, 165, 180],
[102, 102, 156],
[102, 102, 156],
[128, 64, 255],
[140, 140, 200],
[170, 170, 170],
[250, 170, 160],
[96, 96, 96],
[230, 150, 140],
[128, 64, 128],
[110, 110, 110],
[244, 35, 232],
[150, 100, 100],
[70, 70, 70],
[150, 120, 90],
[220, 20, 60],
[255, 0, 0],
[255, 0, 0],
[255, 0, 0],
[200, 128, 128],
[255, 255, 255],
[64, 170, 64],
[128, 64, 64],
[70, 130, 180],
[255, 255, 255],
[152, 251, 152],
[107, 142, 35],
[0, 170, 30],
[255, 255, 128],
[250, 0, 30],
[0, 0, 0],
[220, 220, 220],
[170, 170, 170],
[222, 40, 40],
[100, 170, 30],
[40, 40, 40],
[33, 33, 33],
[170, 170, 170],
[0, 0, 142],
[170, 170, 170],
[210, 170, 100],
[153, 153, 153],
[128, 128, 128],
[0, 0, 142],
[250, 170, 30],
[192, 192, 192],
[220, 220, 0],
[180, 165, 180],
[119, 11, 32],
[0, 0, 142],
[0, 60, 100],
[0, 0, 142],
[0, 0, 90],
[0, 0, 230],
[0, 80, 100],
[128, 64, 64],
[0, 0, 110],
[0, 0, 70],
[0, 0, 192],
[32, 32, 32],
[0, 0, 0],
[0, 0, 0],
])
def create_pascal_label_colormap():
colormap = np.zeros((_DATASET_MAX_ENTRIES[_PASCAL], 3), dtype=int)
ind = np.arange(_DATASET_MAX_ENTRIES[_PASCAL], dtype=int)
for shift in reversed(list(range(8))):
for channel in range(3):
colormap[:, channel] |= bit_get(ind, channel) << shift
ind >>= 3
return colormap
def get_ade20k_name():
return _ADE20K
def get_cityscapes_name():
return _CITYSCAPES
def get_mapillary_vistas_name():
return _MAPILLARY_VISTAS
def get_pascal_name():
return _PASCAL
def get_pc1_name():
return _PC1
def bit_get(val, idx):
return (val >> idx) & 1
def create_label_colormap(dataset=_PC1):
if dataset == _ADE20K:
return create_ade20k_label_colormap()
elif dataset == _CITYSCAPES:
return create_cityscapes_label_colormap()
elif dataset == _MAPILLARY_VISTAS:
return create_mapillary_vistas_label_colormap()
elif dataset == _PASCAL:
return create_pascal_label_colormap()
elif dataset == _PC1:
return create_pc1_label_colormap()
else:
raise ValueError('Unsupported dataset.')
def label_to_color_image(label, dataset=_PC1):
if label.ndim != 2:
raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
if np.max(label) >= _DATASET_MAX_ENTRIES[dataset]:
raise ValueError(
'label value too large: {} >= {}.'.format(
np.max(label), _DATASET_MAX_ENTRIES[dataset]))
colormap = create_label_colormap(dataset)
return colormap[label]
def get_dataset_colormap_max_entries(dataset):
return _DATASET_MAX_ENTRIES[dataset]
| true | true |
f73ea8e291e1e1993fa256e8d3edb3b55354375f | 879 | py | Python | test/sources/test_static.py | ChowNow/blingalytics | a05c866bc0dc7c56b5106c71c12cf10b37c5bae5 | [
"MIT"
] | null | null | null | test/sources/test_static.py | ChowNow/blingalytics | a05c866bc0dc7c56b5106c71c12cf10b37c5bae5 | [
"MIT"
] | 1 | 2019-08-16T07:15:42.000Z | 2019-08-16T15:27:18.000Z | test/sources/test_static.py | ChowNow/blingalytics | a05c866bc0dc7c56b5106c71c12cf10b37c5bae5 | [
"MIT"
] | 1 | 2017-06-01T23:32:08.000Z | 2017-06-01T23:32:08.000Z | from __future__ import absolute_import
import unittest
from blingalytics.sources import static
from mock import Mock
from test import reports
class TestStaticSource(unittest.TestCase):
    """Unit tests for blingalytics' static column source."""

    def setUp(self):
        # The report constructor's argument is not exercised by these
        # tests, so a bare Mock suffices.
        self.report = reports.SuperBasicReport(Mock())

    def test_static_source(self):
        source = static.StaticSource(self.report)
        # The report declares exactly one static column, named 'id',
        # backed by a static.Value instance.
        self.assertEqual(len(source._columns), 1)
        self.assertEqual(len(source._columns[0]), 2)
        self.assertEqual(source._columns[0][0], 'id')
        self.assertEqual(list(source._columns_dict), ['id'])
        self.assertTrue(isinstance(source._columns[0][1], static.Value))
        # Static sources contribute no rows of their own...
        self.assertEqual(source.pre_process({}), None)
        self.assertEqual(list(source.get_rows([], {})), [])
        # ...but post-processing injects the static value into each row.
        self.assertEqual(source.post_process({'othercolumn': 'stuff'}, {}),
                         {'othercolumn': 'stuff', 'id': 1})
| 35.16 | 75 | 0.678043 | from __future__ import absolute_import
import unittest
from blingalytics.sources import static
from mock import Mock
from test import reports
class TestStaticSource(unittest.TestCase):
def setUp(self):
self.report = reports.SuperBasicReport(Mock())
def test_static_source(self):
source = static.StaticSource(self.report)
self.assertEqual(len(source._columns), 1)
self.assertEqual(len(source._columns[0]), 2)
self.assertEqual(source._columns[0][0], 'id')
self.assertEqual(list(source._columns_dict), ['id'])
self.assertTrue(isinstance(source._columns[0][1], static.Value))
self.assertEqual(source.pre_process({}), None)
self.assertEqual(list(source.get_rows([], {})), [])
self.assertEqual(source.post_process({'othercolumn': 'stuff'}, {}),
{'othercolumn': 'stuff', 'id': 1})
| true | true |
f73eaa0f91a7803cfd80660d9a05e13025b23948 | 7,041 | py | Python | cn/opencv/finger/finger.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/finger/finger.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/finger/finger.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | 2 | 2019-06-18T05:53:26.000Z | 2019-06-19T03:26:02.000Z | import cv2 as cv
import numpy as np
import math
import time
# Grab frames from the default local camera (device index 0).
capture = cv.VideoCapture(0)
# Alternatively, stream from an IP-camera app over the LAN:
# video = "http://admin:admin@10.242.200.134:8081/"  # admin:admin is the username:password, followed by the LAN address
# capture = cv.VideoCapture(video)
# Helper below: Euclidean distance between two points.
def _get_eucledian_distance(vect1, vect2):
distant = vect1[0] - vect2[0]
dist = np.sqrt(np.sum(np.square(distant)))
# 或者用numpy内建方法
# vect1 = list(vect1)
# vect2 = list(vect2)
# dist = np.linalg.norm(vect1 - vect2)
return dist
def gesture_recognition():
    """Read camera frames in a loop, segment the hand via background
    subtraction plus skin-colour thresholding, and count convexity defects
    of the hand contour as a finger estimate.

    Runs until the user presses ESC (key code 27).  Shows two windows:
    "dst_demo" (the binary skin mask) and "video" (annotated frames).
    """
    # Create the background subtractor ONCE.  The original code rebuilt it
    # on every loop iteration, which resets the learned background model
    # each frame and defeats the purpose of background subtraction.
    fgbg = cv.createBackgroundSubtractorMOG2()
    kernel = np.ones((5, 5), np.uint8)
    while True:
        _, frame = capture.read()  # grab one camera frame
        fgmask = fgbg.apply(frame)
        # Erode to remove small foreground speckles (the original comment
        # said "dilate", but cv.erode performs erosion).
        fgmask = cv.erode(fgmask, kernel, iterations=1)
        res = cv.bitwise_and(frame, frame, mask=fgmask)
        # Split off the Cr channel of YCrCb, where skin tones separate well.
        ycrcb = cv.cvtColor(res, cv.COLOR_BGR2YCrCb)
        (_, cr, _) = cv.split(ycrcb)
        cr1 = cv.GaussianBlur(cr, (5, 5), 0)  # smooth before thresholding
        # Otsu binarization of the Cr channel -> binary skin mask.
        _, skin = cv.threshold(cr1, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
        # Region of interest where the hand is expected to appear.
        gesture_roi = skin[0:350, 380:700]
        cv.imshow("dst_demo", skin)
        contours, _hierarchy = cv.findContours(
            gesture_roi, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE
        )
        for i, contour in enumerate(contours):
            # Draw the contour and its bounding box on the live frame.
            cv.drawContours(frame[0:350, 380:700], contours, i, (255, 0, 0), 1)
            x, y, w, h = cv.boundingRect(contour)
            cv.rectangle(frame[0:350, 380:700], (x, y), (x + w, y + h), (100, 100, 0), 1)
            # Convex hull as contour indices (required by convexityDefects).
            hull = cv.convexHull(contour, True, returnPoints=False)
            # Each defect row is (start, end, farthest point, distance),
            # all indices into the contour point set.
            defects = cv.convexityDefects(contour, hull)
            ndefects = 0
            if defects is not None:  # degenerate contours have no defects
                # NOTE: a distinct index 'j' is used here; the original code
                # reused 'i' and shadowed the enumerate index above.
                for j in range(defects.shape[0]):
                    s, e, f, d = defects[j, 0]
                    start = tuple(contour[s][0])
                    end = tuple(contour[e][0])
                    far = tuple(contour[f][0])
                    # Law of cosines: angle at the defect point between the
                    # two hull endpoints.
                    a = _get_eucledian_distance(start, end)
                    b = _get_eucledian_distance(start, far)
                    c = _get_eucledian_distance(end, far)
                    angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))
                    cv.line(frame[0:350, 380:700], start, end, [255, 255, 0], 2)
                    cv.circle(frame[0:350, 380:700], far, 5, [0, 0, 255], -1)
                    # A sharp valley between two fingers counts as a defect.
                    if angle <= math.pi / 5:
                        ndefects = ndefects + 1
                print("数字 = %f" % ndefects)
        cv.imshow("video", frame)
        c = cv.waitKey(50)
        if c == 27:  # ESC quits the loop
            break
def gesture_recognition_two():
    """Load a static test image, binarize it, and draw its external contours."""
    source = cv.imread("E:/pictureprocessing/practice/picture/practice_one.png")
    source = cv.flip(source, 1)  # mirror horizontally
    grayscale = cv.cvtColor(source, cv.COLOR_BGR2GRAY)
    # Otsu picks the threshold automatically; the explicit 0 is ignored.
    _, thresholded = cv.threshold(grayscale, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    blurred = cv.GaussianBlur(thresholded, (1, 1), 0)  # (1, 1) kernel: effectively a no-op smooth
    found, _hierarchy = cv.findContours(blurred, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for index, _outline in enumerate(found):
        cv.drawContours(source, found, index, (0, 255, 0), 3)  # outline each contour in green
        print(index)
    cv.imshow("img_demo", source)
# Script entry: pre-create the preview window, then run the live camera
# loop until ESC is pressed, and finally release the capture device.
cv.namedWindow("video")
gesture_recognition()
# gesture_recognition_two()
cv.waitKey(0)
capture.release()
cv.destroyAllWindows()
import numpy as np
import math
import time
capture = cv.VideoCapture(0)
stance(vect1, vect2):
distant = vect1[0] - vect2[0]
dist = np.sqrt(np.sum(np.square(distant)))
return dist
def gesture_recognition():
while True:
ret, frame = capture.read()
fgbg = cv.createBackgroundSubtractorMOG2()
fgmask = fgbg.apply(frame)
kernel = np.ones((5, 5), np.uint8)
fgmask = cv.erode(fgmask, kernel, iterations=1)
res = cv.bitwise_and(frame, frame, mask=fgmask)
ycrcb = cv.cvtColor(res, cv.COLOR_BGR2YCrCb)
(_, cr, _) = cv.split(ycrcb)
cr1 = cv.GaussianBlur(cr, (5, 5), 0)
_, skin = cv.threshold(cr1, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
IN_APPROX_SIMPLE)
for i, contour in enumerate(contours):
cv.drawContours(frame[0:350, 380:700], contours, i, (255, 0, 0), 1)
x, y, w, h = cv.boundingRect(contour)
cv.rectangle(frame[0:350, 380:700], (x, y), (x + w, y + h), (100, 100, 0), 1)
hull = cv.convexHull(contour, True, returnPoints=False)
defects = cv.convexityDefects(contour, hull)
ndefects = 0
if defects is not None:
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
start = tuple(contour[s][0])
end = tuple(contour[e][0])
far = tuple(contour[f][0])
a = _get_eucledian_distance(start, end)
b = _get_eucledian_distance(start, far)
c = _get_eucledian_distance(end, far)
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))
cv.line(frame[0:350, 380:700], start, end, [255, 255, 0], 2)
cv.circle(frame[0:350, 380:700], far, 5, [0, 0, 255], -1)
if angle <= math.pi / 5:
ndefects = ndefects + 1
print("数字 = %f" % ndefects)
cv.imshow("video", frame)
c = cv.waitKey(50)
if c == 27:
break
def gesture_recognition_two():
img = cv.imread("E:/pictureprocessing/practice/picture/practice_one.png")
img = cv.flip(img, 1)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
dst = cv.GaussianBlur(binary, (1, 1), 0)
contours, heriachy = cv.findContours(dst, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for i, contour in enumerate(contours):
cv.drawContours(img, contours, i, (0, 255, 0), 3)
print(i)
cv.imshow("img_demo", img)
cv.namedWindow("video")
gesture_recognition()
cv.waitKey(0)
capture.release()
cv.destroyAllWindows() | true | true |
f73eaa11e79ce80711a3a3c53d1393426c686df0 | 10,525 | py | Python | tests/components/geo_json_events/test_geo_location.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 4 | 2020-07-29T17:47:10.000Z | 2020-09-16T13:39:13.000Z | tests/components/geo_json_events/test_geo_location.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-11-08T19:40:10.000Z | 2022-03-01T11:11:07.000Z | tests/components/geo_json_events/test_geo_location.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """The tests for the geojson platform."""
from homeassistant.components import geo_location
from homeassistant.components.geo_json_events.geo_location import (
ATTR_EXTERNAL_ID,
SCAN_INTERVAL,
)
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_URL,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import MagicMock, call, patch
from tests.common import assert_setup_component, async_fire_time_changed
# Feed URL used by all tests below; GenericFeed is patched, so it is never fetched.
URL = "http://geo.json.local/geo_json_events.json"
# Platform configuration relying on Home Assistant's default home coordinates.
CONFIG = {
    geo_location.DOMAIN: [
        {"platform": "geo_json_events", CONF_URL: URL, CONF_RADIUS: 200}
    ]
}
# Same configuration but with explicitly overridden latitude/longitude.
CONFIG_WITH_CUSTOM_LOCATION = {
    geo_location.DOMAIN: [
        {
            "platform": "geo_json_events",
            CONF_URL: URL,
            CONF_RADIUS: 200,
            CONF_LATITUDE: 15.1,
            CONF_LONGITUDE: 25.2,
        }
    ]
}
def _generate_mock_feed_entry(external_id, title, distance_to_home, coordinates):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
return feed_entry
async def test_setup(hass):
    """Test the general setup of the platform.

    Three feed entries become three geo_location entities; subsequent
    simulated feed updates exercise the add/update/remove lifecycle.
    """
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0))
    mock_entry_2 = _generate_mock_feed_entry("2345", "Title 2", 20.5, (-31.1, 150.1))
    mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2))
    mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3))
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
        "geojson_client.generic_feed.GenericFeed"
    ) as mock_feed:
        mock_feed.return_value.update.return_value = (
            "OK",
            [mock_entry_1, mock_entry_2, mock_entry_3],
        )
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
            await hass.async_block_till_done()
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Each entity's state is the distance to home in kilometers.
            state = hass.states.get("geo_location.title_1")
            assert state is not None
            assert state.name == "Title 1"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "1234",
                ATTR_LATITUDE: -31.0,
                ATTR_LONGITUDE: 150.0,
                ATTR_FRIENDLY_NAME: "Title 1",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
                ATTR_SOURCE: "geo_json_events",
            }
            assert round(abs(float(state.state) - 15.5), 7) == 0
            state = hass.states.get("geo_location.title_2")
            assert state is not None
            assert state.name == "Title 2"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "2345",
                ATTR_LATITUDE: -31.1,
                ATTR_LONGITUDE: 150.1,
                ATTR_FRIENDLY_NAME: "Title 2",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
                ATTR_SOURCE: "geo_json_events",
            }
            assert round(abs(float(state.state) - 20.5), 7) == 0
            state = hass.states.get("geo_location.title_3")
            assert state is not None
            assert state.name == "Title 3"
            assert state.attributes == {
                ATTR_EXTERNAL_ID: "3456",
                ATTR_LATITUDE: -31.2,
                ATTR_LONGITUDE: 150.2,
                ATTR_FRIENDLY_NAME: "Title 3",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
                ATTR_SOURCE: "geo_json_events",
            }
            assert round(abs(float(state.state) - 25.5), 7) == 0
            # Simulate an update - one existing, one new entry,
            # one outdated entry
            mock_feed.return_value.update.return_value = (
                "OK",
                [mock_entry_1, mock_entry_4, mock_entry_3],
            )
            async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate an update - empty data, but successful update,
            # so no changes to entities.
            mock_feed.return_value.update.return_value = "OK_NO_DATA", None
            async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 3
            # Simulate an update - empty data, removes all entities
            mock_feed.return_value.update.return_value = "ERROR", None
            async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
    """Test the setup with a custom location."""
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 2000.5, (-31.1, 150.1))
    with patch("geojson_client.generic_feed.GenericFeed") as mock_feed:
        mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(
                hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
            )
            await hass.async_block_till_done()
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # The feed must have been constructed with the configured
            # custom coordinates and radius, not the default home location.
            assert mock_feed.call_args == call((15.1, 25.2), URL, filter_radius=200.0)
async def test_setup_race_condition(hass):
    """Test a particular race condition experienced.

    Verifies via the dispatcher registries that removed entities really
    stop listening for their delete/update signals.
    """
    # 1. Feed returns 1 entry -> Feed manager creates 1 entity.
    # 2. Feed returns error -> Feed manager removes 1 entity.
    #    However, this stayed on and kept listening for dispatcher signals.
    # 3. Feed returns 1 entry -> Feed manager creates 1 entity.
    # 4. Feed returns 1 entry -> Feed manager updates 1 entity.
    #    Internally, the previous entity is updating itself, too.
    # 5. Feed returns error -> Feed manager removes 1 entity.
    #    There are now 2 entities trying to remove themselves from HA, but
    #    the second attempt fails of course.
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0))
    delete_signal = "geo_json_events_delete_1234"
    update_signal = "geo_json_events_update_1234"
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
        "geojson_client.generic_feed.GenericFeed"
    ) as mock_feed:
        with assert_setup_component(1, geo_location.DOMAIN):
            assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
            await hass.async_block_till_done()
            mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
            # Artificially trigger update.
            hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
            # Collect events.
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            # Exactly one listener registered per signal for the new entity.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update - empty data, removes all entities
            mock_feed.return_value.update.return_value = "ERROR", None
            async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
            # Simulate an update - 1 entry
            mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
            async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update - 1 entry
            mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
            async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 1
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
            # Simulate an update - empty data, removes all entities
            mock_feed.return_value.update.return_value = "ERROR", None
            async_fire_time_changed(hass, utcnow + 4 * SCAN_INTERVAL)
            await hass.async_block_till_done()
            all_states = hass.states.async_all()
            assert len(all_states) == 0
            # Ensure that delete and update signal targets are now empty.
            assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
            assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
| 40.953307 | 87 | 0.639145 | from homeassistant.components import geo_location
from homeassistant.components.geo_json_events.geo_location import (
ATTR_EXTERNAL_ID,
SCAN_INTERVAL,
)
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_URL,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import MagicMock, call, patch
from tests.common import assert_setup_component, async_fire_time_changed
URL = "http://geo.json.local/geo_json_events.json"
CONFIG = {
geo_location.DOMAIN: [
{"platform": "geo_json_events", CONF_URL: URL, CONF_RADIUS: 200}
]
}
CONFIG_WITH_CUSTOM_LOCATION = {
geo_location.DOMAIN: [
{
"platform": "geo_json_events",
CONF_URL: URL,
CONF_RADIUS: 200,
CONF_LATITUDE: 15.1,
CONF_LONGITUDE: 25.2,
}
]
}
def _generate_mock_feed_entry(external_id, title, distance_to_home, coordinates):
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
return feed_entry
async def test_setup(hass):
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0))
mock_entry_2 = _generate_mock_feed_entry("2345", "Title 2", 20.5, (-31.1, 150.1))
mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2))
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3))
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"geojson_client.generic_feed.GenericFeed"
) as mock_feed:
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_2, mock_entry_3],
)
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: -31.0,
ATTR_LONGITUDE: 150.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geo_json_events",
}
assert round(abs(float(state.state) - 15.5), 7) == 0
state = hass.states.get("geo_location.title_2")
assert state is not None
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: -31.1,
ATTR_LONGITUDE: 150.1,
ATTR_FRIENDLY_NAME: "Title 2",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geo_json_events",
}
assert round(abs(float(state.state) - 20.5), 7) == 0
state = hass.states.get("geo_location.title_3")
assert state is not None
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: -31.2,
ATTR_LONGITUDE: 150.2,
ATTR_FRIENDLY_NAME: "Title 3",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geo_json_events",
}
assert round(abs(float(state.state) - 25.5), 7) == 0
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_4, mock_entry_3],
)
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
mock_feed.return_value.update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 2000.5, (-31.1, 150.1))
with patch("geojson_client.generic_feed.GenericFeed") as mock_feed:
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(
hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert mock_feed.call_args == call((15.1, 25.2), URL, filter_radius=200.0)
async def test_setup_race_condition(hass):
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 15.5, (-31.0, 150.0))
delete_signal = "geo_json_events_delete_1234"
update_signal = "geo_json_events_update_1234"
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"geojson_client.generic_feed.GenericFeed"
) as mock_feed:
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 1
assert len(hass.data[DATA_DISPATCHER][update_signal]) == 1
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 4 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
assert len(hass.data[DATA_DISPATCHER][delete_signal]) == 0
assert len(hass.data[DATA_DISPATCHER][update_signal]) == 0
| true | true |
f73eaa3a0a84cb23f0f313df5d2591c453441532 | 374 | py | Python | TelegramSpider/database.py | panchaoxin/telegram-crawler | 73d5ad62bf73027a9ca42699a87e865e5db3f589 | [
"MIT"
] | 9 | 2019-11-01T01:54:26.000Z | 2022-02-22T16:48:15.000Z | TelegramSpider/database.py | zestcode/telegram-crawler | 73d5ad62bf73027a9ca42699a87e865e5db3f589 | [
"MIT"
] | 1 | 2021-10-16T12:34:00.000Z | 2021-10-16T12:34:00.000Z | TelegramSpider/database.py | zestcode/telegram-crawler | 73d5ad62bf73027a9ca42699a87e865e5db3f589 | [
"MIT"
] | 3 | 2020-07-14T11:31:21.000Z | 2021-02-04T12:46:51.000Z | # -*- coding: utf-8 -*-
import pymysql
MYSQL_HOST = 'localhost'
import os

# Connection settings, overridable via environment variables so credentials
# need not be hard-coded.  The literal defaults preserve the previous
# behaviour when no variables are set.
MYSQL_HOST = os.environ.get('MYSQL_HOST', 'localhost')
MYSQL_DB = os.environ.get('MYSQL_DB', 'telegram')
MYSQL_USER = os.environ.get('MYSQL_USER', 'root')
# NOTE(review): a hard-coded password is a security smell; prefer setting
# MYSQL_PASS in the environment for real deployments.
MYSQL_PASS = os.environ.get('MYSQL_PASS', '123456')
# Module-level connection shared by everything that imports this module.
connection = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER,
                             password=MYSQL_PASS, db=MYSQL_DB,
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
| 26.714286 | 68 | 0.580214 |
import pymysql
MYSQL_HOST = 'localhost'
MYSQL_DB = 'telegram'
MYSQL_USER = 'root'
MYSQL_PASS = '123456'
connection = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER,
password=MYSQL_PASS, db=MYSQL_DB,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
| true | true |
f73eab6ea9dc15756d5e1e1db1880883de7fd011 | 18,584 | py | Python | back-end/tests/test_api.py | JAYqq/MonGo | e33c9f62c2cf494af2b2d33408853294f3aed168 | [
"MIT"
] | 1 | 2019-03-26T04:44:59.000Z | 2019-03-26T04:44:59.000Z | back-end/tests/test_api.py | JAYqq/MonGo | e33c9f62c2cf494af2b2d33408853294f3aed168 | [
"MIT"
] | 5 | 2020-02-12T13:32:08.000Z | 2021-06-02T00:27:16.000Z | back-end/tests/test_api.py | JAYqq/MonGo | e33c9f62c2cf494af2b2d33408853294f3aed168 | [
"MIT"
] | null | null | null | from base64 import b64encode
from datetime import datetime,timedelta
import json
import re
import unittest
from app import create_app,db
from app.models import User,Post
from tests import TestConfig
class APITestCase(unittest.TestCase):
'''测试API'''
    def setUp(self):
        """Create a fresh app, push its context and build the schema per test."""
        self.app = create_app(TestConfig) # create the Flask application
        self.app_context = self.app.app_context() # activate (push) the Flask application context
        self.app_context.push()
        db.create_all() # SQLAlchemy against an in-memory SQLite DB; create all tables quickly
        self.client=self.app.test_client() # Flask's built-in test client, simulating a browser
    def tearDown(self):
        # Discard the session, drop all tables, and pop the app context.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def test_404(self):
        # Requesting a non-existent API endpoint returns a JSON 404 payload.
        response=self.client.get('/api/wrong/url')
        self.assertEqual(response.status_code,404)
        json_response=json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['error'],'Not Found')
def get_basic_auth_headers(self,username,password):
'''创建Basic Auth认证的headers'''
return{
'Authorization':'Basic '+b64encode(
(username+':'+password).encode('utf-8')).decode('utf-8'),
'Accept':'application/json',
'Content-Type':'application/json'
}
    def get_token_auth_headers(self,username,password):
        '''Build headers carrying a JSON Web Token, obtained by first
        logging in through Basic Auth at /api/tokens.'''
        headers=self.get_basic_auth_headers(username,password)
        response=self.client.post('/api/tokens',headers=headers)
        self.assertEqual(response.status_code,200)
        json_response=json.loads(response.get_data(as_text=True))
        self.assertIsNotNone(json_response.get('token'))
        token=json_response['token']
        return{
            'Authorization': 'Bearer ' + token,
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }
    def test_get_token(self):
        # Test user login, i.e. obtaining a JWT: correct Basic Auth
        # credentials yield a token, wrong ones yield 401.
        # First create a test user.
        u = User(username='john', email='john@163.com')
        u.set_password('123')
        db.session.add(u)
        db.session.commit()
        # Wrong password -> 401.
        headers=self.get_basic_auth_headers('john','456')
        response=self.client.post('/api/tokens',headers=headers)
        self.assertEqual(response.status_code,401)
        # Correct credentials -> 200 with a three-part JWT in the payload.
        headers=self.get_basic_auth_headers('john','123')
        response=self.client.post('/api/tokens',headers=headers)
        self.assertEqual(response.status_code,200)
        json_response=json.loads(response.get_data(as_text=True))
        self.assertIsNotNone(json_response.get('token'))
        self.assertTrue(re.match(r'(.+)\.(.+)\.(.+)', json_response.get('token')))
    def test_not_attach_jwt(self):
        # Without a JWT in the Authorization header, protected endpoints return 401.
        response=self.client.get('/api/users')
        self.assertEqual(response.status_code,401)
    def test_attach_jwt(self):
        # With a valid JWT attached, a protected endpoint returns 200.
        # (The original comment was copy-pasted from the 401 test.)
        u=User(username="mike",email="mike@sina.com")
        u.set_password("123")
        db.session.add(u)
        db.session.commit()
        # Attach the JWT to the request headers.
        headers=self.get_token_auth_headers('mike','123')
        response=self.client.get('/api/users',headers=headers)
        self.assertEqual(response.status_code,200)
    def test_anonymous(self):
        # Endpoints that require no authentication are reachable anonymously.
        response=self.client.get('/api/posts')
        self.assertEqual(response.status_code,200)
#用户相关API
def get_api_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
    def test_create_user(self):
        # Test user registration via POST /api/users/.
        headers = self.get_api_headers()
        # 1. No required parameters supplied at all.
        data = json.dumps({})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        # 2. Missing username.
        data = json.dumps({'email': 'john@163.com', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        # 3. Missing email.
        data = json.dumps({'username': 'john', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        # 4. Missing password.
        data = json.dumps({'username': 'john', 'email': 'john@163.com'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        # 5. Username or email already taken.
        u = User(username='john', email='john@163.com')
        u.set_password('123')
        db.session.add(u)
        db.session.commit()
        data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message']['username'], 'Please use a different username.')
        self.assertEqual(json_response['message']['email'], 'Please use a different email address.')
        # 6. All parameters supplied correctly -> 201 with a Location header.
        data = json.dumps({'username': 'david', 'email': 'david@163.com', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 201)
        url = response.headers.get('Location')
        self.assertIsNotNone(url)
    def test_get_users(self):
        # Test fetching the paginated user collection.
        # First create a few test users.
        u1 = User(username='john', email='john@163.com')
        u1.set_password('123')
        u2 = User(username='david', email='david@163.com')
        u2.set_password('123')
        u3 = User(username='susan', email='susan@163.com')
        u3.set_password('123')
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.commit()
        headers=self.get_token_auth_headers('john','123')
        response=self.client.get('/api/users',headers=headers)
        self.assertEqual(response.status_code,200)
        # The returned collection should contain exactly the users just created.
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['_meta']['total_items'],3)
        self.assertIsNotNone(json_response.get('items'))
        self.assertEqual(json_response['items'][0]['username'], 'john')
        self.assertEqual(json_response['items'][1]['username'], 'david')
        self.assertEqual(json_response['items'][2]['username'], 'susan')
    def test_get_user(self):
        # Test fetching a single user.
        # First create a test user through the API.
        headers = self.get_api_headers()
        data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 201)
        url = response.headers.get('Location')
        self.assertIsNotNone(url)
        # Attach the JWT to the request headers.
        headers = self.get_token_auth_headers('john', '123')
        response = self.client.get(url, headers=headers)
        self.assertEqual(response.status_code, 200)
        # The returned user should be the one just created.
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual('http://localhost' + json_response['_links']['self'], url)
        self.assertEqual(json_response['username'], 'john')
        # Requesting one's own record: the response includes the email
        # field and omits is_following.
        self.assertIsNotNone(json_response['email'])
        self.assertIsNone(json_response.get('is_following'))
    def test_update_user(self):
        """Exercise PUT on a user resource: validation failures and a success."""
        # Create the user that will be modified.
        headers = self.get_api_headers()
        data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
        response = self.client.post('/api/users/', headers=headers, data=data)
        self.assertEqual(response.status_code, 201)
        url = response.headers.get('Location')
        self.assertIsNotNone(url)
        # Attach a JWT to the request headers.
        headers = self.get_token_auth_headers('john', '123')
        # 1. No required field supplied at all (empty strings are allowed,
        #    but an empty JSON body is not).
        data = json.dumps({})
        response = self.client.put(url, headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        # 2. username / email supplied but invalid.
        data = json.dumps({'username': '', 'email': '1@1'})
        response = self.client.put(url, headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message']['username'], 'Please provide a valid username.')
        self.assertEqual(json_response['message']['email'], 'Please provide a valid email address.')
        # 3. username / email supplied but already taken by another user.
        u = User(username='david', email='david@163.com')
        u.set_password('123')
        db.session.add(u)
        db.session.commit()
        data = json.dumps({'username': 'david', 'email': 'david@163.com'})
        response = self.client.put(url, headers=headers, data=data)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message']['username'], 'Please use a different username.')
        self.assertEqual(json_response['message']['email'], 'Please use a different email address.')
        # 4. A valid partial update succeeds.
        data = json.dumps({'about_me': 'I love DevOps'})
        response = self.client.put(url, headers=headers, data=data)
        self.assertEqual(response.status_code, 200)
        # Confirm the response reflects the updated about_me field.
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual('http://localhost' + json_response['_links']['self'], url)
        self.assertEqual(json_response['about_me'], 'I love DevOps')
def test_delete_user(self):
# 测试删除用户
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
headers=self.get_token_auth_headers('john','123')
response=self.client.delete('/api/users/1',headers=headers)
self.assertEqual(response.status_code,204)
self.assertEqual(response.get_data(as_text=True), '')
#删除别人的账户是不允许的
headers = self.get_token_auth_headers('david', '123')
response = self.client.delete('/api/users/3', headers=headers)
self.assertEqual(response.status_code, 403)
    def test_follow(self):
        """Exercise /api/follow/<id>: self-follow, first follow, duplicate follow."""
        # Seed two users.
        u1 = User(username='john', email='john@163.com')
        u1.set_password('123')
        u2 = User(username='david', email='david@163.com')
        u2.set_password('123')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        # 1. Following yourself is rejected.
        headers=self.get_token_auth_headers('john','123')
        response=self.client.get('/api/follow/1',headers=headers)
        self.assertEqual(response.status_code,400)
        json_response=json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You cannot follow yourself!')
        # 2. Following someone you do not yet follow succeeds.
        response = self.client.get('/api/follow/2', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You are now following david')
        # 3. Following the same user a second time is rejected.
        response = self.client.get('/api/follow/2', headers=headers)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You have already followed that user!')
    def test_unfollow(self):
        """Exercise /api/unfollow/<id>: self, not-followed, and success paths."""
        # Seed two users.
        u1 = User(username='john', email='john@163.com')
        u1.set_password('123')
        u2 = User(username='david', email='david@163.com')
        u2.set_password('123')
        db.session.add(u1)
        db.session.add(u2)
        db.session.commit()
        headers = self.get_token_auth_headers('john', '123')
        # 1. Unfollowing yourself is rejected.
        response = self.client.get('/api/unfollow/1', headers=headers)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You cannot unfollow yourself!')
        # 2. Unfollowing a user you never followed is rejected.
        response = self.client.get('/api/unfollow/2', headers=headers)
        self.assertEqual(response.status_code, 400)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You have not followed this user!')
        # 3. A successful unfollow.
        # First follow the user...
        response = self.client.get('/api/follow/2', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You are now following david')
        # ...then unfollow them.
        response = self.client.get('/api/unfollow/2', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['message'], 'You are not following david anymore')
def test_get_followeds(self):
# 测试获取你已关注的人的列表
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
u1.follow(u2)
u1.follow(u3)
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/users/1/followeds/', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['items'][0]['username'], 'david')
self.assertEqual(json_response['items'][1]['username'], 'susan')
    def test_get_followers(self):
        """Listing /api/users/<id>/followers/ returns the user's followers."""
        # Seed three users.
        u1 = User(username='john', email='john@163.com')
        u1.set_password('123')
        u2 = User(username='david', email='david@163.com')
        u2.set_password('123')
        u3 = User(username='susan', email='susan@163.com')
        u3.set_password('123')
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.commit()
        # david and susan both follow john.
        u2.follow(u1)
        u3.follow(u1)
        headers = self.get_token_auth_headers('john', '123')
        response = self.client.get('/api/users/1/followers/', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['items'][0]['username'], 'david')
        self.assertEqual(json_response['items'][1]['username'], 'susan')
    def test_get_user_posts(self):
        """Listing /api/users/<id>/posts/ returns the user's own posts, newest first."""
        # Create the author.
        u = User(username='john', email='john@163.com')
        u.set_password('123')
        db.session.add(u)
        # Create a couple of posts with staggered timestamps so ordering
        # is deterministic.
        now = datetime.utcnow()
        p1 = Post(title='first post from john', body='post from john', author=u,
                  timestamp=now + timedelta(seconds=1))
        p2 = Post(title='second post from john', body='post from john', author=u,
                  timestamp=now + timedelta(seconds=4))
        db.session.add_all([p1, p2])
        db.session.commit()
        headers = self.get_token_auth_headers('john', '123')
        response = self.client.get('/api/users/1/posts/', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['_meta']['total_items'], 2)
        self.assertIsNotNone(json_response.get('items'))
        self.assertEqual(json_response['items'][0]['title'], 'second post from john')  # newest first
        self.assertEqual(json_response['items'][1]['title'], 'first post from john')
    def test_get_user_followed_posts(self):
        """Listing followeds-posts returns posts from followed users, newest first."""
        # Seed three users; john follows the other two.
        u1 = User(username='john', email='john@163.com')
        u1.set_password('123')
        u2 = User(username='david', email='david@163.com')
        u2.set_password('123')
        u3 = User(username='susan', email='susan@163.com')
        u3.set_password('123')
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.commit()
        u1.follow(u2)
        u1.follow(u3)
        # Staggered timestamps make the expected ordering deterministic.
        now = datetime.utcnow()
        p1 = Post(title='first post from david', body='post from david', author=u2,
                  timestamp=now + timedelta(seconds=1))
        p2 = Post(title='second post from david', body='post from david', author=u2,
                  timestamp=now + timedelta(seconds=4))
        p3 = Post(title='first post from susan', body='post from susan', author=u3,
                  timestamp=now + timedelta(seconds=3))
        p4 = Post(title='second post from susan', body='post from susan', author=u3,
                  timestamp=now + timedelta(seconds=2))
        db.session.add_all([p1, p2, p3, p4])
        db.session.commit()
        headers = self.get_token_auth_headers('john', '123')
        response = self.client.get('/api/users/1/followeds-posts/', headers=headers)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.get_data(as_text=True))
        self.assertEqual(json_response['_meta']['total_items'], 4)
        self.assertIsNotNone(json_response.get('items'))
        self.assertEqual(json_response['items'][0]['title'], 'second post from david')  # newest first
        self.assertEqual(json_response['items'][1]['title'], 'first post from susan')
        self.assertEqual(json_response['items'][2]['title'], 'second post from susan')
        self.assertEqual(json_response['items'][3]['title'], 'first post from david')
| 41.950339 | 100 | 0.632426 | from base64 import b64encode
from datetime import datetime,timedelta
import json
import re
import unittest
from app import create_app,db
from app.models import User,Post
from tests import TestConfig
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app(TestConfig)
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client=self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_404(self):
response=self.client.get('/api/wrong/url')
self.assertEqual(response.status_code,404)
json_response=json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['error'],'Not Found')
def get_basic_auth_headers(self,username,password):
return{
'Authorization':'Basic '+b64encode(
(username+':'+password).encode('utf-8')).decode('utf-8'),
'Accept':'application/json',
'Content-Type':'application/json'
}
def get_token_auth_headers(self,username,password):
headers=self.get_basic_auth_headers(username,password)
response=self.client.post('/api/tokens',headers=headers)
self.assertEqual(response.status_code,200)
json_response=json.loads(response.get_data(as_text=True))
self.assertIsNotNone(json_response.get('token'))
token=json_response['token']
return{
'Authorization': 'Bearer ' + token,
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_get_token(self):
u = User(username='john', email='john@163.com')
u.set_password('123')
db.session.add(u)
db.session.commit()
headers=self.get_basic_auth_headers('john','456')
response=self.client.post('/api/tokens',headers=headers)
self.assertEqual(response.status_code,401)
headers=self.get_basic_auth_headers('john','123')
response=self.client.post('/api/tokens',headers=headers)
self.assertEqual(response.status_code,200)
json_response=json.loads(response.get_data(as_text=True))
self.assertIsNotNone(json_response.get('token'))
self.assertTrue(re.match(r'(.+)\.(.+)\.(.+)', json_response.get('token')))
def test_not_attach_jwt(self):
response=self.client.get('/api/users')
self.assertEqual(response.status_code,401)
def test_attach_jwt(self):
u=User(username="mike",email="mike@sina.com")
u.set_password("123")
db.session.add(u)
db.session.commit()
headers=self.get_token_auth_headers('mike','123')
response=self.client.get('/api/users',headers=headers)
self.assertEqual(response.status_code,200)
def test_anonymous(self):
response=self.client.get('/api/posts')
self.assertEqual(response.status_code,200)
def get_api_headers(self):
return {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_create_user(self):
headers = self.get_api_headers()
data = json.dumps({})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 400)
data = json.dumps({'email': 'john@163.com', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 400)
data = json.dumps({'username': 'john', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 400)
data = json.dumps({'username': 'john', 'email': 'john@163.com'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 400)
u = User(username='john', email='john@163.com')
u.set_password('123')
db.session.add(u)
db.session.commit()
data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message']['username'], 'Please use a different username.')
self.assertEqual(json_response['message']['email'], 'Please use a different email address.')
data = json.dumps({'username': 'david', 'email': 'david@163.com', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
def test_get_users(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
headers=self.get_token_auth_headers('john','123')
response=self.client.get('/api/users',headers=headers)
self.assertEqual(response.status_code,200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['_meta']['total_items'],3)
self.assertIsNotNone(json_response.get('items'))
self.assertEqual(json_response['items'][0]['username'], 'john')
self.assertEqual(json_response['items'][1]['username'], 'david')
self.assertEqual(json_response['items'][2]['username'], 'susan')
def test_get_user(self):
headers = self.get_api_headers()
data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
headers = self.get_token_auth_headers('john', '123')
response = self.client.get(url, headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual('http://localhost' + json_response['_links']['self'], url)
self.assertEqual(json_response['username'], 'john')
self.assertIsNotNone(json_response['email'])
self.assertIsNone(json_response.get('is_following'))
def test_update_user(self):
headers = self.get_api_headers()
data = json.dumps({'username': 'john', 'email': 'john@163.com', 'password': '123'})
response = self.client.post('/api/users/', headers=headers, data=data)
self.assertEqual(response.status_code, 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
headers = self.get_token_auth_headers('john', '123')
data = json.dumps({})
response = self.client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 400)
data = json.dumps({'username': '', 'email': '1@1'})
response = self.client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message']['username'], 'Please provide a valid username.')
self.assertEqual(json_response['message']['email'], 'Please provide a valid email address.')
u = User(username='david', email='david@163.com')
u.set_password('123')
db.session.add(u)
db.session.commit()
data = json.dumps({'username': 'david', 'email': 'david@163.com'})
response = self.client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message']['username'], 'Please use a different username.')
self.assertEqual(json_response['message']['email'], 'Please use a different email address.')
data = json.dumps({'about_me': 'I love DevOps'})
response = self.client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual('http://localhost' + json_response['_links']['self'], url)
self.assertEqual(json_response['about_me'], 'I love DevOps')
def test_delete_user(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
headers=self.get_token_auth_headers('john','123')
response=self.client.delete('/api/users/1',headers=headers)
self.assertEqual(response.status_code,204)
self.assertEqual(response.get_data(as_text=True), '')
headers = self.get_token_auth_headers('david', '123')
response = self.client.delete('/api/users/3', headers=headers)
self.assertEqual(response.status_code, 403)
def test_follow(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
headers=self.get_token_auth_headers('john','123')
response=self.client.get('/api/follow/1',headers=headers)
self.assertEqual(response.status_code,400)
json_response=json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You cannot follow yourself!')
response = self.client.get('/api/follow/2', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You are now following david')
response = self.client.get('/api/follow/2', headers=headers)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You have already followed that user!')
def test_unfollow(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/unfollow/1', headers=headers)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You cannot unfollow yourself!')
response = self.client.get('/api/unfollow/2', headers=headers)
self.assertEqual(response.status_code, 400)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You have not followed this user!')
response = self.client.get('/api/follow/2', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You are now following david')
response = self.client.get('/api/unfollow/2', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['message'], 'You are not following david anymore')
def test_get_followeds(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
u1.follow(u2)
u1.follow(u3)
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/users/1/followeds/', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['items'][0]['username'], 'david')
self.assertEqual(json_response['items'][1]['username'], 'susan')
def test_get_followers(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
u2.follow(u1)
u3.follow(u1)
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/users/1/followers/', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['items'][0]['username'], 'david')
self.assertEqual(json_response['items'][1]['username'], 'susan')
def test_get_user_posts(self):
u = User(username='john', email='john@163.com')
u.set_password('123')
db.session.add(u)
now = datetime.utcnow()
p1 = Post(title='first post from john', body='post from john', author=u,
timestamp=now + timedelta(seconds=1))
p2 = Post(title='second post from john', body='post from john', author=u,
timestamp=now + timedelta(seconds=4))
db.session.add_all([p1, p2])
db.session.commit()
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/users/1/posts/', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['_meta']['total_items'], 2)
self.assertIsNotNone(json_response.get('items'))
self.assertEqual(json_response['items'][0]['title'], 'second post from john')
self.assertEqual(json_response['items'][1]['title'], 'first post from john')
def test_get_user_followed_posts(self):
u1 = User(username='john', email='john@163.com')
u1.set_password('123')
u2 = User(username='david', email='david@163.com')
u2.set_password('123')
u3 = User(username='susan', email='susan@163.com')
u3.set_password('123')
db.session.add(u1)
db.session.add(u2)
db.session.add(u3)
db.session.commit()
u1.follow(u2)
u1.follow(u3)
now = datetime.utcnow()
p1 = Post(title='first post from david', body='post from david', author=u2,
timestamp=now + timedelta(seconds=1))
p2 = Post(title='second post from david', body='post from david', author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(title='first post from susan', body='post from susan', author=u3,
timestamp=now + timedelta(seconds=3))
p4 = Post(title='second post from susan', body='post from susan', author=u3,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
headers = self.get_token_auth_headers('john', '123')
response = self.client.get('/api/users/1/followeds-posts/', headers=headers)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.get_data(as_text=True))
self.assertEqual(json_response['_meta']['total_items'], 4)
self.assertIsNotNone(json_response.get('items'))
self.assertEqual(json_response['items'][0]['title'], 'second post from david')
self.assertEqual(json_response['items'][1]['title'], 'first post from susan')
self.assertEqual(json_response['items'][2]['title'], 'second post from susan')
self.assertEqual(json_response['items'][3]['title'], 'first post from david')
| true | true |
f73eabfea7038a598748118e033e9eb6299c0cec | 4,963 | py | Python | app/utils.py | jkereako/flask-base-app | c83ef1748eaf253ef3da0efb83af0a81181d3d97 | [
"MIT"
] | null | null | null | app/utils.py | jkereako/flask-base-app | c83ef1748eaf253ef3da0efb83af0a81181d3d97 | [
"MIT"
] | null | null | null | app/utils.py | jkereako/flask-base-app | c83ef1748eaf253ef3da0efb83af0a81181d3d97 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utils
~~~~~
Utility methods.
I'm including this file in the skeleton because it contains methods I've
found useful.
The goal is to keep this file as lean as possible.
:author: Jeff Kereakoglow
:date: 2014-11-05
:copyright: (c) 2015 by Alexis Digital
:license: MIT, see LICENSE for more details
"""
import re
import logging
from hashlib import sha224
from datetime import date, datetime
from dateutil.parser import parse
from unicodedata import normalize
from flask import request
from app import app, cache
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def logcat(message):
    """
    Log *message* at DEBUG level through the application logger.

    Shorthand for ``app.logger.debug(message)`` so callers do not have to
    remember the Flask logging incantation; the name is a nod to Android's
    logcat, which is easier to recall.

    :param message: The log message
    :type message: str
    """
    app.logger.debug(message)
def query_string_arg_to_bool(arg):
    """
    Interpret the query-string parameter named *arg* as a Boolean.

    A missing parameter or any unrecognized value is False; the strings
    yes/y/true/t/1 (case-insensitive) are True.

    :param arg: The name of the query-string parameter to read
    :type arg: str
    :returns: The estimated Boolean representation
    :rtype: Boolean
    """
    value = request.args.get(arg)
    return value is not None and value.lower() in ("yes", 'y', "true", "t", "1")
def prepare_query_string(args):
    """
    Build a naive query string from *args*.

    This is an alternative to Requests's parameter feature: nothing is
    escaped or converted, keys and values are joined verbatim.

    :param args: The data which is to be prepared
    :type args: dict
    :returns: A formatted query string
    :rtype: string
    """
    pairs = ("%s=%s" % pair for pair in args.items())
    return '?' + '&'.join(pairs)
def fetch_cached_data(args=None):
    """
    Retrieves a cache object for the current request's base URL.

    Because most cache keys within this app are URL dependent, the code
    which retrieves the cache has been refactored here to maximize
    consistency. The key is the SHA-224 digest of the base URL plus any
    extra *args* text.

    :param args: Extra text appended to the base URL before hashing
    :type args: str
    :returns: The cached data, or None on a cache miss
    :rtype: dict
    """
    cache_key = request.base_url
    if args:
        cache_key += args
    # hashlib digests operate on bytes, not str, on Python 3 — encode
    # before hashing (the original passed the str and raised TypeError).
    cache_key = sha224(cache_key.encode("utf-8")).hexdigest()
    return cache.get(cache_key)
def cache_data(data, args=None, timeout=None):
    """
    Stores data in the application cache keyed by the request's base URL.

    To prevent arbitrary query strings from fragmenting the cache, such as

        /teams/nba?this_is_not_a_real_param=2

    the base URL (plus optional *args*) is hashed rather than the full URL.
    This mirrors the key construction used by fetch_cached_data.

    :param data: The data object to cache
    :type data: dict
    :param args: Extra text appended to the base URL before hashing
    :type args: str
    :param timeout: The expiry for the cache; defaults to app CACHE_TIMEOUT
    :type timeout: int
    :returns: None
    :rtype: None
    """
    cache_key = request.base_url
    if args:
        cache_key += args
    # hashlib digests operate on bytes, not str, on Python 3 — encode
    # before hashing (the original passed the str and raised TypeError).
    cache_key = sha224(cache_key.encode("utf-8")).hexdigest()
    if timeout is None:
        timeout = app.config["CACHE_TIMEOUT"]
    cache.set(cache_key, data, timeout)
#
def slugify(text, delimiter=u'-'):
    """
    Generates a slightly worse ASCII-only slug.

    :see http://flask.pocoo.org/snippets/5/
    :param text: The text to slugify
    :type text: str
    :param delimiter: Separator placed between words
    :type delimiter: str
    :returns: A URL slug
    :rtype: str
    """
    result = []
    for word in _punct_re.split(text.lower()):
        # NFKD-normalize and drop any non-ASCII characters; decode back to
        # text so the join below works on Python 3 (the original appended
        # bytes and then called the Python-2-only unicode() builtin).
        word = normalize("NFKD", word).encode("ascii", "ignore").decode("ascii")
        if word:
            result.append(word)
    return delimiter.join(result)
def current_time():
    """
    Returns a UNIX timestamp for the current time.

    ``strftime("%s")`` is a glibc extension that is not available on all
    platforms (notably Windows), so the epoch value is computed portably
    via ``datetime.timestamp()`` instead; the naive datetime is interpreted
    in local time, matching the old ``"%s"`` behavior.

    :returns: The current timestamp as a string
    :rtype: str
    """
    return str(int(datetime.now().timestamp()))
def timestamp_from_string(date_string):
    """
    Returns a UNIX timestamp for a provided date string.

    This function was created with PHP's strtotime in mind. The epoch
    value is computed via ``datetime.timestamp()`` rather than the
    non-portable glibc ``strftime("%s")`` extension; naive datetimes are
    interpreted in local time, matching the old behavior.

    :param date_string: Any date/time string dateutil's parser understands
    :type date_string: str
    :returns: The corresponding timestamp as a string
    :rtype: str
    """
    return str(int(parse(date_string).timestamp()))
| 24.815 | 84 | 0.667137 |
import re
import logging
from hashlib import sha224
from datetime import date, datetime
from dateutil.parser import parse
from unicodedata import normalize
from flask import request
from app import app, cache
try:
compat_chr = unichr
except NameError:
compat_chr = chr
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def logcat(message):
app.logger.debug(message)
def query_string_arg_to_bool(arg):
param = request.args.get(arg)
if param is None:
return False
elif param.lower() in ("yes", 'y', "true", "t", "1"):
return True
return False
def prepare_query_string(args):
return '?' + '&'.join(["%s=%s" % (key, value) for (key, value) in args.items()])
def fetch_cached_data(args=None):
cache_key = request.base_url
if args:
cache_key += args
cache_key = sha224(cache_key).hexdigest()
rv = cache.get(cache_key)
# if rv is not None:
# rv = "<!-- served from cache -->" + rv
return rv
def cache_data(data, args=None, timeout=None):
cache_key = request.base_url
if args:
cache_key += args
cache_key = sha224(cache_key).hexdigest()
timeout = app.config["CACHE_TIMEOUT"] if timeout is None else timeout
cache.set(cache_key, data, timeout)
#
def slugify(text, delimiter=u'-'):
result = []
for word in _punct_re.split(text.lower()):
word = normalize("NFKD",word).encode("ascii", "ignore")
if word:
result.append(word)
return unicode(delimiter.join(result))
def current_time():
return datetime.now().strftime("%s")
def timestamp_from_string(date_string):
return parse(date_string).strftime("%s")
| true | true |
f73eac5bf57fae7ffa78bb1afabfd0f7263f5bca | 3,748 | py | Python | utils/recalculator.py | Ausakura/gulag | 8a81d54a20ead735b234acd4acec550a5ce6f99c | [
"MIT"
] | null | null | null | utils/recalculator.py | Ausakura/gulag | 8a81d54a20ead735b234acd4acec550a5ce6f99c | [
"MIT"
] | null | null | null | utils/recalculator.py | Ausakura/gulag | 8a81d54a20ead735b234acd4acec550a5ce6f99c | [
"MIT"
] | 2 | 2022-01-19T15:18:33.000Z | 2022-03-28T12:02:13.000Z | # -*- coding: utf-8 -*-
import asyncio
import aiofiles
import aiohttp
import orjson
from pathlib import Path
from cmyui import log, Ansi
from constants.gamemodes import GameMode
from constants.mods import Mods
__all__ = 'PPCalculator',
BEATMAPS_PATH = Path.cwd() / '.data/osu'
class PPCalculator:
    """Asynchronously wraps the process of calculating difficulty in osu!.

    Wraps an external oppai-ng subprocess invocation; construct via the
    `from_id` classmethod so the .osu file is guaranteed to be on disk.
    """
    def __init__(self, map_id: int, **kwargs) -> None:
        # NOTE: this constructor should not be called
        # unless you are CERTAIN the map is on disk
        # for normal usage, use the classmethods
        self.file = f'.data/osu/{map_id}.osu'
        # Optional score parameters consumed by `perform`.
        self.mods = kwargs.get('mods', Mods.NOMOD)
        self.combo = kwargs.get('combo', 0)
        self.nmiss = kwargs.get('nmiss', 0)
        self.mode = kwargs.get('mode', GameMode.vn_std)
        self.acc = kwargs.get('acc', 100.00)
    @staticmethod
    async def get_from_osuapi(map_id: int, dest_path: Path) -> bool:
        """Download the .osu file for `map_id` from old.ppy.sh to `dest_path`.

        Returns True on success, False if the map could not be fetched.
        """
        url = f'https://old.ppy.sh/osu/{map_id}'
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as r:
                if not r or r.status != 200:
                    log(f'Could not find map by id {map_id}!', Ansi.LRED)
                    return False
                content = await r.read()
        async with aiofiles.open(dest_path, 'wb') as f:
            await f.write(content)
        return True
    @classmethod
    async def get_file(cls, map_id: int) -> None:
        # NOTE(review): despite the `-> None` annotation, this returns the
        # beatmap's Path on success and None on failure.
        path = BEATMAPS_PATH / f'{map_id}.osu'
        # check if file exists on disk already
        if not path.exists():
            # not found on disk, try osu!api
            if not await cls.get_from_osuapi(map_id, path):
                # failed to find the map
                return
        # map is now on disk, return filepath.
        return path
    @classmethod
    async def from_id(cls, map_id: int, **kwargs):
        """Build a PPCalculator for `map_id`, downloading the map if needed.

        Returns None when the .osu file cannot be obtained.
        """
        # ensure we have the file on disk for recalc
        if not await cls.get_file(map_id):
            return
        return cls(map_id, **kwargs)
    async def perform(self) -> tuple[float, float]:
        """Perform the calculations with the current state, returning (pp, sr)."""
        # TODO: PLEASE rewrite this with c bindings,
        # add ways to get specific stuff like aim pp
        # for now, we'll generate a bash command and
        # use subprocess to do the calculations (yikes).
        cmd = [f'./oppai-ng/oppai {self.file}']
        # Only append the flags the caller actually set (zero/NOMOD skip).
        if self.mods: cmd.append(f'+{self.mods!r}')
        if self.combo: cmd.append(f'{self.combo}x')
        if self.nmiss: cmd.append(f'{self.nmiss}xM')
        if self.acc: cmd.append(f'{self.acc:.4f}%')
        if self.mode:
            mode_vn = self.mode.as_vanilla
            if mode_vn not in (0, 1):
                # oppai-ng only supports std & taiko
                # TODO: osu!catch & mania support
                return
            cmd.append(f'-m{mode_vn}')
            if mode_vn == GameMode.vn_taiko:
                cmd.append('-otaiko')
        # XXX: could probably use binary to save a bit
        # of time.. but in reality i should just write
        # some bindings lmao this is so cursed overall
        cmd.append('-ojson')
        # join & run the command
        pipe = asyncio.subprocess.PIPE
        proc = await asyncio.create_subprocess_shell(
            ' '.join(cmd), stdout=pipe, stderr=pipe
        )
        stdout, _ = await proc.communicate() # stderr not needed
        output = orjson.loads(stdout.decode())
        if 'code' not in output or output['code'] != 200:
            log(f"oppai-ng: {output['errstr']}", Ansi.LRED)
            # NOTE(review): execution falls through after logging, so the
            # `output['pp']` access below will KeyError on oppai failure —
            # confirm whether an early return was intended here.
        await proc.wait() # wait for exit
        return output['pp'], output['stars']
| 32.310345 | 82 | 0.586446 |
import asyncio
import aiofiles
import aiohttp
import orjson
from pathlib import Path
from cmyui import log, Ansi
from constants.gamemodes import GameMode
from constants.mods import Mods
__all__ = 'PPCalculator',
BEATMAPS_PATH = Path.cwd() / '.data/osu'
class PPCalculator:
def __init__(self, map_id: int, **kwargs) -> None:
self.file = f'.data/osu/{map_id}.osu'
self.mods = kwargs.get('mods', Mods.NOMOD)
self.combo = kwargs.get('combo', 0)
self.nmiss = kwargs.get('nmiss', 0)
self.mode = kwargs.get('mode', GameMode.vn_std)
self.acc = kwargs.get('acc', 100.00)
@staticmethod
async def get_from_osuapi(map_id: int, dest_path: Path) -> bool:
url = f'https://old.ppy.sh/osu/{map_id}'
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if not r or r.status != 200:
log(f'Could not find map by id {map_id}!', Ansi.LRED)
return False
content = await r.read()
async with aiofiles.open(dest_path, 'wb') as f:
await f.write(content)
return True
@classmethod
async def get_file(cls, map_id: int) -> None:
path = BEATMAPS_PATH / f'{map_id}.osu'
if not path.exists():
if not await cls.get_from_osuapi(map_id, path):
return
return path
@classmethod
async def from_id(cls, map_id: int, **kwargs):
if not await cls.get_file(map_id):
return
return cls(map_id, **kwargs)
async def perform(self) -> tuple[float, float]:
# use subprocess to do the calculations (yikes).
cmd = [f'./oppai-ng/oppai {self.file}']
if self.mods: cmd.append(f'+{self.mods!r}')
if self.combo: cmd.append(f'{self.combo}x')
if self.nmiss: cmd.append(f'{self.nmiss}xM')
if self.acc: cmd.append(f'{self.acc:.4f}%')
if self.mode:
mode_vn = self.mode.as_vanilla
if mode_vn not in (0, 1):
# oppai-ng only supports std & taiko
# TODO: osu!catch & mania support
return
cmd.append(f'-m{mode_vn}')
if mode_vn == GameMode.vn_taiko:
cmd.append('-otaiko')
# XXX: could probably use binary to save a bit
# of time.. but in reality i should just write
# some bindings lmao this is so cursed overall
cmd.append('-ojson')
# join & run the command
pipe = asyncio.subprocess.PIPE
proc = await asyncio.create_subprocess_shell(
' '.join(cmd), stdout=pipe, stderr=pipe
)
stdout, _ = await proc.communicate() # stderr not needed
output = orjson.loads(stdout.decode())
if 'code' not in output or output['code'] != 200:
log(f"oppai-ng: {output['errstr']}", Ansi.LRED)
await proc.wait() # wait for exit
return output['pp'], output['stars']
| true | true |
f73eaca3b126b8bc2fc49fbd3b4150fd51b86258 | 3,315 | py | Python | blog/views.py | swapnilkadakia/unicode-website-dev-temp | c7673cde5fdf66e5cad849e51908f86fb9c0544c | [
"MIT"
] | null | null | null | blog/views.py | swapnilkadakia/unicode-website-dev-temp | c7673cde5fdf66e5cad849e51908f86fb9c0544c | [
"MIT"
] | null | null | null | blog/views.py | swapnilkadakia/unicode-website-dev-temp | c7673cde5fdf66e5cad849e51908f86fb9c0544c | [
"MIT"
] | null | null | null | from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator
from django.db.models import Q
from django.utils import timezone
from .models import Post, Comment
from .forms import PostForm, CommentForm
from profiles.models import UserProfile
def post_create(request):
    """Create a new blog post.

    Only authenticated users may create posts; anyone else gets a 404.
    On a valid submission, the post is saved with the logged-in user's
    profile as author and the client is redirected to the new post's
    detail page; otherwise the (bound or empty) form is re-rendered.
    """
    if not request.user.is_authenticated:
        raise Http404
    form = PostForm(request.POST or None)
    # The author is the UserProfile attached to the logged-in user.
    user = get_object_or_404(UserProfile, user=request.user)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.author = user
        instance.save()
        return HttpResponseRedirect(instance.get_absolute_url())
    context = {
        "form": form,
        "title": "Create"
    }
    return render(request, "blog/post_form.html", context)
def post_update(request, slug=None):
    """Edit an existing post (authenticated users only).

    Looks the post up by slug, binds it to the form, and on a valid
    submission saves the changes and redirects to the post's detail
    page; otherwise re-renders the edit form.
    """
    if not request.user.is_authenticated:
        raise Http404
    post = get_object_or_404(Post, slug=slug)
    form = PostForm(request.POST or None, instance=post)
    if form.is_valid():
        updated = form.save(commit=False)
        updated.save()
        return HttpResponseRedirect(updated.get_absolute_url())
    return render(request, "blog/post_form.html", {
        "form": form,
        "title": "Update"
    })
def post_detail(request, slug=None):
    """Show a single post with its comments and a comment form.

    A valid comment submission is attached to the post and the client
    is redirected back to this page.  The template receives ``user``
    as a flag indicating whether the viewer is the post's author.
    """
    post = get_object_or_404(Post, slug=slug)
    comment_list = Comment.objects.all().filter(post=post)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        # Bind the new comment to this post before saving.
        form = form.save(commit=False)
        form.post = post
        form.save()
        return HttpResponseRedirect(post.get_absolute_url())
    context = {
        "post": post,
        "comment_list": comment_list,
        "form": form,
        "title": "Detail",
    }
    # "user" here means "viewer is the author", used by the template to
    # decide whether to show edit controls.
    if request.user == post.author.user:
        context["user"] = True
    else:
        context["user"] = False
    return render(request, "blog/post_detail.html", context)
def post_list(request):
    """List published posts, optionally filtered by a search query.

    The ``q`` GET parameter matches (case-insensitively) against title,
    content, author first/last name, and technologies.  Results are
    paginated 10 per page via the ``page`` GET parameter.
    """
    queryset_list = Post.objects.published()
    query = request.GET.get("q", None)
    if query:
        # OR together all searchable fields; distinct() avoids duplicate
        # rows produced by the joined author lookups.
        queryset_list = queryset_list.filter(Q(title__icontains=query) | Q(content__icontains=query) | Q(
            author__user__first_name__icontains=query) | Q(author__user__last_name__icontains=query) | Q(
            technologies__icontains=query)).distinct()
    paginator = Paginator(queryset_list, 10)
    page = request.GET.get('page')
    post_list = paginator.get_page(page)
    context = {
        "title": "List",
        "post_list": post_list,
    }
    # "user" is a template flag: True when the viewer is logged in.
    if request.user.is_authenticated:
        context["user"] = True
    else:
        context["user"] = False
    return render(request, "blog/post_list.html", context)
def post_delete(request, slug=None):
    """Delete a post.

    GET renders a confirmation page; POST performs the deletion and
    redirects to the post list.
    NOTE(review): the permission check requires the user to be BOTH
    staff AND superuser (either flag missing raises 404) — confirm
    whether "staff or superuser" was intended before changing it.
    """
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    instance = get_object_or_404(Post, slug=slug)
    # Check the HTTP method, not the POST dict: `if request.POST:` is
    # falsy for a POST request with an empty body, which would silently
    # skip the deletion.
    if request.method == "POST":
        instance.delete()
        return redirect("posts:list")
    context = {
        "title": "Delete",
        "object": instance
    }
    return render(request, "blog/confirm_delete.html", context)
| 32.184466 | 105 | 0.667873 | from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator
from django.db.models import Q
from django.utils import timezone
from .models import Post, Comment
from .forms import PostForm, CommentForm
from profiles.models import UserProfile
def post_create(request):
if not request.user.is_authenticated:
raise Http404
form = PostForm(request.POST or None)
user = get_object_or_404(UserProfile, user=request.user)
print(user)
if form.is_valid():
instance = form.save(commit=False)
instance.author = user
instance.save()
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
"title": "Create"
}
return render(request, "blog/post_form.html", context)
def post_update(request, slug=None):
if not request.user.is_authenticated:
raise Http404
obj = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None, instance=obj)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
"title": "Update"
}
return render(request, "blog/post_form.html", context)
def post_detail(request, slug=None):
post = get_object_or_404(Post, slug=slug)
comment_list = Comment.objects.all().filter(post=post)
form = CommentForm(request.POST or None)
if form.is_valid():
form = form.save(commit=False)
form.post = post
form.save()
return HttpResponseRedirect(post.get_absolute_url())
context = {
"post": post,
"comment_list": comment_list,
"form": form,
"title": "Detail",
}
if request.user == post.author.user:
context["user"] = True
else:
context["user"] = False
return render(request, "blog/post_detail.html", context)
def post_list(request):
queryset_list = Post.objects.published()
query = request.GET.get("q", None)
if query:
queryset_list = queryset_list.filter(Q(title__icontains=query) | Q(content__icontains=query) | Q(
author__user__first_name__icontains=query) | Q(author__user__last_name__icontains=query) | Q(
technologies__icontains=query)).distinct()
paginator = Paginator(queryset_list, 10)
page = request.GET.get('page')
post_list = paginator.get_page(page)
context = {
"title": "List",
"post_list": post_list,
}
if request.user.is_authenticated:
context["user"] = True
else:
context["user"] = False
return render(request, "blog/post_list.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
if request.POST:
instance.delete()
return redirect("posts:list")
context = {
"title": "Delete",
"object": instance
}
return render(request, "blog/confirm_delete.html", context)
| true | true |
f73eacd68651aa35688f3850987079d8a3b7f40f | 150 | py | Python | exercises/darts/darts.py | southpush/python | 048191583ed2cf668c6180d851d100f277a74101 | [
"MIT"
] | null | null | null | exercises/darts/darts.py | southpush/python | 048191583ed2cf668c6180d851d100f277a74101 | [
"MIT"
] | null | null | null | exercises/darts/darts.py | southpush/python | 048191583ed2cf668c6180d851d100f277a74101 | [
"MIT"
def score(x, y):
    """Score a dart landing at Cartesian point (x, y).

    The target is three concentric circles centred on the origin:
    radius <= 1 scores 10, radius <= 5 scores 5, radius <= 10 scores 1,
    and anything further out scores 0.  Distances are compared squared
    to avoid a square root.
    """
    squared_distance = x ** 2 + y ** 2
    for ring_radius, points in ((1, 10), (5, 5), (10, 1)):
        if squared_distance <= ring_radius ** 2:
            return points
    return 0
| 37.5 | 101 | 0.546667 | def score(x, y):
distance = x ** 2 + y ** 2
return 10 if distance <= 1 ** 2 else 5 if distance <= 5 ** 2 else 1 if distance <= 10 ** 2 else 0
| true | true |
f73eadae4fdc856f5258f55231ef39bc6666f5e3 | 2,657 | py | Python | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/backtesting/order/order.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
class Order(object):
    """
    Order generated by a strategy, then processed by PositionSizer.
    Finally executed by ExecutionHandler.
    """

    def __init__(self, contract: Contract, quantity: int, execution_style: ExecutionStyle,
                 time_in_force: TimeInForce, order_state=""):
        """
        This __init__ shouldn't be used anywhere beyond this module. Use OrderFactory for creating Order objects.
        """
        # Assigned later (e.g. by the broker/execution layer); None means
        # the order has not been registered yet.
        self.id = None  # type:int
        self.contract = contract
        self.quantity = quantity
        self.time_in_force = time_in_force
        self.execution_style = execution_style
        self.order_state = order_state

    def __str__(self):
        return '\nOrder:\n' \
               '\tid: {}\n' \
               '\tcontract: {}\n' \
               '\tquantity: {}\n' \
               '\ttif: {}\n' \
               '\texecution_style: {}\n' \
               '\torder_state: {}'.format(self.id, str(self.contract), self.quantity, str(self.time_in_force),
                                          self.execution_style, self.order_state)

    def __eq__(self, other):
        # Orders with ids compare by id; orders without ids compare by value.
        if self is other:
            return True

        if not isinstance(other, Order):
            return False

        # one Order has id and another hasn't
        if (self.id is None) != (other.id is None):
            return False

        if self.id is not None and other.id == self.id:
            return True

        # when both ids are none -> compare the values
        return (self.contract, self.quantity, self.time_in_force, self.execution_style) == \
               (other.contract, other.quantity, other.time_in_force, other.execution_style)

    def __hash__(self):
        # NOTE(review): hash is always value-based while __eq__ can be
        # id-based, so two orders equal by id but differing in fields
        # would hash differently — confirm this never occurs in practice.
        return hash((self.contract, self.quantity, self.time_in_force, self.execution_style))
| 39.073529 | 113 | 0.640572 |
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
class Order(object):
def __init__(self, contract: Contract, quantity: int, execution_style: ExecutionStyle,
time_in_force: TimeInForce, order_state=""):
self.id = None
self.contract = contract
self.quantity = quantity
self.time_in_force = time_in_force
self.execution_style = execution_style
self.order_state = order_state
def __str__(self):
return '\nOrder:\n' \
'\tid: {}\n' \
'\tcontract: {}\n' \
'\tquantity: {}\n' \
'\ttif: {}\n' \
'\texecution_style: {}\n' \
'\torder_state: {}'.format(self.id, str(self.contract), self.quantity, str(self.time_in_force),
self.execution_style, self.order_state)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Order):
return False
if (self.id is None) != (other.id is None):
return False
if self.id is not None and other.id == self.id:
return True
# when both ids are none -> compare the values
return (self.contract, self.quantity, self.time_in_force, self.execution_style) == \
(other.contract, other.quantity, other.time_in_force, other.execution_style)
def __hash__(self):
return hash((self.contract, self.quantity, self.time_in_force, self.execution_style))
| true | true |
f73eade7e277391770915d76474b462d5d695936 | 2,038 | py | Python | configs/textrecog/abinet/abinet_vision_only_academic.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 2,261 | 2021-04-08T03:45:41.000Z | 2022-03-31T23:37:46.000Z | configs/textrecog/abinet/abinet_vision_only_academic.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 789 | 2021-04-08T05:40:13.000Z | 2022-03-31T09:42:39.000Z | configs/textrecog/abinet/abinet_vision_only_academic.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 432 | 2021-04-08T03:56:16.000Z | 2022-03-30T18:44:43.000Z | _base_ = [
'../../_base_/default_runtime.py',
'../../_base_/schedules/schedule_adam_step_20e.py',
'../../_base_/recog_pipelines/abinet_pipeline.py',
'../../_base_/recog_datasets/ST_MJ_alphanumeric_train.py',
'../../_base_/recog_datasets/academic_test.py'
]
# Datasets and pipelines are inherited from the _base_ configs above.
train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}
train_pipeline = {{_base_.train_pipeline}}
test_pipeline = {{_base_.test_pipeline}}
# Model
num_chars = 37  # 36-char dict (a-z, 0-9) plus the end-of-sequence token
max_seq_len = 26
label_convertor = dict(
    type='ABIConvertor',
    dict_type='DICT36',
    with_unknown=False,
    with_padding=False,
    lower=True,
)
# ABINet with the vision branch only (no language model / fusion stage).
model = dict(
    type='ABINet',
    backbone=dict(type='ResNetABI'),
    encoder=dict(
        type='ABIVisionModel',
        encoder=dict(
            type='TransformerEncoder',
            n_layers=3,
            n_head=8,
            d_model=512,
            d_inner=2048,
            dropout=0.1,
            max_len=8 * 32,  # flattened 8x32 feature map
        ),
        decoder=dict(
            type='ABIVisionDecoder',
            in_channels=512,
            num_channels=64,
            attn_height=8,
            attn_width=32,
            attn_mode='nearest',
            use_result='feature',
            num_chars=num_chars,
            max_seq_len=max_seq_len,
            init_cfg=dict(type='Xavier', layer='Conv2d')),
    ),
    loss=dict(
        type='ABILoss', enc_weight=1.0, dec_weight=1.0, fusion_weight=1.0),
    label_convertor=label_convertor,
    max_seq_len=max_seq_len,
    iter_size=1)
# Dataloaders: large training batches, single-sample val/test batches.
data = dict(
    samples_per_gpu=192,
    workers_per_gpu=8,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline))
# Evaluate recognition accuracy after every epoch.
evaluation = dict(interval=1, metric='acc')
| 26.467532 | 75 | 0.613837 | _base_ = [
'../../_base_/default_runtime.py',
'../../_base_/schedules/schedule_adam_step_20e.py',
'../../_base_/recog_pipelines/abinet_pipeline.py',
'../../_base_/recog_datasets/ST_MJ_alphanumeric_train.py',
'../../_base_/recog_datasets/academic_test.py'
]
train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}
train_pipeline = {{_base_.train_pipeline}}
test_pipeline = {{_base_.test_pipeline}}
num_chars = 37
max_seq_len = 26
label_convertor = dict(
type='ABIConvertor',
dict_type='DICT36',
with_unknown=False,
with_padding=False,
lower=True,
)
model = dict(
type='ABINet',
backbone=dict(type='ResNetABI'),
encoder=dict(
type='ABIVisionModel',
encoder=dict(
type='TransformerEncoder',
n_layers=3,
n_head=8,
d_model=512,
d_inner=2048,
dropout=0.1,
max_len=8 * 32,
),
decoder=dict(
type='ABIVisionDecoder',
in_channels=512,
num_channels=64,
attn_height=8,
attn_width=32,
attn_mode='nearest',
use_result='feature',
num_chars=num_chars,
max_seq_len=max_seq_len,
init_cfg=dict(type='Xavier', layer='Conv2d')),
),
loss=dict(
type='ABILoss', enc_weight=1.0, dec_weight=1.0, fusion_weight=1.0),
label_convertor=label_convertor,
max_seq_len=max_seq_len,
iter_size=1)
data = dict(
samples_per_gpu=192,
workers_per_gpu=8,
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1),
train=dict(
type='UniformConcatDataset',
datasets=train_list,
pipeline=train_pipeline),
val=dict(
type='UniformConcatDataset',
datasets=test_list,
pipeline=test_pipeline),
test=dict(
type='UniformConcatDataset',
datasets=test_list,
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='acc')
| true | true |
f73eae5181fdd60b3bb9fbc533decbc6808fbc1f | 1,141 | py | Python | Chapter02/quadratic_uni.py | svenpanne/ProgInPy3 | 93f642d44e27ccb4d2601eae4a960acd8c9f11f8 | [
"BSD-3-Clause"
] | null | null | null | Chapter02/quadratic_uni.py | svenpanne/ProgInPy3 | 93f642d44e27ccb4d2601eae4a960acd8c9f11f8 | [
"BSD-3-Clause"
] | null | null | null | Chapter02/quadratic_uni.py | svenpanne/ProgInPy3 | 93f642d44e27ccb4d2601eae4a960acd8c9f11f8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import cmath
import math
import sys
SQUARED = "\N{SUPERSCRIPT TWO}"
ARROW = "\N{RIGHTWARDS ARROW}"
if not sys.platform.startswith("linux"):
SQUARED = "^2"
ARROW = "->"
def get_float(msg, allow_zero):
    """Prompt with ``msg`` until the user enters a valid float.

    Re-prompts on non-numeric input (printing the conversion error) and,
    when ``allow_zero`` is false, on values indistinguishable from zero.
    Returns the accepted float.
    """
    while True:
        try:
            value = float(input(msg))
        except ValueError as err:
            print(err)
            continue
        if not allow_zero and abs(value) < sys.float_info.epsilon:
            print("zero is not allowed")
            continue
        return value
# Read the three coefficients; a must be non-zero for a quadratic.
print("ax" + SQUARED + " + bx + c = 0")
a = get_float("enter a: ", False)
b = get_float("enter b: ", True)
c = get_float("enter c: ", True)
x1 = None
x2 = None
# Discriminant decides the number and kind of roots.
discriminant = (b ** 2) - (4 * a * c)
if discriminant == 0:
    # One repeated real root.
    x1 = -(b / (2 * a))
else:
    if discriminant > 0:
        # Two distinct real roots.
        root = math.sqrt(discriminant)
    else: # discriminant < 0
        # Two complex-conjugate roots; cmath handles the negative radicand.
        root = cmath.sqrt(discriminant)
    x1 = (-b + root) / (2 * a)
    x2 = (-b - root) / (2 * a)
equation = ("{a}x{SQUARED} + {b}x + {c} = 0 {ARROW} x = {x1}"
            .format(**locals()))
if x2 is not None:
    equation += " or x = {0}".format(x2)
print(equation)
| 23.770833 | 66 | 0.538124 |
import cmath
import math
import sys
SQUARED = "\N{SUPERSCRIPT TWO}"
ARROW = "\N{RIGHTWARDS ARROW}"
if not sys.platform.startswith("linux"):
SQUARED = "^2"
ARROW = "->"
def get_float(msg, allow_zero):
x = None
while x is None:
try:
x = float(input(msg))
if not allow_zero and abs(x) < sys.float_info.epsilon:
print("zero is not allowed")
x = None
except ValueError as err:
print(err)
return x
print("ax" + SQUARED + " + bx + c = 0")
a = get_float("enter a: ", False)
b = get_float("enter b: ", True)
c = get_float("enter c: ", True)
x1 = None
x2 = None
discriminant = (b ** 2) - (4 * a * c)
if discriminant == 0:
x1 = -(b / (2 * a))
else:
if discriminant > 0:
root = math.sqrt(discriminant)
else:
root = cmath.sqrt(discriminant)
x1 = (-b + root) / (2 * a)
x2 = (-b - root) / (2 * a)
equation = ("{a}x{SQUARED} + {b}x + {c} = 0 {ARROW} x = {x1}"
.format(**locals()))
if x2 is not None:
equation += " or x = {0}".format(x2)
print(equation)
| true | true |
f73eaedf47ff3acdcac8b8413faa3dd0f0a60aad | 2,493 | py | Python | auto_sort/asort.py | SONJAYA80026/Awesome-Linux-Software | 4e8344a0a9dfd3e7f14794e732e63e69a1858a64 | [
"MIT"
] | null | null | null | auto_sort/asort.py | SONJAYA80026/Awesome-Linux-Software | 4e8344a0a9dfd3e7f14794e732e63e69a1858a64 | [
"MIT"
] | null | null | null | auto_sort/asort.py | SONJAYA80026/Awesome-Linux-Software | 4e8344a0a9dfd3e7f14794e732e63e69a1858a64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*-coding: utf-8-*-
# Author : Christopher L
# Blog : http://blog.chriscabin.com
# GitHub : https://www.github.com/chrisleegit
# File : asort.py
# Date : 2016/08/22 11:12
# Version: 0.1
# Description: A very simple python script that can sort items alphabetically.
from __future__ import print_function
import os
import shutil
import re
README_FILE = '../README.md'
TEMP_FILE = 'temp.md'
# only works for those items between BEGIN and END.
BEGIN = '## Applications'
END = '## Setup'
regex = re.compile(r"[^[]*\[([^]]*)\]")
def main():
    """Rewrite README.md with its bullet items sorted alphabetically.

    Sorting is active only between the BEGIN and END markers.  Lines are
    streamed into a temporary file; runs of '-'-prefixed items are
    buffered and flushed in sorted order when a blank line ends the run.
    The temporary file then replaces the original README.
    """
    global README_FILE
    # make sure the script can find file: README.md
    README_FILE = os.path.abspath(README_FILE)

    if not os.path.exists(README_FILE):
        print('Error: no such file or directory: {}'.format(README_FILE))
        exit(1)

    sort_enable = False
    items = list()

    print('Loading file: {}'.format(README_FILE))

    # read file: README.md
    with open(README_FILE) as infile, open(TEMP_FILE, 'w') as outfile:
        # process each line
        for line in infile:
            if not sort_enable and BEGIN in line:
                sort_enable = True

            # if sort_enable and END in line:
            #     sort_enable = False

            if sort_enable:
                # each item starts with a character '-'
                if line.startswith(('-')):
                    line = line.strip()
                    items.append(line)
                # When no more items, blank line or new header
                elif line == '\n':
                    # NOTE(review): the key takes the LAST bracketed link
                    # text on each line (equivalent to findall(...)[-1]) —
                    # confirm sorting by the last link is intended.
                    # when we meet the next header, we should stop adding new item to the list.
                    for item in sorted(items, key=lambda x: regex.findall(x.upper())[len(regex.findall(x.upper()))-1]):
                        # write the ordered list to the temporary file.
                        print(item, file=outfile)

                    items.clear()
                    # remember to put the next header in the temporary file.
                    print(line, end='', file=outfile)
                elif line.startswith('#'):
                    # Headers toggle sorting: END disables it, others keep it on.
                    sort_enable = False if END in line else True
                    print(line, end='', file=outfile)
                else:
                    print(line, end='', file=outfile)
            else:
                print(line, end='', file=outfile)

    print('Replace the original file: README.md')
    shutil.move(TEMP_FILE, README_FILE)
if __name__ == '__main__':
main()
| 30.777778 | 119 | 0.562375 |
from __future__ import print_function
import os
import shutil
import re
README_FILE = '../README.md'
TEMP_FILE = 'temp.md'
BEGIN = '## Applications'
END = '## Setup'
regex = re.compile(r"[^[]*\[([^]]*)\]")
def main():
global README_FILE
README_FILE = os.path.abspath(README_FILE)
if not os.path.exists(README_FILE):
print('Error: no such file or directory: {}'.format(README_FILE))
exit(1)
sort_enable = False
items = list()
print('Loading file: {}'.format(README_FILE))
with open(README_FILE) as infile, open(TEMP_FILE, 'w') as outfile:
for line in infile:
if not sort_enable and BEGIN in line:
sort_enable = True
if sort_enable:
if line.startswith(('-')):
line = line.strip()
items.append(line)
elif line == '\n':
for item in sorted(items, key=lambda x: regex.findall(x.upper())[len(regex.findall(x.upper()))-1]):
print(item, file=outfile)
items.clear()
print(line, end='', file=outfile)
elif line.startswith('#'):
sort_enable = False if END in line else True
print(line, end='', file=outfile)
else:
print(line, end='', file=outfile)
else:
print(line, end='', file=outfile)
print('Replace the original file: README.md')
shutil.move(TEMP_FILE, README_FILE)
if __name__ == '__main__':
main()
| true | true |
f73eaee079ad1abaad5c5ed2671e8658dbb6bf36 | 1,808 | py | Python | spec_cleaner/rpmcheck.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | spec_cleaner/rpmcheck.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | spec_cleaner/rpmcheck.py | dcermak/spec-cleaner | 917e35c09d054b5545806ab1e9ce408e9a517de6 | [
"BSD-3-Clause"
] | null | null | null | # vim: set ts=4 sw=4 et: coding=UTF-8
from .rpmsection import Section
class RpmCheck(Section):
    """
    Cleaner for the %check section of a spec file.

    Normalizes job-count flags on ``make`` invocations and rewrites
    pytest calls to the corresponding rpm macros.
    """

    def add(self, line: str) -> None:
        line = self._complete_cleanup(line)
        # Normalize explicit job counts to the %{?_smp_mflags} macro.
        line = self.reg.re_jobs.sub('%{?_smp_mflags}', line)
        if not self.minimal:
            line = self._add_jobs(line)
            line = self._replace_pytest(line)
        super().add(line)

    def _add_jobs(self, line: str) -> str:
        """Append %{?_smp_mflags} to a bare 'make' invocation.

        Single-threaded builds must say -j1 explicitly.

        Args:
            line: A string representing a line to process.

        Return:
            The processed line.
        """
        if not self.reg.re_make.match(line):
            return line
        # Leave the line alone when jobs are already specified.
        if '%{?_smp_mflags}' in line or '-j' in line:
            return line
        # Appending after a trailing backslash would break the
        # continuation; commented lines are left untouched as well.
        if line.endswith('\\') or line.lstrip().startswith('#'):
            return line
        return self.reg.re_make.sub(r'\1make %{?_smp_mflags}\2', line)

    def _replace_pytest(self, line: str) -> str:
        """Replace pytest calls with %pytest or %pytest_arch macros.

        Args:
            line: A string representing a line to process.

        Return:
            The processed line.
        """
        rewritten = self.reg.re_pytest.sub('%pytest', line)
        return self.reg.re_pytest_arch.sub('%pytest_arch', rewritten)
| 30.133333 | 82 | 0.573009 |
from .rpmsection import Section
class RpmCheck(Section):
def add(self, line: str) -> None:
line = self._complete_cleanup(line)
line = self.reg.re_jobs.sub('%{?_smp_mflags}', line)
if not self.minimal:
line = self._add_jobs(line)
line = self._replace_pytest(line)
Section.add(self, line)
def _add_jobs(self, line: str) -> str:
if self.reg.re_make.match(line):
if line.find('%{?_smp_mflags}') == -1 and line.find('-j') == -1:
# it would break the formatting
if not line.endswith('\\') and not line.lstrip().startswith('
line = self.reg.re_make.sub(r'\1make %{?_smp_mflags}\2', line)
return line
def _replace_pytest(self, line: str) -> str:
line = self.reg.re_pytest.sub('%pytest', line)
line = self.reg.re_pytest_arch.sub('%pytest_arch', line)
return line
| true | true |
f73eb0ecd6e9a7b395ba5e76bedcde1ea35053c6 | 831 | py | Python | src/drf_app/urls.py | inabhi9/drf-cloudstorage | 7f5e1b1e721208c5682f1f1e443a13af73d039bc | [
"MIT"
] | null | null | null | src/drf_app/urls.py | inabhi9/drf-cloudstorage | 7f5e1b1e721208c5682f1f1e443a13af73d039bc | [
"MIT"
] | null | null | null | src/drf_app/urls.py | inabhi9/drf-cloudstorage | 7f5e1b1e721208c5682f1f1e443a13af73d039bc | [
"MIT"
] | null | null | null | """drf_cloudstorage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.urls import include
# Mount the cloud-storage API under /cloudfiles and the demo app under
# /examples.
urlpatterns = [
    url(r'^cloudfiles', include('drf_cloudstorage.urls')),
    url(r'^examples', include('example.urls')),
]
| 36.130435 | 77 | 0.711191 | from django.conf.urls import url
from django.urls import include
urlpatterns = [
url(r'^cloudfiles', include('drf_cloudstorage.urls')),
url(r'^examples', include('example.urls')),
]
| true | true |
f73eb1084de2482fb9f79a252556c45f0246ecaf | 31,870 | py | Python | Lib/test/test_yield_from.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | 1 | 2020-10-25T16:33:22.000Z | 2020-10-25T16:33:22.000Z | Lib/test/test_yield_from.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | Lib/test/test_yield_from.py | Krrishdhaneja/cpython | 9ae9ad8ba35cdcece7ded73cd2207e4f8cb85578 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Test suite for PEP 380 implementation
adapted from original tests written by Greg Ewing
see <http://www.cosc.canterbury.ac.nz/greg.ewing/python/yield-from/YieldFrom-Python3.1.2-rev5.zip>
"""
import unittest
import inspect
from test.support import captured_stderr, disable_gc, gc_collect
from test import support
class TestPEP380Operation(unittest.TestCase):
"""
Test semantics.
"""
    def test_delegation_of_initial_next_to_subgenerator(self):
        """
        Test delegation of initial next() call to subgenerator.

        The first next() on g1 must be forwarded through ``yield from``
        into g2, so g2 starts (and yields its value) before g1 resumes.
        """
        trace = []
        def g1():
            trace.append("Starting g1")
            yield from g2()
            trace.append("Finishing g1")
        def g2():
            trace.append("Starting g2")
            yield 42
            trace.append("Finishing g2")
        for x in g1():
            trace.append("Yielded %s" % (x,))
        # g2's value surfaces through g1 unchanged, and g2 runs to
        # completion before g1's code after the yield-from resumes.
        self.assertEqual(trace,[
            "Starting g1",
            "Starting g2",
            "Yielded 42",
            "Finishing g2",
            "Finishing g1",
        ])
    def test_raising_exception_in_initial_next_call(self):
        """
        Test raising exception in initial next() call.

        An exception raised by the subgenerator before its first yield
        must propagate out through ``yield from``, and the finally
        blocks of both generators must still run (inner first).
        """
        trace = []
        def g1():
            try:
                trace.append("Starting g1")
                yield from g2()
            finally:
                trace.append("Finishing g1")
        def g2():
            try:
                trace.append("Starting g2")
                raise ValueError("spanish inquisition occurred")
            finally:
                trace.append("Finishing g2")
        try:
            for x in g1():
                trace.append("Yielded %s" % (x,))
        except ValueError as e:
            self.assertEqual(e.args[0], "spanish inquisition occurred")
        else:
            self.fail("subgenerator failed to raise ValueError")
        # Nothing was yielded; both cleanups ran, innermost first.
        self.assertEqual(trace,[
            "Starting g1",
            "Starting g2",
            "Finishing g2",
            "Finishing g1",
        ])
    def test_delegation_of_next_call_to_subgenerator(self):
        """
        Test delegation of next() call to subgenerator.

        While the ``yield from`` is active, every next() goes straight
        to g2; g1 resumes only after g2 is exhausted.
        """
        trace = []
        def g1():
            trace.append("Starting g1")
            yield "g1 ham"
            yield from g2()
            yield "g1 eggs"
            trace.append("Finishing g1")
        def g2():
            trace.append("Starting g2")
            yield "g2 spam"
            yield "g2 more spam"
            trace.append("Finishing g2")
        for x in g1():
            trace.append("Yielded %s" % (x,))
        # Yields interleave in source order: g1's own value, then all of
        # g2's, then g1's trailing value.
        self.assertEqual(trace,[
            "Starting g1",
            "Yielded g1 ham",
            "Starting g2",
            "Yielded g2 spam",
            "Yielded g2 more spam",
            "Finishing g2",
            "Yielded g1 eggs",
            "Finishing g1",
        ])
    def test_raising_exception_in_delegated_next_call(self):
        """
        Test raising exception in delegated next() call.

        An exception raised by the subgenerator mid-iteration must
        propagate through the delegating generator to the caller, and
        both finally blocks must run (inner first).
        """
        trace = []
        def g1():
            try:
                trace.append("Starting g1")
                yield "g1 ham"
                yield from g2()
                yield "g1 eggs"
            finally:
                trace.append("Finishing g1")
        def g2():
            try:
                trace.append("Starting g2")
                yield "g2 spam"
                raise ValueError("hovercraft is full of eels")
                # Intentionally unreachable: the raise above aborts g2.
                yield "g2 more spam"
            finally:
                trace.append("Finishing g2")
        try:
            for x in g1():
                trace.append("Yielded %s" % (x,))
        except ValueError as e:
            self.assertEqual(e.args[0], "hovercraft is full of eels")
        else:
            self.fail("subgenerator failed to raise ValueError")
        # g1's "g1 eggs" is never yielded; cleanups ran innermost first.
        self.assertEqual(trace,[
            "Starting g1",
            "Yielded g1 ham",
            "Starting g2",
            "Yielded g2 spam",
            "Finishing g2",
            "Finishing g1",
        ])
def test_delegation_of_send(self):
"""
Test delegation of send()
"""
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
pass
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
"Yielded g2 more spam",
"g2 received 3",
"Finishing g2",
"Yielded g1 eggs",
"g1 received 4",
"Finishing g1",
])
def test_handling_exception_while_delegating_send(self):
"""
Test handling exception while delegating 'send'
"""
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
raise ValueError("hovercraft is full of eels")
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
def run():
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
trace.append("StopIteration")
self.assertRaises(ValueError,run)
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
])
def test_delegating_close(self):
"""
Test delegating 'close'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1"
])
def test_handing_exception_while_delegating_close(self):
"""
Test handling exception while delegating 'close'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
raise ValueError("nybbles have exploded with delight")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
except ValueError as e:
self.assertEqual(e.args[0], "nybbles have exploded with delight")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_delegating_throw(self):
"""
Test delegating 'throw'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
g.throw(e)
except ValueError as e:
self.assertEqual(e.args[0], "tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_value_attribute_of_StopIteration_exception(self):
"""
Test 'value' attribute of StopIteration exception
"""
trace = []
def pex(e):
trace.append("%s: %s" % (e.__class__.__name__, e))
trace.append("value = %s" % (e.value,))
e = StopIteration()
pex(e)
e = StopIteration("spam")
pex(e)
e.value = "eggs"
pex(e)
self.assertEqual(trace,[
"StopIteration: ",
"value = None",
"StopIteration: spam",
"value = spam",
"StopIteration: spam",
"value = eggs",
])
def test_exception_value_crash(self):
# There used to be a refcount error when the return value
# stored in the StopIteration has a refcount of 1.
def g1():
yield from g2()
def g2():
yield "g2"
return [42]
self.assertEqual(list(g1()), ["g2"])
def test_generator_return_value(self):
"""
Test generator return value
"""
trace = []
def g1():
trace.append("Starting g1")
yield "g1 ham"
ret = yield from g2()
trace.append("g2 returned %r" % (ret,))
for v in 1, (2,), StopIteration(3):
ret = yield from g2(v)
trace.append("g2 returned %r" % (ret,))
yield "g1 eggs"
trace.append("Finishing g1")
def g2(v = None):
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
trace.append("Finishing g2")
if v:
return v
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned None",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned 1",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned (2,)",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned StopIteration(3)",
"Yielded g1 eggs",
"Finishing g1",
])
def test_delegation_of_next_to_non_generator(self):
"""
Test delegation of next() to non-generator
"""
trace = []
def g():
yield from range(3)
for x in g():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Yielded 0",
"Yielded 1",
"Yielded 2",
])
def test_conversion_of_sendNone_to_next(self):
"""
Test conversion of send(None) to next()
"""
trace = []
def g():
yield from range(3)
gi = g()
for x in range(3):
y = gi.send(None)
trace.append("Yielded: %s" % (y,))
self.assertEqual(trace,[
"Yielded: 0",
"Yielded: 1",
"Yielded: 2",
])
def test_delegation_of_close_to_non_generator(self):
"""
Test delegation of close() to non-generator
"""
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
gi = g()
next(gi)
with captured_stderr() as output:
gi.close()
self.assertEqual(output.getvalue(), '')
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_delegating_throw_to_non_generator(self):
"""
Test delegating 'throw' to non-generator
"""
trace = []
def g():
try:
trace.append("Starting g")
yield from range(10)
finally:
trace.append("Finishing g")
try:
gi = g()
for i in range(5):
x = next(gi)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
gi.throw(e)
except ValueError as e:
self.assertEqual(e.args[0],"tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g",
"Yielded 0",
"Yielded 1",
"Yielded 2",
"Yielded 3",
"Yielded 4",
"Finishing g",
])
def test_attempting_to_send_to_non_generator(self):
"""
Test attempting to send to non-generator
"""
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
try:
gi = g()
next(gi)
for x in range(3):
y = gi.send(42)
trace.append("Should not have yielded: %s" % (y,))
except AttributeError as e:
self.assertIn("send", e.args[0])
else:
self.fail("was able to send into non-generator")
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_broken_getattr_handling(self):
"""
Test subiterator with a broken getattr implementation
"""
class Broken:
def __iter__(self):
return self
def __next__(self):
return 1
def __getattr__(self, attr):
1/0
def g():
yield from Broken()
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.send(1)
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.throw(AttributeError)
with support.catch_unraisable_exception() as cm:
gi = g()
self.assertEqual(next(gi), 1)
gi.close()
self.assertEqual(ZeroDivisionError, cm.unraisable.exc_type)
def test_exception_in_initial_next_call(self):
"""
Test exception in initial next() call
"""
trace = []
def g1():
trace.append("g1 about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
yield 1/0
def run():
gi = g1()
next(gi)
self.assertRaises(ZeroDivisionError,run)
self.assertEqual(trace,[
"g1 about to yield from g2"
])
def test_attempted_yield_from_loop(self):
"""
Test attempted yield-from loop
"""
trace = []
def g1():
trace.append("g1: starting")
yield "y1"
trace.append("g1: about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
trace.append("g2: starting")
yield "y2"
trace.append("g2: about to yield from g1")
yield from gi
trace.append("g2 should not be here")
try:
gi = g1()
for y in gi:
trace.append("Yielded: %s" % (y,))
except ValueError as e:
self.assertEqual(e.args[0],"generator already executing")
else:
self.fail("subgenerator didn't raise ValueError")
self.assertEqual(trace,[
"g1: starting",
"Yielded: y1",
"g1: about to yield from g2",
"g2: starting",
"Yielded: y2",
"g2: about to yield from g1",
])
def test_returning_value_from_delegated_throw(self):
"""
Test returning value from delegated 'throw'
"""
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
except LunchError:
trace.append("Caught LunchError in g2")
yield "g2 lunch saved"
yield "g2 yet more spam"
class LunchError(Exception):
pass
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = LunchError("tomato ejected")
g.throw(e)
for x in g:
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Caught LunchError in g2",
"Yielded g2 yet more spam",
"Yielded g1 eggs",
"Finishing g1",
])
def test_next_and_return_with_value(self):
"""
Test next and return with value
"""
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f resuming g")
next(gi)
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %r" % (e,))
def g(r):
trace.append("g starting")
yield
trace.append("g returning %r" % (r,))
return r
f(None)
f(1)
f((2,))
f(StopIteration(3))
self.assertEqual(trace,[
"g starting",
"f resuming g",
"g returning None",
"f caught StopIteration()",
"g starting",
"f resuming g",
"g returning 1",
"f caught StopIteration(1)",
"g starting",
"f resuming g",
"g returning (2,)",
"f caught StopIteration((2,))",
"g starting",
"f resuming g",
"g returning StopIteration(3)",
"f caught StopIteration(StopIteration(3))",
])
def test_send_and_return_with_value(self):
"""
Test send and return with value
"""
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f sending spam to g")
gi.send("spam")
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %r" % (e,))
def g(r):
trace.append("g starting")
x = yield
trace.append("g received %r" % (x,))
trace.append("g returning %r" % (r,))
return r
f(None)
f(1)
f((2,))
f(StopIteration(3))
self.assertEqual(trace, [
"g starting",
"f sending spam to g",
"g received 'spam'",
"g returning None",
"f caught StopIteration()",
"g starting",
"f sending spam to g",
"g received 'spam'",
"g returning 1",
'f caught StopIteration(1)',
'g starting',
'f sending spam to g',
"g received 'spam'",
'g returning (2,)',
'f caught StopIteration((2,))',
'g starting',
'f sending spam to g',
"g received 'spam'",
'g returning StopIteration(3)',
'f caught StopIteration(StopIteration(3))'
])
def test_catching_exception_from_subgen_and_returning(self):
"""
Test catching an exception thrown into a
subgenerator and returning a value
"""
def inner():
try:
yield 1
except ValueError:
trace.append("inner caught ValueError")
return value
def outer():
v = yield from inner()
trace.append("inner returned %r to outer" % (v,))
yield v
for value in 2, (2,), StopIteration(2):
trace = []
g = outer()
trace.append(next(g))
trace.append(repr(g.throw(ValueError)))
self.assertEqual(trace, [
1,
"inner caught ValueError",
"inner returned %r to outer" % (value,),
repr(value),
])
def test_throwing_GeneratorExit_into_subgen_that_returns(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and returns normally.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
return
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except GeneratorExit:
pass
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgenerator_that_yields(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and yields.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
yield
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except RuntimeError as e:
self.assertEqual(e.args[0], "generator ignored GeneratorExit")
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgen_that_raises(self):
"""
Test throwing GeneratorExit into a subgenerator that
catches it and raises a different exception.
"""
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
raise ValueError("Vorpal bunny encountered")
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except ValueError as e:
self.assertEqual(e.args[0], "Vorpal bunny encountered")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_yield_from_empty(self):
def g():
yield from ()
self.assertRaises(StopIteration, next, g())
def test_delegating_generators_claim_to_be_running(self):
# Check with basic iteration
def one():
yield 0
yield from two()
yield 3
def two():
yield 1
try:
yield from g1
except ValueError:
pass
yield 2
g1 = one()
self.assertEqual(list(g1), [0, 1, 2, 3])
# Check with send
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.send(42))
except StopIteration:
pass
self.assertEqual(res, [0, 1, 2, 3])
# Check with throw
class MyErr(Exception):
pass
def one():
try:
yield 0
except MyErr:
pass
yield from two()
try:
yield 3
except MyErr:
pass
def two():
try:
yield 1
except MyErr:
pass
try:
yield from g1
except ValueError:
pass
try:
yield 2
except MyErr:
pass
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.throw(MyErr))
except StopIteration:
pass
except:
self.assertEqual(res, [0, 1, 2, 3])
raise
# Check with close
class MyIt(object):
def __iter__(self):
return self
def __next__(self):
return 42
def close(self_):
self.assertTrue(g1.gi_running)
self.assertRaises(ValueError, next, g1)
def one():
yield from MyIt()
g1 = one()
next(g1)
g1.close()
def test_delegator_is_visible_to_debugger(self):
def call_stack():
return [f[3] for f in inspect.stack()]
def gen():
yield call_stack()
yield call_stack()
yield call_stack()
def spam(g):
yield from g
def eggs(g):
yield from g
for stack in spam(gen()):
self.assertTrue('spam' in stack)
for stack in spam(eggs(gen())):
self.assertTrue('spam' in stack and 'eggs' in stack)
def test_custom_iterator_return(self):
# See issue #15568
class MyIter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration(42)
def gen():
nonlocal ret
ret = yield from MyIter()
ret = None
list(gen())
self.assertEqual(ret, 42)
def test_close_with_cleared_frame(self):
# See issue #17669.
#
# Create a stack of generators: outer() delegating to inner()
# delegating to innermost(). The key point is that the instance of
# inner is created first: this ensures that its frame appears before
# the instance of outer in the GC linked list.
#
# At the gc.collect call:
# - frame_clear is called on the inner_gen frame.
# - gen_dealloc is called on the outer_gen generator (the only
# reference is in the frame's locals).
# - gen_close is called on the outer_gen generator.
# - gen_close_iter is called to close the inner_gen generator, which
# in turn calls gen_close, and gen_yf.
#
# Previously, gen_yf would crash since inner_gen's frame had been
# cleared (and in particular f_stacktop was NULL).
def innermost():
yield
def inner():
outer_gen = yield
yield from innermost()
def outer():
inner_gen = yield
yield from inner_gen
with disable_gc():
inner_gen = inner()
outer_gen = outer()
outer_gen.send(None)
outer_gen.send(inner_gen)
outer_gen.send(outer_gen)
del outer_gen
del inner_gen
gc_collect()
def test_send_tuple_with_custom_generator(self):
# See issue #21209.
class MyGen:
def __iter__(self):
return self
def __next__(self):
return 42
def send(self, what):
nonlocal v
v = what
return None
def outer():
v = yield from MyGen()
g = outer()
next(g)
v = None
g.send((1, 2, 3, 4))
self.assertEqual(v, (1, 2, 3, 4))
if __name__ == '__main__':
unittest.main()
| 30.208531 | 99 | 0.448321 |
import unittest
import inspect
from test.support import captured_stderr, disable_gc, gc_collect
from test import support
class TestPEP380Operation(unittest.TestCase):
def test_delegation_of_initial_next_to_subgenerator(self):
trace = []
def g1():
trace.append("Starting g1")
yield from g2()
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
yield 42
trace.append("Finishing g2")
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Starting g2",
"Yielded 42",
"Finishing g2",
"Finishing g1",
])
def test_raising_exception_in_initial_next_call(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield from g2()
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
raise ValueError("spanish inquisition occurred")
finally:
trace.append("Finishing g2")
try:
for x in g1():
trace.append("Yielded %s" % (x,))
except ValueError as e:
self.assertEqual(e.args[0], "spanish inquisition occurred")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Starting g2",
"Finishing g2",
"Finishing g1",
])
def test_delegation_of_next_call_to_subgenerator(self):
trace = []
def g1():
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
trace.append("Finishing g2")
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"Yielded g1 eggs",
"Finishing g1",
])
def test_raising_exception_in_delegated_next_call(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
raise ValueError("hovercraft is full of eels")
yield "g2 more spam"
finally:
trace.append("Finishing g2")
try:
for x in g1():
trace.append("Yielded %s" % (x,))
except ValueError as e:
self.assertEqual(e.args[0], "hovercraft is full of eels")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_delegation_of_send(self):
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
pass
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
"Yielded g2 more spam",
"g2 received 3",
"Finishing g2",
"Yielded g1 eggs",
"g1 received 4",
"Finishing g1",
])
def test_handling_exception_while_delegating_send(self):
trace = []
def g1():
trace.append("Starting g1")
x = yield "g1 ham"
trace.append("g1 received %s" % (x,))
yield from g2()
x = yield "g1 eggs"
trace.append("g1 received %s" % (x,))
trace.append("Finishing g1")
def g2():
trace.append("Starting g2")
x = yield "g2 spam"
trace.append("g2 received %s" % (x,))
raise ValueError("hovercraft is full of eels")
x = yield "g2 more spam"
trace.append("g2 received %s" % (x,))
trace.append("Finishing g2")
def run():
g = g1()
y = next(g)
x = 1
try:
while 1:
y = g.send(x)
trace.append("Yielded %s" % (y,))
x += 1
except StopIteration:
trace.append("StopIteration")
self.assertRaises(ValueError,run)
self.assertEqual(trace,[
"Starting g1",
"g1 received 1",
"Starting g2",
"Yielded g2 spam",
"g2 received 2",
])
def test_delegating_close(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1"
])
def test_handing_exception_while_delegating_close(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
raise ValueError("nybbles have exploded with delight")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
g.close()
except ValueError as e:
self.assertEqual(e.args[0], "nybbles have exploded with delight")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_delegating_throw(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
finally:
trace.append("Finishing g2")
try:
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
g.throw(e)
except ValueError as e:
self.assertEqual(e.args[0], "tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Finishing g2",
"Finishing g1",
])
def test_value_attribute_of_StopIteration_exception(self):
trace = []
def pex(e):
trace.append("%s: %s" % (e.__class__.__name__, e))
trace.append("value = %s" % (e.value,))
e = StopIteration()
pex(e)
e = StopIteration("spam")
pex(e)
e.value = "eggs"
pex(e)
self.assertEqual(trace,[
"StopIteration: ",
"value = None",
"StopIteration: spam",
"value = spam",
"StopIteration: spam",
"value = eggs",
])
def test_exception_value_crash(self):
def g1():
yield from g2()
def g2():
yield "g2"
return [42]
self.assertEqual(list(g1()), ["g2"])
def test_generator_return_value(self):
trace = []
def g1():
trace.append("Starting g1")
yield "g1 ham"
ret = yield from g2()
trace.append("g2 returned %r" % (ret,))
for v in 1, (2,), StopIteration(3):
ret = yield from g2(v)
trace.append("g2 returned %r" % (ret,))
yield "g1 eggs"
trace.append("Finishing g1")
def g2(v = None):
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
trace.append("Finishing g2")
if v:
return v
for x in g1():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned None",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned 1",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned (2,)",
"Starting g2",
"Yielded g2 spam",
"Yielded g2 more spam",
"Finishing g2",
"g2 returned StopIteration(3)",
"Yielded g1 eggs",
"Finishing g1",
])
def test_delegation_of_next_to_non_generator(self):
trace = []
def g():
yield from range(3)
for x in g():
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Yielded 0",
"Yielded 1",
"Yielded 2",
])
def test_conversion_of_sendNone_to_next(self):
trace = []
def g():
yield from range(3)
gi = g()
for x in range(3):
y = gi.send(None)
trace.append("Yielded: %s" % (y,))
self.assertEqual(trace,[
"Yielded: 0",
"Yielded: 1",
"Yielded: 2",
])
def test_delegation_of_close_to_non_generator(self):
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
gi = g()
next(gi)
with captured_stderr() as output:
gi.close()
self.assertEqual(output.getvalue(), '')
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_delegating_throw_to_non_generator(self):
trace = []
def g():
try:
trace.append("Starting g")
yield from range(10)
finally:
trace.append("Finishing g")
try:
gi = g()
for i in range(5):
x = next(gi)
trace.append("Yielded %s" % (x,))
e = ValueError("tomato ejected")
gi.throw(e)
except ValueError as e:
self.assertEqual(e.args[0],"tomato ejected")
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Starting g",
"Yielded 0",
"Yielded 1",
"Yielded 2",
"Yielded 3",
"Yielded 4",
"Finishing g",
])
def test_attempting_to_send_to_non_generator(self):
trace = []
def g():
try:
trace.append("starting g")
yield from range(3)
trace.append("g should not be here")
finally:
trace.append("finishing g")
try:
gi = g()
next(gi)
for x in range(3):
y = gi.send(42)
trace.append("Should not have yielded: %s" % (y,))
except AttributeError as e:
self.assertIn("send", e.args[0])
else:
self.fail("was able to send into non-generator")
self.assertEqual(trace,[
"starting g",
"finishing g",
])
def test_broken_getattr_handling(self):
class Broken:
def __iter__(self):
return self
def __next__(self):
return 1
def __getattr__(self, attr):
1/0
def g():
yield from Broken()
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.send(1)
with self.assertRaises(ZeroDivisionError):
gi = g()
self.assertEqual(next(gi), 1)
gi.throw(AttributeError)
with support.catch_unraisable_exception() as cm:
gi = g()
self.assertEqual(next(gi), 1)
gi.close()
self.assertEqual(ZeroDivisionError, cm.unraisable.exc_type)
def test_exception_in_initial_next_call(self):
trace = []
def g1():
trace.append("g1 about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
yield 1/0
def run():
gi = g1()
next(gi)
self.assertRaises(ZeroDivisionError,run)
self.assertEqual(trace,[
"g1 about to yield from g2"
])
def test_attempted_yield_from_loop(self):
trace = []
def g1():
trace.append("g1: starting")
yield "y1"
trace.append("g1: about to yield from g2")
yield from g2()
trace.append("g1 should not be here")
def g2():
trace.append("g2: starting")
yield "y2"
trace.append("g2: about to yield from g1")
yield from gi
trace.append("g2 should not be here")
try:
gi = g1()
for y in gi:
trace.append("Yielded: %s" % (y,))
except ValueError as e:
self.assertEqual(e.args[0],"generator already executing")
else:
self.fail("subgenerator didn't raise ValueError")
self.assertEqual(trace,[
"g1: starting",
"Yielded: y1",
"g1: about to yield from g2",
"g2: starting",
"Yielded: y2",
"g2: about to yield from g1",
])
def test_returning_value_from_delegated_throw(self):
trace = []
def g1():
try:
trace.append("Starting g1")
yield "g1 ham"
yield from g2()
yield "g1 eggs"
finally:
trace.append("Finishing g1")
def g2():
try:
trace.append("Starting g2")
yield "g2 spam"
yield "g2 more spam"
except LunchError:
trace.append("Caught LunchError in g2")
yield "g2 lunch saved"
yield "g2 yet more spam"
class LunchError(Exception):
pass
g = g1()
for i in range(2):
x = next(g)
trace.append("Yielded %s" % (x,))
e = LunchError("tomato ejected")
g.throw(e)
for x in g:
trace.append("Yielded %s" % (x,))
self.assertEqual(trace,[
"Starting g1",
"Yielded g1 ham",
"Starting g2",
"Yielded g2 spam",
"Caught LunchError in g2",
"Yielded g2 yet more spam",
"Yielded g1 eggs",
"Finishing g1",
])
def test_next_and_return_with_value(self):
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f resuming g")
next(gi)
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %r" % (e,))
def g(r):
trace.append("g starting")
yield
trace.append("g returning %r" % (r,))
return r
f(None)
f(1)
f((2,))
f(StopIteration(3))
self.assertEqual(trace,[
"g starting",
"f resuming g",
"g returning None",
"f caught StopIteration()",
"g starting",
"f resuming g",
"g returning 1",
"f caught StopIteration(1)",
"g starting",
"f resuming g",
"g returning (2,)",
"f caught StopIteration((2,))",
"g starting",
"f resuming g",
"g returning StopIteration(3)",
"f caught StopIteration(StopIteration(3))",
])
def test_send_and_return_with_value(self):
trace = []
def f(r):
gi = g(r)
next(gi)
try:
trace.append("f sending spam to g")
gi.send("spam")
trace.append("f SHOULD NOT BE HERE")
except StopIteration as e:
trace.append("f caught %r" % (e,))
def g(r):
trace.append("g starting")
x = yield
trace.append("g received %r" % (x,))
trace.append("g returning %r" % (r,))
return r
f(None)
f(1)
f((2,))
f(StopIteration(3))
self.assertEqual(trace, [
"g starting",
"f sending spam to g",
"g received 'spam'",
"g returning None",
"f caught StopIteration()",
"g starting",
"f sending spam to g",
"g received 'spam'",
"g returning 1",
'f caught StopIteration(1)',
'g starting',
'f sending spam to g',
"g received 'spam'",
'g returning (2,)',
'f caught StopIteration((2,))',
'g starting',
'f sending spam to g',
"g received 'spam'",
'g returning StopIteration(3)',
'f caught StopIteration(StopIteration(3))'
])
def test_catching_exception_from_subgen_and_returning(self):
def inner():
try:
yield 1
except ValueError:
trace.append("inner caught ValueError")
return value
def outer():
v = yield from inner()
trace.append("inner returned %r to outer" % (v,))
yield v
for value in 2, (2,), StopIteration(2):
trace = []
g = outer()
trace.append(next(g))
trace.append(repr(g.throw(ValueError)))
self.assertEqual(trace, [
1,
"inner caught ValueError",
"inner returned %r to outer" % (value,),
repr(value),
])
def test_throwing_GeneratorExit_into_subgen_that_returns(self):
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
return
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except GeneratorExit:
pass
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgenerator_that_yields(self):
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
yield
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except RuntimeError as e:
self.assertEqual(e.args[0], "generator ignored GeneratorExit")
else:
self.fail("subgenerator failed to raise GeneratorExit")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
def test_throwing_GeneratorExit_into_subgen_that_raises(self):
trace = []
def f():
try:
trace.append("Enter f")
yield
trace.append("Exit f")
except GeneratorExit:
raise ValueError("Vorpal bunny encountered")
def g():
trace.append("Enter g")
yield from f()
trace.append("Exit g")
try:
gi = g()
next(gi)
gi.throw(GeneratorExit)
except ValueError as e:
self.assertEqual(e.args[0], "Vorpal bunny encountered")
self.assertIsInstance(e.__context__, GeneratorExit)
else:
self.fail("subgenerator failed to raise ValueError")
self.assertEqual(trace,[
"Enter g",
"Enter f",
])
    def test_yield_from_empty(self):
        """Delegating to an empty iterable yields nothing at all."""
        def g():
            yield from ()
        # g() produces no values, so the very first next() raises StopIteration.
        self.assertRaises(StopIteration, next, g())
def test_delegating_generators_claim_to_be_running(self):
# Check with basic iteration
def one():
yield 0
yield from two()
yield 3
def two():
yield 1
try:
yield from g1
except ValueError:
pass
yield 2
g1 = one()
self.assertEqual(list(g1), [0, 1, 2, 3])
# Check with send
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.send(42))
except StopIteration:
pass
self.assertEqual(res, [0, 1, 2, 3])
# Check with throw
class MyErr(Exception):
pass
def one():
try:
yield 0
except MyErr:
pass
yield from two()
try:
yield 3
except MyErr:
pass
def two():
try:
yield 1
except MyErr:
pass
try:
yield from g1
except ValueError:
pass
try:
yield 2
except MyErr:
pass
g1 = one()
res = [next(g1)]
try:
while True:
res.append(g1.throw(MyErr))
except StopIteration:
pass
except:
self.assertEqual(res, [0, 1, 2, 3])
raise
# Check with close
class MyIt(object):
def __iter__(self):
return self
def __next__(self):
return 42
def close(self_):
self.assertTrue(g1.gi_running)
self.assertRaises(ValueError, next, g1)
def one():
yield from MyIt()
g1 = one()
next(g1)
g1.close()
def test_delegator_is_visible_to_debugger(self):
def call_stack():
return [f[3] for f in inspect.stack()]
def gen():
yield call_stack()
yield call_stack()
yield call_stack()
def spam(g):
yield from g
def eggs(g):
yield from g
for stack in spam(gen()):
self.assertTrue('spam' in stack)
for stack in spam(eggs(gen())):
self.assertTrue('spam' in stack and 'eggs' in stack)
    def test_custom_iterator_return(self):
        """The value carried by StopIteration raised from a custom
        (non-generator) iterator becomes the result of `yield from`.
        """
        # See issue #15568
        class MyIter:
            def __iter__(self):
                return self
            def __next__(self):
                # Terminate immediately, carrying 42 as the return value.
                raise StopIteration(42)
        def gen():
            nonlocal ret
            ret = yield from MyIter()
        ret = None
        list(gen())
        self.assertEqual(ret, 42)
def test_close_with_cleared_frame(self):
# See issue #17669.
#
# Create a stack of generators: outer() delegating to inner()
# delegating to innermost(). The key point is that the instance of
# inner is created first: this ensures that its frame appears before
# the instance of outer in the GC linked list.
#
# At the gc.collect call:
# - frame_clear is called on the inner_gen frame.
# - gen_dealloc is called on the outer_gen generator (the only
# reference is in the frame's locals).
# cleared (and in particular f_stacktop was NULL).
def innermost():
yield
def inner():
outer_gen = yield
yield from innermost()
def outer():
inner_gen = yield
yield from inner_gen
with disable_gc():
inner_gen = inner()
outer_gen = outer()
outer_gen.send(None)
outer_gen.send(inner_gen)
outer_gen.send(outer_gen)
del outer_gen
del inner_gen
gc_collect()
    def test_send_tuple_with_custom_generator(self):
        """send() must forward a tuple argument unchanged (not unpacked)."""
        # See issue #21209.
        class MyGen:
            def __iter__(self):
                return self
            def __next__(self):
                return 42
            def send(self, what):
                # Record exactly what was received so the test can inspect it.
                nonlocal v
                v = what
                return None
        def outer():
            v = yield from MyGen()
        g = outer()
        next(g)
        v = None
        g.send((1, 2, 3, 4))
        self.assertEqual(v, (1, 2, 3, 4))
if __name__ == '__main__':
unittest.main()
| true | true |
f73eb22187c5538329154142b0de18d663b8e20d | 2,766 | py | Python | e3a.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | e3a.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | e3a.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | # menu driven program to draw a circle using
# A) Mid point circle drawing algorithm
# B) Polar circle generation algorithm
# C) Non-Polar circle generation algorithm
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
from math import pi, sin, cos, sqrt
xc = 0
yc = 0
r = 0
def init():
    """Set up the viewport: white background and a 2D orthographic projection."""
    # Clear-color components must lie in [0.0, 1.0]; the original green value
    # of 2.0 was silently clamped to 1.0 by OpenGL, so 1.0 is the equivalent,
    # explicit value.
    glClearColor(1.0, 1.0, 1.0, 1.0)
    gluOrtho2D(-100.0, 100.0, -100.0, 100.0)
def midpoint():
    """Draw a circle of radius `r` centred at (xc, yc) with the mid-point algorithm."""
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glPointSize(5.0)
    glBegin(GL_POINTS)
    x = 0
    y = r
    # Plot all four cardinal points up-front; the original only plotted
    # (xc, yc + r), leaving gaps at (xc, yc - r) and (xc +/- r, yc).
    glVertex2f(xc, yc + r)
    glVertex2f(xc, yc - r)
    glVertex2f(xc + r, yc)
    glVertex2f(xc - r, yc)
    # Initial decision parameter for the mid-point circle algorithm.
    p = 5 / 4 - r
    while x < y:
        x += 1
        if p < 0:
            # Mid-point lies inside the circle: keep y.
            p += 2 * x + 1
        else:
            # Mid-point lies outside the circle: step y down as well.
            y -= 1
            p += 2 * (x - y) + 1
        # Exploit 8-way symmetry: each computed (x, y) yields eight points.
        glVertex2f(x + xc, y + yc)
        glVertex2f(-x + xc, -y + yc)
        glVertex2f(x + xc, -y + yc)
        glVertex2f(-x + xc, y + yc)
        glVertex2f(y + xc, x + yc)
        glVertex2f(-y + xc, -x + yc)
        glVertex2f(-y + xc, x + yc)
        glVertex2f(y + xc, -x + yc)
    glEnd()
    glFlush()
def pol():
    """Draw the circle by sampling 360 points in polar coordinates."""
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glPointSize(5.0)
    glBegin(GL_POINTS)
    for i in range(0, 360):
        # Convert degrees to radians. The original used `i * 180 / pi`,
        # which is the inverse (radians-to-degrees) conversion and produced
        # an incorrect sampling of the circle.
        theta = i * pi / 180
        x = xc + r * cos(theta)
        y = yc + r * sin(theta)
        glVertex2f(x, y)
    glEnd()
    glFlush()
def npol():
    """Draw the circle from the implicit equation x^2 + y^2 = r^2 (non-polar form)."""
    glClear(GL_COLOR_BUFFER_BIT)
    glColor3f(1.0, 0.0, 0.0)
    glPointSize(5.0)
    glBegin(GL_POINTS)
    # Sample y across the full vertical extent [-r, r] of the circle and plot
    # both x branches. The original iterated y over [0, 1000) regardless of r
    # (masking out-of-range values with abs) and plotted only the +x branch,
    # producing at most a quarter of the circle.
    steps = 1000
    for i in range(steps + 1):
        y = -r + (2 * r) * i / steps
        # Guard against tiny negative values from floating-point rounding.
        x = sqrt(max(r ** 2 - y ** 2, 0.0))
        glVertex2f(xc + x, yc + y)
        glVertex2f(xc - x, yc + y)
    glEnd()
    glFlush()
def main():
    """Menu loop: ask for an algorithm, then circle parameters, and draw.

    The loop exits when the user picks option 4 (or any unrecognised choice).
    """
    global xc, yc, r
    running = True
    while running:
        print("\n\nDraw a Circle using : ")
        print("1. Mid-Point circle drawing algorithm")
        print("2. Polar circle generation algorithm")
        print("3. Non-Polar circle generation algorithm")
        print("4. Exit")
        ch = int(input("\n Your Choice :: "))
        if ch not in (1, 2, 3):
            # Exit before prompting for circle parameters or creating a
            # window -- the original asked for radius and centre and opened
            # a GLUT window even when the user chose to quit.
            running = False
            continue
        r = float(input("Enter radius : "))
        xc = float(input("Enter X co-ordinate of the centre : "))
        yc = float(input("Enter Y co-ordinate of the centre : "))
        glutInit(sys.argv)
        glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
        glutInitWindowSize(500, 500)
        glutInitWindowPosition(50, 50)
        glutCreateWindow("Drawing a Circle")
        if ch == 1:
            glutDisplayFunc(midpoint)
        elif ch == 2:
            glutDisplayFunc(pol)
        else:
            glutDisplayFunc(npol)
        init()
        # NOTE: glutMainLoop() does not return in classic GLUT, so in
        # practice only one circle is drawn per program run.
        glutMainLoop()
    print("Ending...")
if __name__ == "__main__":
main() | 24.263158 | 66 | 0.519523 |
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
from math import pi, sin, cos, sqrt
xc = 0
yc = 0
r = 0
def init():
glClearColor(1.0,2.0,1.0,1.0)
gluOrtho2D(-100.0,100.0,-100.0,100.0)
def midpoint():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glPointSize(5.0)
glBegin(GL_POINTS)
x = 0
y = r
glVertex2f(x+xc, y+yc)
p = 5/4 - r
while x < y:
x += 1
if p < 0:
p += 2*x + 1
else:
y -= 1
p += 2 * (x - y) + 1
glVertex2f(x+xc,y+yc)
glVertex2f(-x+xc,-y+yc)
glVertex2f(x+xc,-y+yc)
glVertex2f(-x+xc,y+yc)
glVertex2f(y+xc,x+yc)
glVertex2f(-y+xc,-x+yc)
glVertex2f(-y+xc,x+yc)
glVertex2f(y+xc,-x+yc)
glEnd()
glFlush()
def pol():
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glPointSize(5.0)
glBegin(GL_POINTS)
theta = 0
for i in range(0,360):
theta = i * 180 / pi
x = xc + r * cos(theta)
y = yc + r * sin(theta)
glVertex2f(x,y)
glEnd()
glFlush()
def npol():
global xc, yc, r
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0,0.0,0.0)
glPointSize(5.0)
glBegin(GL_POINTS)
for i in range(0,1000):
y = i
d = abs(r**2-(y)**2)
x = sqrt(d)
glVertex2f(x+xc,y+yc)
glEnd()
glFlush()
def main():
global xc, yc, r
t = True
while t:
print("\n\nDraw a Circle using : ")
print("1. Mid-Point circle drawing algorithm")
print("2. Polar circle generation algorithm")
print("3. Non-Polar circle generation algorithm")
print("4. Exit")
ch = int(input("\n Your Choice :: "))
global r, xc, yc
r = float(input("Enter radius : "))
xc = float(input("Enter X co-ordinate of the centre : "))
yc = float(input("Enter Y co-ordinate of the centre : "))
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(500,500)
glutInitWindowPosition(50,50)
glutCreateWindow("Drawing a Circle")
if ch == 1:
glutDisplayFunc(midpoint)
elif ch == 2:
glutDisplayFunc(pol)
elif ch == 3:
glutDisplayFunc(npol)
else:
t = False
init()
glutMainLoop()
print("Ending...")
if __name__ == "__main__":
main() | true | true |
f73eb24dc43f550434f4ca9f74dd5e0ac82c4443 | 9,859 | py | Python | packages/core/minos-microservice-common/tests/test_common/test_config/test_v1/test_base.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | null | null | null | packages/core/minos-microservice-common/tests/test_common/test_config/test_v1/test_base.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | null | null | null | packages/core/minos-microservice-common/tests/test_common/test_config/test_v1/test_base.py | bhardwajRahul/minos-python | bad7a280ad92680abdeab01d1214688279cf6316 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import (
patch,
)
from minos.common import (
Config,
ConfigV1,
MinosConfigException,
)
from tests.utils import (
BASE_PATH,
FakeBrokerClientPool,
FakeBrokerPort,
FakeBrokerPublisher,
FakeBrokerSubscriberBuilder,
FakeCustomInjection,
FakeDatabasePool,
FakeDiscoveryConnector,
FakeEventRepository,
FakeHttpConnector,
FakeHttpPort,
FakeLockPool,
FakePeriodicPort,
FakeSagaManager,
FakeSnapshotRepository,
FakeTransactionRepository,
)
class TestConfigV1(unittest.TestCase):
    """Unit tests for `ConfigV1`, the v1 YAML-backed configuration parser.

    Each test loads the fixture at `tests/config/v1.yml` and exercises one
    accessor (`get_*`) of the config object, including the fallback paths
    taken when a configuration section is missing (simulated by patching
    `get_by_key` to raise `MinosConfigException`).
    """
    def setUp(self) -> None:
        self.file_path = BASE_PATH / "config" / "v1.yml"
        self.config = ConfigV1(self.file_path)
    def test_is_subclass(self):
        self.assertTrue(issubclass(ConfigV1, Config))
    def test_aggregate(self):
        expected = {
            "entities": [int],
            "repositories": dict(),
        }
        self.assertEqual(expected, self.config.get_aggregate())
    def test_version(self):
        self.assertEqual(1, self.config.version)
    def test_name(self):
        self.assertEqual("Order", self.config.get_name())
    def test_injections(self):
        expected = [
            FakeLockPool,
            FakeDatabasePool,
            FakeBrokerClientPool,
            FakeHttpConnector,
            FakeBrokerPublisher,
            FakeBrokerSubscriberBuilder,
            FakeEventRepository,
            FakeSnapshotRepository,
            FakeTransactionRepository,
            FakeDiscoveryConnector,
            FakeSagaManager,
            FakeCustomInjection,
        ]
        self.assertEqual(expected, self.config.get_injections())
    def test_injections_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            self.assertEqual(list(), self.config.get_injections())
    def test_interface_http(self):
        observed = self.config.get_interface_by_name("http")
        expected = {
            "port": FakeHttpPort,
            "connector": {
                "host": "localhost",
                "port": 8900,
            },
        }
        self.assertEqual(expected, observed)
    def test_interface_http_connector_not_defined(self):
        base = self.config.get_by_key
        def _fn(label):
            if label == "rest":
                raise MinosConfigException("")
            return base(label)
        with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
            observed = self.config.get_interface_by_name("http")
        expected = {
            "port": FakeHttpPort,
            "connector": {},
        }
        self.assertEqual(expected, observed)
    def test_interface_http_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            with self.assertRaises(MinosConfigException):
                self.config.get_interface_by_name("http")
    def test_interface_broker(self):
        config = ConfigV1(self.file_path, with_environment=False)
        broker = config.get_interface_by_name("broker")
        expected = {
            "port": FakeBrokerPort,
            "common": {
                "host": "localhost",
                "port": 9092,
                "queue": {"records": 10, "retry": 2},
            },
            "publisher": {},
            "subscriber": {},
        }
        self.assertEqual(expected, broker)
    def test_interface_broker_section_not_defined(self):
        base = self.config.get_by_key
        def _fn(label):
            if label == "broker":
                raise MinosConfigException("")
            return base(label)
        with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
            observed = self.config.get_interface_by_name("broker")
        expected = {
            "port": FakeBrokerPort,
            "common": {
                "queue": {"records": 10, "retry": 2},
            },
            "publisher": {},
            "subscriber": {},
        }
        self.assertEqual(expected, observed)
    def test_interface_broker_queue_not_defined(self):
        base = self.config.get_by_key
        def _fn(label):
            if label == "broker.queue":
                raise MinosConfigException("")
            return base(label)
        with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
            observed = self.config.get_interface_by_name("broker")
        expected = {
            "port": FakeBrokerPort,
            "common": {"host": "localhost", "port": 9092, "queue": {}},
            "publisher": {},
            "subscriber": {},
        }
        self.assertEqual(expected, observed)
    def test_interface_broker_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            with self.assertRaises(MinosConfigException):
                self.config.get_interface_by_name("broker")
    def test_interface_periodic(self):
        observed = self.config.get_interface_by_name("periodic")
        expected = {
            "port": FakePeriodicPort,
        }
        self.assertEqual(expected, observed)
    def test_interface_periodic_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            with self.assertRaises(MinosConfigException):
                self.config.get_interface_by_name("periodic")
    def test_interface_unknown(self):
        config = ConfigV1(self.file_path, with_environment=False)
        with self.assertRaises(MinosConfigException):
            config.get_interface_by_name("unknown")
    def test_pools(self):
        self.assertEqual(dict(), self.config.get_pools())
    def test_services(self):
        self.assertEqual([float, int], self.config.get_services())
    def test_services_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            self.assertEqual(list(), self.config.get_services())
    def test_routers(self):
        self.assertEqual([set, dict], self.config.get_routers())
    def test_routers_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            self.assertEqual(list(), self.config.get_routers())
    def test_middleware(self):
        self.assertEqual([list, tuple], self.config.get_middleware())
    def test_middleware_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            self.assertEqual(list(), self.config.get_middleware())
    def test_saga(self):
        config = ConfigV1(self.file_path, with_environment=False)
        saga_config = config.get_saga()
        expected = dict()
        self.assertEqual(expected, saga_config)
    def test_saga_not_defined(self):
        with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
            self.assertEqual(dict(), self.config.get_saga())
    def test_database_default(self):
        config = ConfigV1(self.file_path, with_environment=False)
        database_config = config.get_default_database()
        self.assertEqual("order_db", database_config["database"])
        self.assertEqual("minos", database_config["user"])
        self.assertEqual("min0s", database_config["password"])
        self.assertEqual("localhost", database_config["host"])
        self.assertEqual(5432, database_config["port"])
    def test_database_event(self):
        config = ConfigV1(self.file_path, with_environment=False)
        database_config = config.get_database_by_name("event")
        self.assertEqual("order_db", database_config["database"])
        self.assertEqual("minos", database_config["user"])
        self.assertEqual("min0s", database_config["password"])
        self.assertEqual("localhost", database_config["host"])
        self.assertEqual(5432, database_config["port"])
    def test_database_query(self):
        config = ConfigV1(self.file_path, with_environment=False)
        query_database = config.get_database_by_name("query")
        self.assertEqual("order_query_db", query_database["database"])
        self.assertEqual("minos", query_database["user"])
        self.assertEqual("min0s", query_database["password"])
        self.assertEqual("localhost", query_database["host"])
        self.assertEqual(5432, query_database["port"])
    def test_database_snapshot(self):
        config = ConfigV1(self.file_path, with_environment=False)
        snapshot = config.get_database_by_name("snapshot")
        self.assertEqual("order_db", snapshot["database"])
        self.assertEqual("minos", snapshot["user"])
        self.assertEqual("min0s", snapshot["password"])
        self.assertEqual("localhost", snapshot["host"])
        self.assertEqual(5432, snapshot["port"])
    def test_database_broker(self):
        config = ConfigV1(self.file_path, with_environment=False)
        snapshot = config.get_database_by_name("broker")
        self.assertEqual("order_db", snapshot["database"])
        self.assertEqual("minos", snapshot["user"])
        self.assertEqual("min0s", snapshot["password"])
        self.assertEqual("localhost", snapshot["host"])
        self.assertEqual(5432, snapshot["port"])
    def test_database_saga(self):
        config = ConfigV1(self.file_path, with_environment=False)
        saga = config.get_database_by_name("saga")
        self.assertEqual("./order.lmdb", saga["path"])
    def test_discovery(self):
        config = ConfigV1(self.file_path, with_environment=False)
        observed = config.get_discovery()
        expected = {
            "client": str,
            "host": "localhost",
            "port": 8080,
        }
        self.assertEqual(expected, observed)
if __name__ == "__main__":
unittest.main()
| 34.351916 | 88 | 0.632823 | import unittest
from unittest.mock import (
patch,
)
from minos.common import (
Config,
ConfigV1,
MinosConfigException,
)
from tests.utils import (
BASE_PATH,
FakeBrokerClientPool,
FakeBrokerPort,
FakeBrokerPublisher,
FakeBrokerSubscriberBuilder,
FakeCustomInjection,
FakeDatabasePool,
FakeDiscoveryConnector,
FakeEventRepository,
FakeHttpConnector,
FakeHttpPort,
FakeLockPool,
FakePeriodicPort,
FakeSagaManager,
FakeSnapshotRepository,
FakeTransactionRepository,
)
class TestConfigV1(unittest.TestCase):
def setUp(self) -> None:
self.file_path = BASE_PATH / "config" / "v1.yml"
self.config = ConfigV1(self.file_path)
def test_is_subclass(self):
self.assertTrue(issubclass(ConfigV1, Config))
def test_aggregate(self):
expected = {
"entities": [int],
"repositories": dict(),
}
self.assertEqual(expected, self.config.get_aggregate())
def test_version(self):
self.assertEqual(1, self.config.version)
def test_name(self):
self.assertEqual("Order", self.config.get_name())
def test_injections(self):
expected = [
FakeLockPool,
FakeDatabasePool,
FakeBrokerClientPool,
FakeHttpConnector,
FakeBrokerPublisher,
FakeBrokerSubscriberBuilder,
FakeEventRepository,
FakeSnapshotRepository,
FakeTransactionRepository,
FakeDiscoveryConnector,
FakeSagaManager,
FakeCustomInjection,
]
self.assertEqual(expected, self.config.get_injections())
def test_injections_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
self.assertEqual(list(), self.config.get_injections())
def test_interface_http(self):
observed = self.config.get_interface_by_name("http")
expected = {
"port": FakeHttpPort,
"connector": {
"host": "localhost",
"port": 8900,
},
}
self.assertEqual(expected, observed)
def test_interface_http_connector_not_defined(self):
base = self.config.get_by_key
def _fn(label):
if label == "rest":
raise MinosConfigException("")
return base(label)
with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
observed = self.config.get_interface_by_name("http")
expected = {
"port": FakeHttpPort,
"connector": {},
}
self.assertEqual(expected, observed)
def test_interface_http_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
with self.assertRaises(MinosConfigException):
self.config.get_interface_by_name("http")
def test_interface_broker(self):
config = ConfigV1(self.file_path, with_environment=False)
broker = config.get_interface_by_name("broker")
expected = {
"port": FakeBrokerPort,
"common": {
"host": "localhost",
"port": 9092,
"queue": {"records": 10, "retry": 2},
},
"publisher": {},
"subscriber": {},
}
self.assertEqual(expected, broker)
def test_interface_broker_section_not_defined(self):
base = self.config.get_by_key
def _fn(label):
if label == "broker":
raise MinosConfigException("")
return base(label)
with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
observed = self.config.get_interface_by_name("broker")
expected = {
"port": FakeBrokerPort,
"common": {
"queue": {"records": 10, "retry": 2},
},
"publisher": {},
"subscriber": {},
}
self.assertEqual(expected, observed)
def test_interface_broker_queue_not_defined(self):
base = self.config.get_by_key
def _fn(label):
if label == "broker.queue":
raise MinosConfigException("")
return base(label)
with patch.object(ConfigV1, "get_by_key", side_effect=_fn):
observed = self.config.get_interface_by_name("broker")
expected = {
"port": FakeBrokerPort,
"common": {"host": "localhost", "port": 9092, "queue": {}},
"publisher": {},
"subscriber": {},
}
self.assertEqual(expected, observed)
def test_interface_broker_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
with self.assertRaises(MinosConfigException):
self.config.get_interface_by_name("broker")
def test_interface_periodic(self):
observed = self.config.get_interface_by_name("periodic")
expected = {
"port": FakePeriodicPort,
}
self.assertEqual(expected, observed)
def test_interface_periodic_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
with self.assertRaises(MinosConfigException):
self.config.get_interface_by_name("periodic")
def test_interface_unknown(self):
config = ConfigV1(self.file_path, with_environment=False)
with self.assertRaises(MinosConfigException):
config.get_interface_by_name("unknown")
def test_pools(self):
self.assertEqual(dict(), self.config.get_pools())
def test_services(self):
self.assertEqual([float, int], self.config.get_services())
def test_services_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
self.assertEqual(list(), self.config.get_services())
def test_routers(self):
self.assertEqual([set, dict], self.config.get_routers())
def test_routers_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
self.assertEqual(list(), self.config.get_routers())
def test_middleware(self):
self.assertEqual([list, tuple], self.config.get_middleware())
def test_middleware_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
self.assertEqual(list(), self.config.get_middleware())
def test_saga(self):
config = ConfigV1(self.file_path, with_environment=False)
saga_config = config.get_saga()
expected = dict()
self.assertEqual(expected, saga_config)
def test_saga_not_defined(self):
with patch.object(ConfigV1, "get_by_key", side_effect=MinosConfigException("")):
self.assertEqual(dict(), self.config.get_saga())
def test_database_default(self):
config = ConfigV1(self.file_path, with_environment=False)
database_config = config.get_default_database()
self.assertEqual("order_db", database_config["database"])
self.assertEqual("minos", database_config["user"])
self.assertEqual("min0s", database_config["password"])
self.assertEqual("localhost", database_config["host"])
self.assertEqual(5432, database_config["port"])
def test_database_event(self):
config = ConfigV1(self.file_path, with_environment=False)
database_config = config.get_database_by_name("event")
self.assertEqual("order_db", database_config["database"])
self.assertEqual("minos", database_config["user"])
self.assertEqual("min0s", database_config["password"])
self.assertEqual("localhost", database_config["host"])
self.assertEqual(5432, database_config["port"])
def test_database_query(self):
config = ConfigV1(self.file_path, with_environment=False)
query_database = config.get_database_by_name("query")
self.assertEqual("order_query_db", query_database["database"])
self.assertEqual("minos", query_database["user"])
self.assertEqual("min0s", query_database["password"])
self.assertEqual("localhost", query_database["host"])
self.assertEqual(5432, query_database["port"])
def test_database_snapshot(self):
config = ConfigV1(self.file_path, with_environment=False)
snapshot = config.get_database_by_name("snapshot")
self.assertEqual("order_db", snapshot["database"])
self.assertEqual("minos", snapshot["user"])
self.assertEqual("min0s", snapshot["password"])
self.assertEqual("localhost", snapshot["host"])
self.assertEqual(5432, snapshot["port"])
def test_database_broker(self):
config = ConfigV1(self.file_path, with_environment=False)
snapshot = config.get_database_by_name("broker")
self.assertEqual("order_db", snapshot["database"])
self.assertEqual("minos", snapshot["user"])
self.assertEqual("min0s", snapshot["password"])
self.assertEqual("localhost", snapshot["host"])
self.assertEqual(5432, snapshot["port"])
def test_database_saga(self):
config = ConfigV1(self.file_path, with_environment=False)
saga = config.get_database_by_name("saga")
self.assertEqual("./order.lmdb", saga["path"])
def test_discovery(self):
config = ConfigV1(self.file_path, with_environment=False)
observed = config.get_discovery()
expected = {
"client": str,
"host": "localhost",
"port": 8080,
}
self.assertEqual(expected, observed)
if __name__ == "__main__":
unittest.main()
| true | true |
f73eb2876443a53efac68a5e544fb7980b20321f | 493 | py | Python | plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `templateitemname` property of
    `layout.polar.radialaxis.tickformatstop`.
    """

    def __init__(
        self,
        plotly_name='templateitemname',
        parent_name='layout.polar.radialaxis.tickformatstop',
        **kwargs
    ):
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='plot',
            role='info',
            **kwargs
        )
| 25.947368 | 78 | 0.63286 | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='templateitemname',
parent_name='layout.polar.radialaxis.tickformatstop',
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='info',
**kwargs
)
| true | true |
f73eb300672f802f2ebd72871418ec63ada26f73 | 1,371 | py | Python | construct/tests/test_actionparams.py | construct-org/construct | ac1211e6dedbf87be2fa42e3eb197c2acea6de68 | [
"MIT"
] | 8 | 2018-04-04T18:25:18.000Z | 2020-01-16T16:45:16.000Z | construct/tests/test_actionparams.py | construct-org/construct | ac1211e6dedbf87be2fa42e3eb197c2acea6de68 | [
"MIT"
] | 36 | 2018-02-23T23:53:39.000Z | 2020-01-15T19:38:57.000Z | construct/tests/test_actionparams.py | construct-org/construct | ac1211e6dedbf87be2fa42e3eb197c2acea6de68 | [
"MIT"
] | 1 | 2019-03-05T20:19:19.000Z | 2019-03-05T20:19:19.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from nose.tools import raises
from construct import actionparams
from construct.errors import ArgumentError
params_0 = dict()
params_1 = dict(
str_arg={
'label': 'String Argument',
'help': 'A String Argument',
'required': True,
'type': str
},
int_arg={
'label': 'Integer Argument',
'help': 'An Integer Argument',
'required': True,
'default': 1,
'type': int
},
float_arg={
'label': 'Float Argument',
'help': 'A Float Argument',
'required': False,
'default': 10.0,
'type': float
},
dict_arg={
'label': 'Dict Argument',
'help': 'A Dict Argument',
'required': True,
'type': dict
}
)
def test_validate_nada():
    '''Validate both an empty params dict and a fully populated one'''
    actionparams.validate(params_0)
    actionparams.validate(params_1)
@raises(ArgumentError)
def test_pass_args_to_empty_params():
    '''Validate kwargs against empty params'''
    # Any keyword argument is invalid when no parameters are declared.
    actionparams.validate_kwargs(params_0, {'invalid': 'kwargs'})
@raises(ArgumentError)
def test_missing_required():
    '''Validate kwargs with missing required argument'''
    # The kwargs omit at least one argument declared with required=True in
    # params_1 (e.g. int_arg), so validation must raise ArgumentError.
    actionparams.validate_kwargs(
        params_1,
        {'str_arg': 'str', 'dict_arg': {}}
    )
| 23.637931 | 65 | 0.606856 |
from __future__ import absolute_import, division, print_function
from nose.tools import raises
from construct import actionparams
from construct.errors import ArgumentError
params_0 = dict()
params_1 = dict(
str_arg={
'label': 'String Argument',
'help': 'A String Argument',
'required': True,
'type': str
},
int_arg={
'label': 'Integer Argument',
'help': 'An Integer Argument',
'required': True,
'default': 1,
'type': int
},
float_arg={
'label': 'Float Argument',
'help': 'A Float Argument',
'required': False,
'default': 10.0,
'type': float
},
dict_arg={
'label': 'Dict Argument',
'help': 'A Dict Argument',
'required': True,
'type': dict
}
)
def test_validate_nada():
actionparams.validate(params_0)
actionparams.validate(params_1)
@raises(ArgumentError)
def test_pass_args_to_empty_params():
actionparams.validate_kwargs(params_0, {'invalid': 'kwargs'})
@raises(ArgumentError)
def test_missing_required():
actionparams.validate_kwargs(
params_1,
{'str_arg': 'str', 'dict_arg': {}}
)
| true | true |
f73eb32cf11821a8db22abbfaf4672fb58c3ceac | 29,274 | py | Python | bananas/testing/learners.py | owahltinez/bananas | 4d37af1713b7f166ead3459a7004748f954d336e | [
"MIT"
] | null | null | null | bananas/testing/learners.py | owahltinez/bananas | 4d37af1713b7f166ead3459a7004748f954d336e | [
"MIT"
] | null | null | null | bananas/testing/learners.py | owahltinez/bananas | 4d37af1713b7f166ead3459a7004748f954d336e | [
"MIT"
] | null | null | null | """
This library comes with a comprehensive testing suite that checks for API compliance, input type
handling, change map handling, and more. Not all tests are run for every learner; tests specific
to certain estimators like [supervised learners](../core/index.md#supervised) or [transformers](
../transformers/index.md) are only run when the learner instance is of the corresponding type.
"""
import warnings
from inspect import signature, Parameter
from typing import Union
from unittest import TestCase, TestResult
from ..changemap.changemap import ChangeMap
from ..core.learner import Learner, SupervisedLearner, UnsupervisedLearner
from ..core.mixins import BaseClassifier, BaseRegressor
from ..dataset.dataset import DataSet
from ..training.train_history import TrainHistory
from ..transformers.base import BaseTransformer
from ..utils.arrays import check_array
from ..utils.misc import warn_with_traceback
from .generators import (
generate_array_booleans,
generate_array_chars,
generate_array_floats,
generate_array_ints,
generate_array_int_floats,
generate_array_uints,
generate_array_strings,
)
# Number of samples in the test data
TEST_SAMPLE_SIZE = 1024
def test_learner(learner_type: Union[type, Learner], **learner_kwargs):
    """
    Performs a battery of tests against the provided learner. If the learner must be initialized
    with certain parameters, those can be passed to this function too.

    Parameters
    ----------
    learner_type : Union[type, Learner]
        Learner class to test. An instance may be passed instead, in which
        case its class is extracted and tested.
    learner_kwargs
        Keyword arguments forwarded to the learner's constructor whenever a
        test suite instantiates it.

    Returns
    -------
    bool
        True when every applicable test suite passed.

    Raises
    ------
    AssertionError
        If any test in the selected suites errored or failed; the message
        contains the first recorded Traceback.
    """
    # Change warnings behavior to display stack trace
    showwarning = warnings.showwarning
    warnings.showwarning = warn_with_traceback

    # If we were given an instance instead of a type, convert back to type and guess args
    if isinstance(learner_type, Learner):
        learner_type = type(learner_type)

    # Set random state via keyword argument for all learners if they support it
    # (either an explicit `random_seed` parameter or a **kwargs catch-all).
    params = signature(learner_type.__init__).parameters
    if "random_seed" in params.keys() or any(
        [param.kind == Parameter.VAR_KEYWORD for param in params.values()]
    ):
        learner_kwargs["random_seed"] = 0

    # Test options apply to all tests
    test_opts = {"learner_class": learner_type, "learner_kwargs": learner_kwargs}

    # Pick test suites based on subclasses
    test_suites = []
    if issubclass(learner_type, Learner):
        test_suites.append(LearnerTests(**test_opts))
    if issubclass(learner_type, SupervisedLearner):
        test_suites.append(SupervisedLearnerTests(**test_opts))
    if issubclass(learner_type, UnsupervisedLearner):
        test_suites.append(UnsupervisedLearnerTests(**test_opts))
    if issubclass(learner_type, BaseTransformer):
        test_suites.append(TransformerTests(**test_opts))
    if issubclass(learner_type, BaseRegressor):
        test_suites.append(RegressorTests(**test_opts))
    if issubclass(learner_type, BaseClassifier):
        test_suites.append(ClassifierTests(**test_opts))

    # Accumulate results over all test suites
    result = TestResult()
    for suite in test_suites:
        suite.run(result=result)

    # Display errors
    for err in result.errors:
        print("\n%s\n" % err[1])
    for fail in result.failures:
        print("\n%s\n" % fail[1])

    # Restore warning behavior
    warnings.showwarning = showwarning

    # If any errors or failures, raise the exception
    assert not result.errors and not result.failures, (
        "One or more tests failed. Please see "
        "output for all Tracebacks.\n\n%s\n"
        % (result.errors[0][1] if result.errors else result.failures[0][1])
    )
    return True
def assert_predictions_match_cloned_learner(test: TestCase, y1, y2):
    """Assert that two prediction sequences agree on their first 100 entries.

    Only a bounded prefix is compared so the check stays cheap even for very
    large prediction arrays; inputs are coerced to plain lists so that
    ``assertListEqual`` can diff them.
    """
    # Cap the comparison at 100 elements for performance.
    head_a = list(y1[:100])
    head_b = list(y2[:100])
    message = (
        "Calling `predict` after fitting the same set of data to a cloned learner should "
        "yield the same result"
    )
    test.assertListEqual(head_a, head_b, message)
class _LearnerTests(TestCase):
def __init__(self, learner_class: type, learner_kwargs: dict):
super().__init__()
self.learner_class = learner_class
self.learner_kwargs = learner_kwargs
def runTest(self):
for test_name in [key for key in dir(self) if key.startswith("test_")]:
test_case = getattr(self, test_name)
try:
test_case()
except Exception as err:
print("%s_%s ... fail" % (self.learner_class.__name__, test_name))
raise err
print("%s_%s ... ok" % (self.learner_class.__name__, test_name))
def _get_learner(self, **learner_kwargs):
learner: Learner = self.learner_class(**{**self.learner_kwargs, **learner_kwargs})
return learner
class LearnerTests(_LearnerTests):
    """ Basic tests for the base Learner class """

    def test_learner_calls_init(self):
        """Verify the learner chains up to ``Learner.__init__``.

        Monkey-patches ``Learner.__init__`` with a probe that flips a flag;
        the probe only fires if the subclass calls ``super().__init__()``.
        The patch is restored in a ``finally`` so a constructor failure
        cannot leave ``Learner`` poisoned for subsequent tests.
        """
        flag = [False]
        __init_old__ = Learner.__init__

        def __init_new__(self, **kwargs):
            flag[0] = True

        Learner.__init__ = __init_new__
        try:
            # Instantiating the learner triggers the probe iff super().__init__ is called.
            self._get_learner()
        finally:
            # Always put back the original init, even if instantiation raised.
            Learner.__init__ = __init_old__
        self.assertEqual(True, flag[0], "Learner must call super().__init__() method")
class UnsupervisedLearnerTests(_LearnerTests):
    """ Tests designed for unsupervised learners. """

    def test_unsupervised_signature(self):
        """Check call signatures: fit/predict take X only, score takes X and y.

        A ``NotImplementedError`` from the call is tolerated; a wrong arity
        (not raising ``TypeError``) is a failure.
        """
        learner: UnsupervisedLearner = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit returns self
        self.assertEqual(learner, learner.fit(X))
        # Fit and predict only take X, or return NotImplementedError
        for func in ("fit", "predict"):
            if not hasattr(learner, func):
                continue
            try:
                self.assertTrue(getattr(learner, func)(X) is not None)
            except NotImplementedError:
                pass
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X, y))
        # score takes X, y, or returns NotImplementedError
        for func in ("score",):
            if not hasattr(learner, func):
                continue
            try:
                self.assertTrue(getattr(learner, func)(X, y) is not None)
            except NotImplementedError:
                pass
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X))

    def test_unsupervised_input_changed(self):
        """Fit, notify an input-shape change, then fit the reduced shape."""
        learner: UnsupervisedLearner = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # Fit first batch
        learner.fit(X1)
        # Announce deletion of every other index among the 16 inputs
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch with the reduced shape
        learner.fit(X2)

    def test_unsupervised_batch_size_changed(self):
        """Fitting a half-sized second batch must not break the learner."""
        features = [
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0).reshape(2, -1),
            ),
        ]
        # TODO: add 3-D feature pairs (reshape(2, -1, 2)) once supported.
        for X1, X2 in features:
            # Fresh learner per shape pair so state cannot leak between cases.
            learner = self._get_learner()
            # Fit first batch
            learner.fit(X1)
            # Fit second (smaller) batch
            learner.fit(X2)
class SupervisedLearnerTests(_LearnerTests):
    """ Tests designed for supervised learners. """
    def test_supervised_signature(self):
        """Check call signatures: fit/score take (X, y), predict takes X only.

        Wrong arity must raise ``TypeError``; each valid call must return a value.
        """
        learner: SupervisedLearner = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit returns self
        self.assertEqual(learner, learner.fit(X, y))
        # Fit and score take X, y
        for func in ("fit", "score"):
            self.assertTrue(getattr(learner, func)(X, y) is not None)
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X))
        # Predict only takes X
        for func in ("predict",):
            self.assertTrue(getattr(learner, func)(X) is not None)
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X, y))
    def test_supervised_input_changed(self):
        """Fit, notify an input-shape change, then fit the reduced shape."""
        learner: SupervisedLearner = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # NOTE(review): y has TEST_SAMPLE_SIZE entries while X1 has 16 rows —
        # presumably learners only validate feature width here; confirm.
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit first batch
        learner.fit(X1, y)
        # Send input changed event
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch
        learner.fit(X2, y)
    def test_supervised_batch_size_changed(self):
        """Fit consecutive batches of shrinking size; must not raise."""
        features = [
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0).reshape(2, -1),
            ),
        ]
        # (generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1, 2),
        # generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0)).reshape(2, -1, 2)]
        targets = [
            (
                generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 4, random_seed=0),
            ),
        ]
        for (X1, X2), (y1, y2) in zip(features, targets):
            learner: SupervisedLearner = self._get_learner()
            # Fit first batch
            learner.fit(X1, y1)
            # Fit second batch
            learner.fit(X2, y2)
class TransformerTests(_LearnerTests):
    """ Tests designed for transformers. """
    def test_transformer_transform(self):
        """Two identically-configured transformers must transform X identically."""
        learner: BaseTransformer = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        Xt1 = learner.fit(X).transform(X)
        learner = self._get_learner()
        Xt2 = learner.fit(X).transform(X)
        Xt1 = check_array(Xt1).tolist()
        Xt2 = check_array(Xt2).tolist()
        self.assertListEqual(Xt1, Xt2)
    def test_transformer_inverse_transform(self):
        """inverse_transform(transform(X)) must roughly recover X for at least one data kind."""
        # Inverse transform should work at least with one of continuous or multiclass
        continous_data = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        categorical_data = generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0)
        success = False
        for X in (continous_data, categorical_data):
            learner = self._get_learner()
            try:
                Xt = learner.fit(X).transform(X)
                success = True
            except TypeError:
                # Some transformers only accept continuous or multiclass
                continue
            try:
                X_ = learner.inverse_transform(Xt)
                # Inverse transformation may not be exact, so use fuzzy comparison
                for v1, v2 in zip(X[:100], X_[:100]):
                    self.assertAlmostEqual(v1, v2)
            except NotImplementedError:
                # Having a transformer that does not implement inverse_transform is OK
                pass
        self.assertTrue(success, "fit-transform did not work for continuous or categorical data")
    def test_transformer_input_changed(self):
        """Fit+transform, notify an input-shape change, then fit+transform the reduced shape."""
        learner: BaseTransformer = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # Fit first batch
        learner.fit(X1).transform(X1)
        # Send input changed event
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch
        learner.fit(X2).transform(X2)
class RegressorTests(_LearnerTests):
    """ Tests designed for regressors. """

    def test_regressor_fit_1D(self):
        """Fit every 1-D feature dtype against every numeric target dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        targets = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                # Fresh learner per dtype combination so state cannot leak.
                learner = self._get_learner()
                learner.fit(X, y)

    def test_regressor_fit_1D_single_sample(self):
        """Fit must not break on single-sample inputs of any dtype."""
        features = [
            generate_array_floats(n=1, random_seed=0),
            generate_array_int_floats(n=1, random_seed=0),
            generate_array_ints(n=1, random_seed=0),
            generate_array_uints(n=1, random_seed=0),
            generate_array_booleans(n=1, random_seed=0),
        ]
        targets = [
            generate_array_floats(n=1, random_seed=0),
            generate_array_int_floats(n=1, random_seed=0),
            generate_array_ints(n=1, random_seed=0),
            generate_array_uints(n=1, random_seed=0),
            generate_array_booleans(n=1, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_regressor_fit_2D(self):
        """Fit must accept 2-row feature matrices of every dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_regressor_fit_3D(self):
        """Fit must accept 3-row feature matrices of every dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
        ]
        targets = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_regressor_predict(self):
        """Identically-configured regressors fit on the same data must predict identically."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner_ = self._get_learner()
                y1 = learner_.fit(X, y).predict(X)
                learner_ = self._get_learner()
                y2 = learner_.fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
                # A third clone guards against a pairwise-only coincidence.
                learner_ = self._get_learner()
                y3 = learner_.fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y3)

    def test_regressor_train(self):
        """`train` must return a TrainHistory and be deterministic across clones."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                # Make sure that `train` returns history object
                learner1 = self._get_learner()
                input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
                history = learner1.train(input_fn, max_steps=10)
                self.assertEqual(type(history), TrainHistory)
                # A cloned learner trained on the same data must predict the same
                # (the comparison helper already limits itself to 100 entries).
                learner2 = self._get_learner()
                input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
                learner2.train(input_fn, max_steps=10)
                y1 = learner1.predict(X)
                y2 = learner2.predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
class ClassifierTests(_LearnerTests):
    """ Tests designed for classifiers. """

    def test_classifier_fit_1D(self):
        """Fit every 1-D feature dtype against every categorical target dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        targets = [
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_strings(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                # Fresh learner per dtype combination so state cannot leak.
                learner = self._get_learner()
                learner.fit(X, y)

    def test_classifier_fit_1D_single_sample(self):
        """Fit must not break on single-sample inputs of any dtype."""
        features = [
            generate_array_floats(n=1, random_seed=0),
            generate_array_int_floats(n=1, random_seed=0),
            generate_array_ints(n=1, random_seed=0),
            generate_array_uints(n=1, random_seed=0),
            generate_array_booleans(n=1, random_seed=0),
        ]
        targets = [
            generate_array_int_floats(n=1, random_seed=0),
            generate_array_ints(n=1, random_seed=0),
            generate_array_uints(n=1, random_seed=0),
            generate_array_booleans(n=1, random_seed=0),
            generate_array_chars(n=1, random_seed=0),
            generate_array_strings(n=1, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_classifier_fit_2D(self):
        """Fit must accept 2-row feature matrices of every dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_strings(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_classifier_fit_3D(self):
        """Fit must accept 3-row feature matrices of every dtype."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
        ]
        targets = [
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_strings(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner = self._get_learner()
                learner.fit(X, y)

    def test_classifier_predict(self):
        """Identically-configured classifiers fit on the same data must predict identically."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_strings(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                learner_ = self._get_learner()
                y1 = learner_.fit(X, y).predict(X)
                learner_ = self._get_learner()
                y2 = learner_.fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
                # A third clone guards against a pairwise-only coincidence.
                learner_ = self._get_learner()
                y3 = learner_.fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y3)

    def test_classifier_predict_proba(self):
        """Placeholder: probability outputs are not covered yet."""
        pass  # TODO: implement this test

    def test_classifier_train(self):
        """`train` must return a TrainHistory and be deterministic across clones."""
        features = [
            generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
            generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
        ]
        targets = [
            generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0),
            generate_array_strings(n=TEST_SAMPLE_SIZE, random_seed=0),
        ]
        for X in features:
            for y in targets:
                # Make sure that `train` returns history object
                learner1 = self._get_learner()
                input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
                history = learner1.train(input_fn, max_steps=10)
                self.assertEqual(type(history), TrainHistory)
                # A cloned learner trained on the same data must predict the same.
                learner2 = self._get_learner()
                input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
                learner2.train(input_fn, max_steps=10)
                y1 = learner1.predict(X)
                y2 = learner2.predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
| 41.82 | 99 | 0.640944 |
import warnings
from inspect import signature, Parameter
from typing import Union
from unittest import TestCase, TestResult
from ..changemap.changemap import ChangeMap
from ..core.learner import Learner, SupervisedLearner, UnsupervisedLearner
from ..core.mixins import BaseClassifier, BaseRegressor
from ..dataset.dataset import DataSet
from ..training.train_history import TrainHistory
from ..transformers.base import BaseTransformer
from ..utils.arrays import check_array
from ..utils.misc import warn_with_traceback
from .generators import (
generate_array_booleans,
generate_array_chars,
generate_array_floats,
generate_array_ints,
generate_array_int_floats,
generate_array_uints,
generate_array_strings,
)
TEST_SAMPLE_SIZE = 1024  # default number of samples generated per test array
def test_learner(learner_type: Union[type, Learner], **learner_kwargs):
    """Run the full battery of interface tests against a learner type.

    Parameters
    ----------
    learner_type : Union[type, Learner]
        The learner class to test, or an instance (its class is used).
    learner_kwargs
        Keyword arguments forwarded to the learner's constructor.

    Returns
    -------
    bool
        True if all applicable suites passed; otherwise an AssertionError
        is raised carrying the first failure's traceback.
    """
    # Show full stack traces for warnings while the suites run; restored below.
    showwarning = warnings.showwarning
    warnings.showwarning = warn_with_traceback
    try:
        # Accept an instance for convenience; the tests need the type.
        if isinstance(learner_type, Learner):
            learner_type = type(learner_type)
        # Seed any learner whose constructor accepts random_seed (or **kwargs).
        params = signature(learner_type.__init__).parameters
        if "random_seed" in params.keys() or any(
            [param.kind == Parameter.VAR_KEYWORD for param in params.values()]
        ):
            learner_kwargs["random_seed"] = 0
        test_opts = {"learner_class": learner_type, "learner_kwargs": learner_kwargs}
        # Pick test suites based on the learner's base classes.
        test_suites = []
        if issubclass(learner_type, Learner):
            test_suites.append(LearnerTests(**test_opts))
        if issubclass(learner_type, SupervisedLearner):
            test_suites.append(SupervisedLearnerTests(**test_opts))
        if issubclass(learner_type, UnsupervisedLearner):
            test_suites.append(UnsupervisedLearnerTests(**test_opts))
        if issubclass(learner_type, BaseTransformer):
            test_suites.append(TransformerTests(**test_opts))
        if issubclass(learner_type, BaseRegressor):
            test_suites.append(RegressorTests(**test_opts))
        if issubclass(learner_type, BaseClassifier):
            test_suites.append(ClassifierTests(**test_opts))
        # Accumulate results over all test suites.
        result = TestResult()
        for suite in test_suites:
            suite.run(result=result)
        # Display errors and failures.
        for err in result.errors:
            print("\n%s\n" % err[1])
        for fail in result.failures:
            print("\n%s\n" % fail[1])
    finally:
        # Restore warning behavior even if a suite blew up.
        warnings.showwarning = showwarning
    # If any errors or failures, raise with the first traceback.
    assert not result.errors and not result.failures, (
        "One or more tests failed. Please see "
        "output for all Tracebacks.\n\n%s\n"
        % (result.errors[0][1] if result.errors else result.failures[0][1])
    )
    return True
def assert_predictions_match_cloned_learner(test: TestCase, y1, y2):
    """Assert that two prediction sequences agree on their first 100 entries.

    Only a bounded prefix is compared so the check stays cheap on large
    outputs; inputs are coerced to plain lists for ``assertListEqual``.
    """
    head_a = list(y1[:100])
    head_b = list(y2[:100])
    message = (
        "Calling `predict` after fitting the same set of data to a cloned learner should "
        "yield the same result"
    )
    test.assertListEqual(head_a, head_b, message)
class _LearnerTests(TestCase):
def __init__(self, learner_class: type, learner_kwargs: dict):
super().__init__()
self.learner_class = learner_class
self.learner_kwargs = learner_kwargs
def runTest(self):
for test_name in [key for key in dir(self) if key.startswith("test_")]:
test_case = getattr(self, test_name)
try:
test_case()
except Exception as err:
print("%s_%s ... fail" % (self.learner_class.__name__, test_name))
raise err
print("%s_%s ... ok" % (self.learner_class.__name__, test_name))
def _get_learner(self, **learner_kwargs):
learner: Learner = self.learner_class(**{**self.learner_kwargs, **learner_kwargs})
return learner
class LearnerTests(_LearnerTests):
    """Basic tests for the base Learner class."""

    def test_learner_calls_init(self):
        """Verify the learner chains up to ``Learner.__init__``.

        Monkey-patches ``Learner.__init__`` with a probe that flips a flag;
        the probe only fires if the subclass calls ``super().__init__()``.
        The patch is restored in a ``finally`` so a constructor failure
        cannot leave ``Learner`` poisoned for subsequent tests.
        """
        flag = [False]
        __init_old__ = Learner.__init__

        def __init_new__(self, **kwargs):
            flag[0] = True

        Learner.__init__ = __init_new__
        try:
            # Instantiating the learner triggers the probe iff super().__init__ is called.
            self._get_learner()
        finally:
            # Always put back the original init, even if instantiation raised.
            Learner.__init__ = __init_old__
        self.assertEqual(True, flag[0], "Learner must call super().__init__() method")
class UnsupervisedLearnerTests(_LearnerTests):
    """Tests designed for unsupervised learners."""

    def test_unsupervised_signature(self):
        """Check call signatures: fit/predict take X only, score takes X and y.

        A ``NotImplementedError`` from the call is tolerated; a wrong arity
        (not raising ``TypeError``) is a failure.
        """
        learner: UnsupervisedLearner = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit returns self
        self.assertEqual(learner, learner.fit(X))
        # Fit and predict only take X, or return NotImplementedError
        for func in ("fit", "predict"):
            if not hasattr(learner, func):
                continue
            try:
                self.assertTrue(getattr(learner, func)(X) is not None)
            except NotImplementedError:
                pass
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X, y))
        # score takes X, y, or returns NotImplementedError
        for func in ("score",):
            if not hasattr(learner, func):
                continue
            try:
                self.assertTrue(getattr(learner, func)(X, y) is not None)
            except NotImplementedError:
                pass
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X))

    def test_unsupervised_input_changed(self):
        """Fit, notify an input-shape change, then fit the reduced shape."""
        learner: UnsupervisedLearner = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # Fit first batch
        learner.fit(X1)
        # Announce deletion of every other index among the 16 inputs
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch with the reduced shape
        learner.fit(X2)

    def test_unsupervised_batch_size_changed(self):
        """Fitting a half-sized second batch must not break the learner."""
        features = [
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0).reshape(2, -1),
            ),
        ]
        # TODO: add 3-D feature pairs (reshape(2, -1, 2)) once supported.
        for X1, X2 in features:
            # Fresh learner per shape pair so state cannot leak between cases.
            learner = self._get_learner()
            # Fit first batch
            learner.fit(X1)
            # Fit second (smaller) batch
            learner.fit(X2)
class SupervisedLearnerTests(_LearnerTests):
    """Tests designed for supervised learners."""
    def test_supervised_signature(self):
        """Check call signatures: fit/score take (X, y), predict takes X only.

        Wrong arity must raise ``TypeError``; each valid call must return a value.
        """
        learner: SupervisedLearner = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit returns self
        self.assertEqual(learner, learner.fit(X, y))
        # Fit and score take X, y
        for func in ("fit", "score"):
            self.assertTrue(getattr(learner, func)(X, y) is not None)
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X))
        # Predict only takes X
        for func in ("predict",):
            self.assertTrue(getattr(learner, func)(X) is not None)
            self.assertRaises(TypeError, lambda: getattr(learner, func)(X, y))
    def test_supervised_input_changed(self):
        """Fit, notify an input-shape change, then fit the reduced shape."""
        learner: SupervisedLearner = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # NOTE(review): y has TEST_SAMPLE_SIZE entries while X1 has 16 rows —
        # presumably learners only validate feature width here; confirm.
        y = generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        # Fit first batch
        learner.fit(X1, y)
        # Send input changed event
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch
        learner.fit(X2, y)
    def test_supervised_batch_size_changed(self):
        """Fit consecutive batches of shrinking size; must not raise."""
        features = [
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1),
                generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0).reshape(2, -1),
            ),
        ]
        # (generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0).reshape(2, -1, 2),
        # generate_array_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0)).reshape(2, -1, 2)]
        targets = [
            (
                generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
            ),
            (
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 2, random_seed=0),
                generate_array_int_floats(n=TEST_SAMPLE_SIZE // 4, random_seed=0),
            ),
        ]
        for (X1, X2), (y1, y2) in zip(features, targets):
            learner: SupervisedLearner = self._get_learner()
            # Fit first batch
            learner.fit(X1, y1)
            # Fit second batch
            learner.fit(X2, y2)
class TransformerTests(_LearnerTests):
    """Tests designed for transformers."""
    def test_transformer_transform(self):
        """Two identically-configured transformers must transform X identically."""
        learner: BaseTransformer = self._get_learner()
        X = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        Xt1 = learner.fit(X).transform(X)
        learner = self._get_learner()
        Xt2 = learner.fit(X).transform(X)
        Xt1 = check_array(Xt1).tolist()
        Xt2 = check_array(Xt2).tolist()
        self.assertListEqual(Xt1, Xt2)
    def test_transformer_inverse_transform(self):
        """inverse_transform(transform(X)) must roughly recover X for at least one data kind."""
        # Inverse transform should work at least with one of continuous or multiclass
        continous_data = generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0)
        categorical_data = generate_array_chars(n=TEST_SAMPLE_SIZE, random_seed=0)
        success = False
        for X in (continous_data, categorical_data):
            learner = self._get_learner()
            try:
                Xt = learner.fit(X).transform(X)
                success = True
            except TypeError:
                # Some transformers only accept continuous or multiclass
                continue
            try:
                X_ = learner.inverse_transform(Xt)
                # Inverse transformation may not be exact, so use fuzzy comparison
                for v1, v2 in zip(X[:100], X_[:100]):
                    self.assertAlmostEqual(v1, v2)
            except NotImplementedError:
                # Having a transformer that does not implement inverse_transform is OK
                pass
        self.assertTrue(success, "fit-transform did not work for continuous or categorical data")
    def test_transformer_input_changed(self):
        """Fit+transform, notify an input-shape change, then fit+transform the reduced shape."""
        learner: BaseTransformer = self._get_learner()
        X1 = generate_array_floats(n=TEST_SAMPLE_SIZE * 16, random_seed=0).reshape(16, -1)
        X2 = generate_array_floats(n=TEST_SAMPLE_SIZE * 8, random_seed=0).reshape(8, -1)
        # Fit first batch
        learner.fit(X1).transform(X1)
        # Send input changed event
        n = 16
        change_map = ChangeMap(n, idx_del=[i * 2 for i in range(n // 2)])
        learner.on_input_shape_changed(change_map)
        # Fit second batch
        learner.fit(X2).transform(X2)
class RegressorTests(_LearnerTests):
def test_regressor_fit_1D(self):
features = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
targets = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
for X in features:
for y in targets:
learner = self._get_learner()
learner.fit(X, y)
def test_regressor_fit_1D_single_sample(self):
features = [
generate_array_floats(n=1, random_seed=0),
generate_array_int_floats(n=1, random_seed=0),
generate_array_ints(n=1, random_seed=0),
generate_array_uints(n=1, random_seed=0),
generate_array_booleans(n=1, random_seed=0),
]
targets = [
generate_array_floats(n=1, random_seed=0),
generate_array_int_floats(n=1, random_seed=0),
generate_array_ints(n=1, random_seed=0),
generate_array_uints(n=1, random_seed=0),
generate_array_booleans(n=1, random_seed=0),
]
for X in features:
for y in targets:
learner = self._get_learner()
learner.fit(X, y)
def test_regressor_fit_2D(self):
learner: BaseRegressor = self._get_learner()
features = [
generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
]
targets = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
for X in features:
for y in targets:
learner = self._get_learner()
learner.fit(X, y)
def test_regressor_fit_3D(self):
features = [
generate_array_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
generate_array_int_floats(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
generate_array_ints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
generate_array_uints(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
generate_array_booleans(n=TEST_SAMPLE_SIZE * 3, random_seed=0).reshape(3, -1),
]
targets = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
for X in features:
for y in targets:
learner = self._get_learner()
learner.fit(X, y)
def test_regressor_predict(self):
features = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
]
targets = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
for X in features:
for y in targets:
learner_ = self._get_learner()
y1 = learner_.fit(X, y).predict(X)
learner_ = self._get_learner()
y2 = learner_.fit(X, y).predict(X)
assert_predictions_match_cloned_learner(self, y1, y2)
learner_ = self._get_learner()
y3 = learner_.fit(X, y).predict(X)
assert_predictions_match_cloned_learner(self, y1, y3)
def test_regressor_train(self):
features = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_int_floats(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_ints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_uints(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
generate_array_booleans(n=TEST_SAMPLE_SIZE * 2, random_seed=0).reshape(2, -1),
]
targets = [
generate_array_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_int_floats(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_ints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_uints(n=TEST_SAMPLE_SIZE, random_seed=0),
generate_array_booleans(n=TEST_SAMPLE_SIZE, random_seed=0),
]
for X in features:
for y in targets:
# Make sure that `train` returns history object
learner1 = self._get_learner()
input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
history = learner1.train(input_fn, max_steps=10)
self.assertEqual(type(history), TrainHistory)
# Make sure that learners predict same data
learner2 = self._get_learner()
input_fn = DataSet.from_ndarray(X, y, random_seed=0).input_fn
learner2_ = learner2.train(input_fn, max_steps=10)
y1 = learner1.predict(X)[:100]
y2 = learner2.predict(X)[:100]
assert_predictions_match_cloned_learner(self, y1, y2)
class ClassifierTests(_LearnerTests):
    """Shared fit/predict/train checks for classifiers.

    Subclasses provide ``_get_learner``.  Every test sweeps all supported
    feature dtypes against all supported target (label) dtypes.

    Fixes over the original: the heavily duplicated array-building code is
    factored into private helpers, and the dead, unused learner created at
    the top of ``test_classifier_fit_3D`` has been removed.
    """

    # Feature dtypes every classifier must accept.
    _FEATURE_GENERATORS = (
        generate_array_floats,
        generate_array_int_floats,
        generate_array_ints,
        generate_array_uints,
        generate_array_booleans,
    )
    # Target (label) dtypes every classifier must accept.
    _TARGET_GENERATORS = (
        generate_array_int_floats,
        generate_array_ints,
        generate_array_uints,
        generate_array_booleans,
        generate_array_chars,
        generate_array_strings,
    )

    def _features_1d(self, n=TEST_SAMPLE_SIZE):
        """1D feature arrays of length *n*, one per supported dtype."""
        return [gen(n=n, random_seed=0) for gen in self._FEATURE_GENERATORS]

    def _features_nd(self, rows):
        """2D feature matrices with *rows* rows, one per supported dtype."""
        return [
            gen(n=TEST_SAMPLE_SIZE * rows, random_seed=0).reshape(rows, -1)
            for gen in self._FEATURE_GENERATORS
        ]

    def _targets(self, n=TEST_SAMPLE_SIZE):
        """1D target arrays of length *n*, one per supported dtype."""
        return [gen(n=n, random_seed=0) for gen in self._TARGET_GENERATORS]

    def _fit_all(self, features, targets):
        """Fit a fresh learner on every (X, y) dtype combination."""
        for X in features:
            for y in targets:
                self._get_learner().fit(X, y)

    def test_classifier_fit_1D(self):
        self._fit_all(self._features_1d(), self._targets())

    def test_classifier_fit_1D_single_sample(self):
        self._fit_all(self._features_1d(n=1), self._targets(n=1))

    def test_classifier_fit_2D(self):
        self._fit_all(self._features_nd(2), self._targets())

    def test_classifier_fit_3D(self):
        self._fit_all(self._features_nd(3), self._targets())

    def test_classifier_predict(self):
        """Freshly fitted learners must produce identical predictions."""
        features = self._features_1d() + self._features_nd(2)
        targets = self._targets()
        for X in features:
            for y in targets:
                y1 = self._get_learner().fit(X, y).predict(X)
                y2 = self._get_learner().fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
                y3 = self._get_learner().fit(X, y).predict(X)
                assert_predictions_match_cloned_learner(self, y1, y3)

    def test_classifier_predict_proba(self):
        pass  # TODO: implement this test

    def test_classifier_train(self):
        """train() must return a TrainHistory and be reproducible across learners."""
        features = self._features_1d() + self._features_nd(2)
        targets = self._targets()
        for X in features:
            for y in targets:
                # `train` must hand back a TrainHistory instance.
                learner1 = self._get_learner()
                history = learner1.train(
                    DataSet.from_ndarray(X, y, random_seed=0).input_fn, max_steps=10
                )
                self.assertEqual(type(history), TrainHistory)
                # A second, identically trained learner must predict the same.
                learner2 = self._get_learner()
                learner2.train(
                    DataSet.from_ndarray(X, y, random_seed=0).input_fn, max_steps=10
                )
                y1 = learner1.predict(X)
                y2 = learner2.predict(X)
                assert_predictions_match_cloned_learner(self, y1, y2)
| true | true |
f73eb47f9800e745fbf266923083ac6ed6610b41 | 11,063 | py | Python | libtbx/citations.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | libtbx/citations.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | libtbx/citations.py | jbeilstenedmands/cctbx_project | c228fb15ab10377f664c39553d866281358195aa | [
"BSD-3-Clause-LBNL"
] | null | null | null | '''
Functionality for handling citations
'''
from __future__ import absolute_import, division, print_function
import importlib
import os
import string
from operator import attrgetter
import libtbx.load_env
import libtbx.phil
from libtbx import str_utils
from libtbx.utils import to_unicode
# =============================================================================
# PHIL definition for citations
# Each `citation` scope describes one article; `article_id` is the key used
# for lookups in `citations_db` below.
master_citation_phil_str = '''
citation
.multiple = True
{
article_id = None
.type = str
.optional = False
caption = None
.type = str
authors = None
.type = str
title = None
.type = str
year = None
.type = int
journal = None
.type = str
volume = None
.type = str
pages = None
.type = str
pmid = None
.type = int
doi_id = None
.type = str
url = None
.type = str
}
'''
master_citation_phil = libtbx.phil.parse(master_citation_phil_str)
# -----------------------------------------------------------------------------
# PHIL definition for journals
# This is used for providing information in CIF blocks
master_journal_phil_str = '''
journal
.multiple = True
{
name = None
.type = str
.multiple = True
name_full = None
.type = str
abbrev_CAS = None
.type = str
.help = Abbreviated name of the cited journal as given in the \
Chemical Abstracts Service Source Index.
id_ASTM = None
.type = str
.help = The American Society for Testing and Materials (ASTM) code \
assigned to the journal cited (also referred to as the CODEN \
designator of the Chemical Abstracts Service).
id_CSD = None
.type = str
.help = The Cambridge Structural Database (CSD) code assigned to the \
journal cited.
id_ISSN = None
.type = str
.help = The International Standard Serial Number (ISSN) code assigned to \
the journal cited.
}
'''
master_journal_phil = libtbx.phil.parse(master_journal_phil_str)
# -----------------------------------------------------------------------------
# Construct common database of citations and journals
# This prevents duplication of citations in individual programs if methods from
# different references are used in multiple programs
# The actual data live in the default file next to this module.
citations_and_journals = libtbx.phil.read_default(__file__)
citations = master_citation_phil.fetch(source=citations_and_journals).extract()
# article_id -> citation scope
citations_db = dict( [ (c.article_id, c) for c in citations.citation ] )
# journal name (any alias) -> journal scope
journals_db = dict()
journals = master_journal_phil.fetch(source=citations_and_journals).extract()
for journal in journals.journal:
    for name in journal.name:
        journals_db[name] = journal
# =============================================================================
def format_citation(article):
    """Return a plain-text citation string for a PHIL ``citation`` scope.

    Authors are rendered as "A, B, and C"; year, title, journal, volume
    and pages are appended when present.  Acta Cryst. journals get the
    section letter fused to the volume (e.g. "Acta Cryst. D66").
    """
    author_list = article.authors.split(", ")
    if len(author_list) == 1:
        authors_out = article.authors
    else:
        authors_out = ", ".join(author_list[:-1]) + ", and %s" % author_list[-1]
    # BUG FIX: the formatted author list was previously computed and then
    # discarded — the raw `authors` string was interpolated instead.
    output = "%s." % authors_out
    if article.year is not None:
        output += " (%d)" % article.year
    title = article.title
    if title is not None:
        title = title.strip()
        if not title.endswith("."):
            title += "."
        output += " %s" % title
    if article.journal is not None:
        output += " %s" % article.journal
    if article.volume is not None:
        if article.journal is not None and 'Acta Cryst. ' in article.journal:
            # special case for Acta Cryst journals to get e.g.:
            # Acta Cryst. D66
            output += "%s" % article.volume
        else:
            output += " %s" % article.volume
    if article.pages is not None:
        if article.volume is not None:
            output += ":%s" % article.pages
        else:
            output += ", pp. %s" % article.pages
    if output[-1] != '.':
        output += "."
    return output
# -----------------------------------------------------------------------------
def author_list_with_periods(authors, initials_first=False):
    """Split "Last FM, Last2 G" into names with dotted initials ("Last F.M.").

    Single-word names (e.g. consortium names) are kept unchanged.  With
    ``initials_first`` the dotted initials precede the surname.
    """
    formatted = []
    for author in authors.split(", "):
        parts = author.split(" ")
        if len(parts) == 1:
            # No initials present — keep the name as-is.
            formatted.append(parts[0])
            continue
        # Dot each letter of the initials block; keep joiners like '-'.
        dotted = "".join(
            c + "." if c in string.ascii_letters else c for c in parts[-1]
        )
        surname = " ".join(parts[:-1])
        if initials_first:
            formatted.append("%s %s" % (dotted, surname))
        else:
            formatted.append("%s %s" % (surname, dotted))
    return formatted
# -----------------------------------------------------------------------------
def format_citation_cell(article):
    """Format a citation Cell-press style: "Authors (year). Title. Journal vol, pages."."""
    names = author_list_with_periods(article.authors)
    if len(names) == 1:
        byline = names[0]
    else:
        byline = "%s, and %s" % (", ".join(names[:-1]), names[-1])
    text = byline  # XXX no extra period after the author list in this style
    if article.year is not None:
        text += " (%d)." % article.year
    if article.title is not None:
        title = article.title.strip()
        if not title.endswith("."):
            title += "."
        text += " %s" % title
    if article.journal is not None:
        text += " %s" % article.journal
    if article.volume is not None:
        # Acta Cryst. volumes fuse with the section letter, e.g. "Acta Cryst. D66".
        acta = article.journal is not None and "Acta Cryst. " in article.journal
        text += "%s%s" % ("" if acta else " ", article.volume)
    if article.pages is not None:
        text += (", %s" if article.volume is not None else ", pp. %s") % article.pages
    if text[-1] != ".":
        text += "."
    return text
# -----------------------------------------------------------------------------
def format_citation_iucr(article):
    """Format a citation IUCr style: "Authors (year). Journal vol, pages." (no title)."""
    names = author_list_with_periods(article.authors)
    if len(names) == 1:
        byline = names[0]
    else:
        byline = "%s, & %s" % (", ".join(names[:-1]), names[-1])
    text = byline
    if article.year is not None:
        text += " (%d)." % article.year
    if article.journal is not None:
        text += " %s" % article.journal
    if article.volume is not None:
        # Acta Cryst. volumes fuse with the section letter, e.g. "Acta Cryst. D66".
        acta = article.journal is not None and "Acta Cryst. " in article.journal
        text += "%s%s" % ("" if acta else " ", article.volume)
    if article.pages is not None:
        text += (", %s" if article.volume is not None else ", pp. %s") % article.pages
    if text[-1] != ".":
        text += "."
    return text
# -----------------------------------------------------------------------------
def format_citation_doc(article_id):
    """Return an HTML ``<ul>`` of formatted citations for *article_id*.

    First consults the module-level ``citations_db``; when the id is not
    there, scans every configured program directory for a program template
    module named *article_id* and formats the citations it declares.

    Raises ValueError when a matching program template lacks a ``Program``
    attribute.
    """
    # check database
    article = citations_db.get(article_id)
    output = '<ul>'
    # check program templates
    if (article is None):
        # construct dictionary of program templates
        modules_dict = dict()
        for module in libtbx.env.module_list:
            for p in module.program_directory_paths():
                modules = list()
                for f in p.listdir():
                    # keep only importable .py modules (skip __init__ / hidden)
                    if ( f.endswith('.py') and (f != '__init__.py') and
                            (not f.startswith('.')) ):
                        basename = os.path.splitext(os.path.basename(f))[0]
                        modules.append(basename)
                if (len(modules) > 0):
                    modules_dict[module.name] = modules
        # find specific program template by article_id
        for module in modules_dict.keys():
            for package in modules_dict[module]:
                if (package == article_id):
                    importlib.import_module(module)
                    program_template = importlib.import_module(
                        '.' + package, package='.'.join([module, 'programs']))
                    if (hasattr(program_template, 'Program')):
                        working_phil = master_citation_phil.fetch(
                            source=program_template.Program.citations)
                        for article in working_phil.extract().citation:
                            output += '<li>'
                            output += format_citation_html(article)
                            output += '</li>\n'
                    else:
                        raise ValueError('Citations for %s could not be found.' % article_id)
                    # stop after the first matching package
                    # NOTE(review): indentation was mangled in extraction —
                    # confirm this break belongs inside the `if` above.
                    break
    else:
        output += '<li>'
        output += format_citation_html(article)
        output += '</li>\n'
    output += '</ul>'
    return output
# -----------------------------------------------------------------------------
def format_citation_html(article):
    """Format a citation as an HTML fragment, linking via URL, DOI or PubMed id.

    Raises ValueError when the journal name is missing.
    """
    if article.journal is None:
        raise ValueError("Missing journal name for '%s'." % article.article_id)
    names = author_list_with_periods(article.authors, initials_first=True)
    if len(names) == 1:
        byline = names[0]
    else:
        byline = ", ".join(names[:-1]) + ", and %s" % names[-1]
    title = article.title.strip()
    if not title.endswith("."):
        title += "."
    html = "<b>%s</b> %s. " % (title, byline)
    if "Acta Cryst." in article.journal:
        # Split off the section letter so it can be fused with the volume.
        section = article.journal.split("Acta Cryst. ")[1]
        ref = "<i>Acta Cryst.</i>"
    else:
        section = None
        ref = "<i>%s</i>" % article.journal
    if article.volume is not None:
        if section is not None:
            ref += " %s<b>%s</b>" % (section, article.volume)
        else:
            ref += " <b>%s</b>" % article.volume
    if article.pages is not None:
        ref += ", %s" % article.pages
    if article.year is not None:
        ref += " (%s)" % article.year
    if article.url is not None:
        html += """<a href="%s">%s</a>.""" % (article.url, ref)
    elif article.doi_id is not None:
        html += """<a href="https://doi.org/%s">%s</a>.""" % (article.doi_id, ref)
    elif article.pmid is not None:
        html += """<a href="http://www.ncbi.nlm.nih.gov/pubmed/%s">%s</a>.""" % \
            (article.pmid, ref)
    else:
        html += " %s." % ref
    return html
# -----------------------------------------------------------------------------
def show_citation(article, out=None, max_width=79, format='default'):
    """Print one formatted citation to *out*, wrapped at *max_width* columns.

    *format* is one of 'default', 'iucr' or 'cell'.

    Fix: an unrecognised *format* previously left ``output`` unbound and
    crashed with UnboundLocalError; it now raises a clear ValueError.
    """
    formatters = {
        'default': format_citation,
        'iucr': format_citation_iucr,
        'cell': format_citation_cell,
    }
    formatter = formatters.get(format)
    if formatter is None:
        raise ValueError("Unknown citation format '%s'" % format)
    output = formatter(article)
    if max_width is None or max_width < 1:
        print(to_unicode(output), file=out)
    else:
        for line in str_utils.line_breaker(output, max_width):
            print(to_unicode(line), file=out)
        print(to_unicode(''), file=out)
def show_citations(articles, out=None, max_width=79, sort_by_name=True,
                   format='default'):
    """Print several citations; optionally sort *articles* in place by author string."""
    if sort_by_name:
        articles.sort(key=attrgetter('authors'))
    for entry in articles:
        show_citation(entry, out, max_width, format)
# =============================================================================
# end
| 35.120635 | 81 | 0.580313 | from __future__ import absolute_import, division, print_function
import importlib
import os
import string
from operator import attrgetter
import libtbx.load_env
import libtbx.phil
from libtbx import str_utils
from libtbx.utils import to_unicode
# PHIL definition for citations; `article_id` is the lookup key used below.
master_citation_phil_str = '''
citation
.multiple = True
{
article_id = None
.type = str
.optional = False
caption = None
.type = str
authors = None
.type = str
title = None
.type = str
year = None
.type = int
journal = None
.type = str
volume = None
.type = str
pages = None
.type = str
pmid = None
.type = int
doi_id = None
.type = str
url = None
.type = str
}
'''
master_citation_phil = libtbx.phil.parse(master_citation_phil_str)
# PHIL definition for journals (used for providing information in CIF blocks).
master_journal_phil_str = '''
journal
.multiple = True
{
name = None
.type = str
.multiple = True
name_full = None
.type = str
abbrev_CAS = None
.type = str
.help = Abbreviated name of the cited journal as given in the \
Chemical Abstracts Service Source Index.
id_ASTM = None
.type = str
.help = The American Society for Testing and Materials (ASTM) code \
assigned to the journal cited (also referred to as the CODEN \
designator of the Chemical Abstracts Service).
id_CSD = None
.type = str
.help = The Cambridge Structural Database (CSD) code assigned to the \
journal cited.
id_ISSN = None
.type = str
.help = The International Standard Serial Number (ISSN) code assigned to \
the journal cited.
}
'''
master_journal_phil = libtbx.phil.parse(master_journal_phil_str)
# Build the shared lookup tables from the packaged defaults file.
citations_and_journals = libtbx.phil.read_default(__file__)
citations = master_citation_phil.fetch(source=citations_and_journals).extract()
# article_id -> citation scope
citations_db = dict( [ (c.article_id, c) for c in citations.citation ] )
# journal name (any alias) -> journal scope
journals_db = dict()
journals = master_journal_phil.fetch(source=citations_and_journals).extract()
for journal in journals.journal:
    for name in journal.name:
        journals_db[name] = journal
def format_citation(article):
    """Return a plain-text citation string for a PHIL ``citation`` scope."""
    authors = article.authors
    author_list = authors.split(", ")
    if len(author_list) == 1:
        authors_out = authors
    else:
        authors_out = ", ".join(author_list[:-1]) + ", and %s" % author_list[-1]
    # NOTE(review): `authors_out` is computed but never used — the raw
    # `authors` string is interpolated below; confirm which was intended.
    output = "%s." % authors
    if article.year is not None:
        output += " (%d)" % article.year
    title = article.title
    if (title is not None):
        title = title.strip()
        if (not title.endswith(".")):
            title += "."
        output += " %s" % title
    if article.journal is not None:
        output += " %s" % article.journal
    if article.volume is not None:
        if article.journal is not None and 'Acta Cryst. ' in article.journal:
            # Acta Cryst. volumes fuse with the section letter, e.g. "Acta Cryst. D66".
            output += "%s" % article.volume
        else:
            output += " %s" % article.volume
    if article.pages is not None:
        if article.volume is not None:
            output += ":%s" % article.pages
        else:
            output += ", pp. %s" % article.pages
    if output[-1] != '.':
        output += "."
    return output
def author_list_with_periods(authors, initials_first=False):
    """Split "Last FM, Last2 G" into names with dotted initials ("Last F.M.")."""
    author_list = authors.split(", ")
    authors_formatted = []
    for author in author_list:
        names = author.split(" ")
        if len(names) == 1:
            # No initials present (e.g. a consortium name) — keep as-is.
            authors_formatted.append(names[0])
        else:
            initials = names[-1]
            new_initials = ""
            for letter in initials:
                if letter in string.ascii_letters:
                    new_initials += ("%s." % letter)
                else:
                    # usually '-' between hyphenated initials
                    new_initials += letter
            if initials_first:
                reformatted = "%s %s" % (new_initials, " ".join(names[:-1]))
            else:
                reformatted = "%s %s" % (" ".join(names[:-1]), new_initials)
            authors_formatted.append(reformatted)
    return authors_formatted
def format_citation_cell(article):
    """Format a citation Cell-press style: "Authors (year). Title. Journal vol, pages."."""
    author_list = author_list_with_periods(article.authors)
    if len(author_list) == 1:
        authors_out = author_list[0]
    else:
        authors_out = ", ".join(author_list[:-1]) + ", and %s" % author_list[-1]
    # no extra period after the author list in this style
    output = "%s" % authors_out
    if article.year is not None:
        output += " (%d)." % article.year
    title = article.title
    if (title is not None):
        title = title.strip()
        if (not title.endswith(".")):
            title += "."
        output += " %s" % title
    if article.journal is not None:
        output += " %s" % article.journal
    if article.volume is not None:
        if article.journal is not None and 'Acta Cryst. ' in article.journal:
            # Acta Cryst. volumes fuse with the section letter, e.g. "Acta Cryst. D66".
            output += "%s" % article.volume
        else:
            output += " %s" % article.volume
    if article.pages is not None:
        if article.volume is not None:
            output += ", %s" % article.pages
        else:
            output += ", pp. %s" % article.pages
    if output[-1] != '.':
        output += "."
    return output
def format_citation_iucr(article):
    """Format a citation IUCr style: "Authors (year). Journal vol, pages." (no title)."""
    author_list = author_list_with_periods(article.authors)
    if len(author_list) == 1:
        authors_out = author_list[0]
    else:
        authors_out = ", ".join(author_list[:-1]) + ", & %s" % author_list[-1]
    output = "%s" % authors_out
    if article.year is not None:
        output += " (%d)." % article.year
    if article.journal is not None:
        output += " %s" % article.journal
    if article.volume is not None:
        if article.journal is not None and 'Acta Cryst. ' in article.journal:
            # Acta Cryst. volumes fuse with the section letter, e.g. "Acta Cryst. D66".
            output += "%s" % article.volume
        else:
            output += " %s" % article.volume
    if article.pages is not None:
        if article.volume is not None:
            output += ", %s" % article.pages
        else:
            output += ", pp. %s" % article.pages
    if output[-1] != '.':
        output += "."
    return output
def format_citation_doc(article_id):
    """Return an HTML ``<ul>`` of formatted citations for *article_id*.

    Falls back to scanning program template modules when the id is not in
    ``citations_db``; raises ValueError when a matching template lacks a
    ``Program`` attribute.
    """
    article = citations_db.get(article_id)
    output = '<ul>'
    if (article is None):
        # Build {module name: [program template basenames]} for all modules.
        modules_dict = dict()
        for module in libtbx.env.module_list:
            for p in module.program_directory_paths():
                modules = list()
                for f in p.listdir():
                    if ( f.endswith('.py') and (f != '__init__.py') and
                            (not f.startswith('.')) ):
                        basename = os.path.splitext(os.path.basename(f))[0]
                        modules.append(basename)
                if (len(modules) > 0):
                    modules_dict[module.name] = modules
        # Find the specific program template matching article_id.
        for module in modules_dict.keys():
            for package in modules_dict[module]:
                if (package == article_id):
                    importlib.import_module(module)
                    program_template = importlib.import_module(
                        '.' + package, package='.'.join([module, 'programs']))
                    if (hasattr(program_template, 'Program')):
                        working_phil = master_citation_phil.fetch(
                            source=program_template.Program.citations)
                        for article in working_phil.extract().citation:
                            output += '<li>'
                            output += format_citation_html(article)
                            output += '</li>\n'
                    else:
                        raise ValueError('Citations for %s could not be found.' % article_id)
                    # stop after the first matching package
                    # NOTE(review): indentation was mangled in extraction —
                    # confirm this break belongs inside the `if` above.
                    break
    else:
        output += '<li>'
        output += format_citation_html(article)
        output += '</li>\n'
    output += '</ul>'
    return output
def format_citation_html(article):
    """Format a citation as an HTML fragment, linking via URL, DOI or PubMed id.

    Raises ValueError when the journal name is missing.
    """
    if (article.journal is None):
        raise ValueError("Missing journal name for '%s'." % article.article_id)
    author_list = author_list_with_periods(article.authors, initials_first=True)
    if len(author_list) == 1:
        authors_out = author_list[0]
    else:
        authors_out = ", ".join(author_list[:-1]) + ", and %s" % author_list[-1]
    title = article.title.strip()
    if (not title.endswith(".")):
        title += "."
    output = "<b>%s</b> %s. " % (title, authors_out)
    if 'Acta Cryst.' in article.journal:
        # Split off the section letter so it can be fused with the volume.
        journal_ref = "<i>Acta Cryst.</i>"
        journal_section = article.journal.split("Acta Cryst. ")[1]
    else:
        journal_ref = "<i>%s</i>" % article.journal
        journal_section = None
    if (article.volume is not None):
        if journal_section is not None:
            journal_ref += " %s<b>%s</b>" %(journal_section, article.volume)
        else:
            journal_ref += " <b>%s</b>" % article.volume
    if (article.pages is not None):
        journal_ref += ", %s" % article.pages
    if (article.year is not None):
        journal_ref += " (%s)" % article.year
    # Link preference: explicit URL, then DOI, then PubMed id.
    if (article.url is not None):
        output += """<a href="%s">%s</a>.""" % (article.url, journal_ref)
    elif (article.doi_id is not None):
        output += """<a href="https://doi.org/%s">%s</a>.""" % (article.doi_id,
            journal_ref)
    elif (article.pmid is not None):
        output += """<a href="http://www.ncbi.nlm.nih.gov/pubmed/%s">%s</a>.""" % \
            (article.pmid, journal_ref)
    else:
        output += " %s." % journal_ref
    return output
def show_citation(article, out=None, max_width=79, format='default'):
    """Print one formatted citation to *out*, wrapped at *max_width* columns."""
    if format == 'default':
        output = format_citation(article)
    elif format == 'iucr':
        output = format_citation_iucr(article)
    elif format == 'cell':
        output = format_citation_cell(article)
    # NOTE(review): an unrecognised `format` leaves `output` unbound and
    # raises UnboundLocalError below — consider an explicit ValueError.
    if max_width is None or max_width < 1:
        print(to_unicode(output), file=out)
    else:
        for line in str_utils.line_breaker(output, max_width):
            print(to_unicode(line), file=out)
        print(to_unicode(''), file=out)
def show_citations(articles, out=None, max_width=79, sort_by_name=True,
                   format='default'):
    """Print several citations; optionally sort *articles* in place by author string."""
    if (sort_by_name):
        articles.sort(key=attrgetter('authors'))
    for article in articles:
        show_citation(article, out, max_width, format)
| true | true |
f73eb4fbf5785b8c8fcb4df51a58337a3c02c35d | 5,877 | py | Python | pythia/utils/general.py | winnerineast/pythia | b6fe288405490f6e02a3e59dbf32a181aee35645 | [
"BSD-3-Clause"
] | null | null | null | pythia/utils/general.py | winnerineast/pythia | b6fe288405490f6e02a3e59dbf32a181aee35645 | [
"BSD-3-Clause"
] | null | null | null | pythia/utils/general.py | winnerineast/pythia | b6fe288405490f6e02a3e59dbf32a181aee35645 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import collections
import gc
import os
from bisect import bisect
import requests
import torch
import tqdm
import yaml
from torch import nn
def lr_lambda_update(i_iter, cfg):
    """Learning-rate multiplier for iteration *i_iter*.

    During warmup the factor ramps linearly from ``warmup_factor`` to 1;
    afterwards it decays by ``lr_ratio`` at each threshold in ``lr_steps``.
    """
    params = cfg["training_parameters"]
    if params["use_warmup"] is True and i_iter <= params["warmup_iterations"]:
        alpha = float(i_iter) / float(params["warmup_iterations"])
        return params["warmup_factor"] * (1.0 - alpha) + alpha
    # Number of decay steps already passed determines the exponent.
    n_steps = bisect(params["lr_steps"], i_iter)
    return pow(params["lr_ratio"], n_steps)
def clip_gradients(model, i_iter, writer, config):
    """Clip model gradients in place and log the pre-clip norm to *writer*.

    ``clip_norm_mode`` selects 'all' (every parameter) or 'question' (only
    the question-embedding module).  No-op when ``max_grad_l2_norm`` is None.

    Fix: the 'question' branch used the deprecated, non-in-place-named
    ``nn.utils.clip_grad_norm``; it now uses ``clip_grad_norm_`` like the
    'all' branch.
    """
    # TODO: Fix question model retrieval
    max_grad_l2_norm = config["training_parameters"]["max_grad_l2_norm"]
    clip_norm_mode = config["training_parameters"]["clip_norm_mode"]

    if max_grad_l2_norm is not None:
        if clip_norm_mode == "all":
            norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)
            writer.add_scalars({"grad_norm": norm}, i_iter)
        elif clip_norm_mode == "question":
            question_embedding = model.module.question_embedding_module
            norm = nn.utils.clip_grad_norm_(
                question_embedding.parameters(), max_grad_l2_norm
            )
            writer.add_scalars({"question_grad_norm": norm}, i_iter)
        else:
            raise NotImplementedError(
                "Clip norm mode %s not implemented" % clip_norm_mode
            )
def ckpt_name_from_core_args(config):
    """Build the "<tasks>_<datasets>_<model>_<seed>" checkpoint name prefix."""
    seed = config["training_parameters"]["seed"]
    return "{}_{}_{}_{:d}".format(
        config["tasks"], config["datasets"], config["model"], seed
    )
def foldername_from_config_override(args):
    """Derive a filesystem-safe folder-name suffix from a config override.

    *args* may carry the override either as an attribute or as a dict key;
    returns "" when there is no (non-empty) override.
    """
    if hasattr(args, "config_override"):
        override = args.config_override
    elif "config_override" in args:
        override = args["config_override"]
    else:
        override = None

    if override is None or len(override) == 0:
        return ""
    # Flow-style YAML gives a compact one-line rendering of the override,
    # which is then sanitised for use in a path component.
    name = yaml.safe_dump(override, default_flow_style=True)
    name = name.replace(":", ".").replace("\n", " ").replace("/", "_")
    name = " ".join(name.split())
    name = name.replace(". ", ".").replace(" ", "_")
    return "_" + name
def get_pythia_root():
    """Return the absolute pythia package root, caching it in the registry."""
    from pythia.common.registry import registry

    root = registry.get("pythia_root", no_warning=True)
    if root is None:
        # This module lives one level below the package root.
        root = os.path.abspath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
        )
        registry.register("pythia_root", root)
    return root
def download_file(url, output_dir=".", filename=""):
    """Stream a file from ``url`` into ``output_dir`` with a tqdm progress bar.

    Args:
        url: direct HTTP(S) link to the file.
        output_dir: directory to create (if needed) and write into.
        filename: target file name; defaults to the last URL path segment.
    """
    if len(filename) == 0:
        # Default to the final path component of the URL (no redundant "./").
        filename = url.split("/")[-1]
    os.makedirs(output_dir, exist_ok=True)
    filename = os.path.join(output_dir, filename)

    # Use the response as a context manager so the connection is released
    # even if the transfer fails midway (the original leaked it).
    with requests.get(url, stream=True) as r:
        file_size = int(r.headers["Content-Length"])
        chunk_size = 1024 * 1024
        # Round up so the bar total accounts for the final partial chunk.
        num_bars = -(-file_size // chunk_size)
        with open(filename, "wb") as fh:
            for chunk in tqdm.tqdm(
                r.iter_content(chunk_size=chunk_size),
                total=num_bars,
                unit="MB",
                desc=filename,
                leave=True,
            ):
                fh.write(chunk)
def get_optimizer_parameters(model, config):
    """Return the parameters (or parameter groups) the optimizer should train.

    A model may expose a custom ``get_optimizer_parameters(config)`` hook;
    when it does, that hook wins over the plain ``model.parameters()``. For
    ``nn.DataParallel`` wrappers the hook is looked up on the wrapped module.
    """
    params = model.parameters()
    if hasattr(model, "get_optimizer_parameters"):
        params = model.get_optimizer_parameters(config)
    # DataParallel hides the real model behind ``.module``; its hook,
    # if present, takes precedence.
    if isinstance(model, nn.DataParallel) and hasattr(
        model.module, "get_optimizer_parameters"
    ):
        params = model.module.get_optimizer_parameters(config)
    return params
def dict_to_string(dictionary):
    """Format a metrics mapping as ``"key: 1.2345, other: 0.5000"``.

    Returns ``""`` for ``None``. Values exposing ``.item()`` (zero-dim
    tensors / numpy scalars) are unwrapped to plain Python numbers first.
    """
    if dictionary is None:
        return ""
    pieces = []
    for name, value in dictionary.items():
        if hasattr(value, "item"):
            value = value.item()
        pieces.append("%s: %.4f" % (name, value))
    return ", ".join(pieces)
def get_overlap_score(candidate, target):
    """Takes a candidate word and a target word and returns the overlap
    score between the two.

    The longer of the two words is treated as the candidate. The shorter
    one is trimmed from the right until (at length >= 2) it occurs inside
    the candidate; the score is matched-length / candidate-length, or 0.0
    when no piece of at least two characters matches.

    Parameters
    ----------
    candidate : str
        Candidate word whose overlap has to be detected.
    target : str
        Target word against which the overlap will be detected

    Returns
    -------
    float
        Overlap score betwen candidate and the target.
    """
    if len(candidate) < len(target):
        candidate, target = target, candidate

    probe = target
    while len(probe) >= 2:
        if probe in candidate:
            return len(probe) * 1.0 / len(candidate)
        probe = probe[:-1]
    return 0.0
def updir(d, n):
    """Given path d, go up n dirs from d and return that path"""
    for _ in range(n):
        d = os.path.dirname(d)
    return d
def print_cuda_usage():
    """Print current and peak CUDA memory usage (allocated and cached) in MiB.

    NOTE(review): ``memory_cached``/``max_memory_cached`` are deprecated
    aliases for ``memory_reserved``/``max_memory_reserved`` in recent
    PyTorch releases -- confirm the pinned torch version before renaming.
    """
    print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
    print("Max Memory Allocated:", torch.cuda.max_memory_allocated() / (1024 * 1024))
    print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
    print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def get_current_tensors():
    """Print the type and size of every live torch tensor tracked by gc.

    Debugging aid for hunting tensor leaks. Objects that raise while being
    inspected (e.g. partially initialized ones) are skipped silently.
    """
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (
                hasattr(obj, "data") and torch.is_tensor(obj.data)
            ):
                print(type(obj), obj.size())
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed by a diagnostics helper.
            pass
| 29.984694 | 86 | 0.627361 |
import collections
import gc
import os
from bisect import bisect
import requests
import torch
import tqdm
import yaml
from torch import nn
def lr_lambda_update(i_iter, cfg):
if (
cfg["training_parameters"]["use_warmup"] is True
and i_iter <= cfg["training_parameters"]["warmup_iterations"]
):
alpha = float(i_iter) / float(cfg["training_parameters"]["warmup_iterations"])
return cfg["training_parameters"]["warmup_factor"] * (1.0 - alpha) + alpha
else:
idx = bisect(cfg["training_parameters"]["lr_steps"], i_iter)
return pow(cfg["training_parameters"]["lr_ratio"], idx)
def clip_gradients(model, i_iter, writer, config):
max_grad_l2_norm = config["training_parameters"]["max_grad_l2_norm"]
clip_norm_mode = config["training_parameters"]["clip_norm_mode"]
if max_grad_l2_norm is not None:
if clip_norm_mode == "all":
norm = nn.utils.clip_grad_norm_(model.parameters(), max_grad_l2_norm)
writer.add_scalars({"grad_norm": norm}, i_iter)
elif clip_norm_mode == "question":
question_embedding = model.module.question_embedding_module
norm = nn.utils.clip_grad_norm(
question_embedding.parameters(), max_grad_l2_norm
)
writer.add_scalars({"question_grad_norm": norm}, i_iter)
else:
raise NotImplementedError(
"Clip norm mode %s not implemented" % clip_norm_mode
)
def ckpt_name_from_core_args(config):
return "%s_%s_%s_%d" % (
config["tasks"],
config["datasets"],
config["model"],
config["training_parameters"]["seed"],
)
def foldername_from_config_override(args):
cfg_override = None
if hasattr(args, "config_override"):
cfg_override = args.config_override
elif "config_override" in args:
cfg_override = args["config_override"]
folder_name = ""
if cfg_override is not None and len(cfg_override) > 0:
folder_name = yaml.safe_dump(cfg_override, default_flow_style=True)
folder_name = folder_name.replace(":", ".").replace("\n", " ")
folder_name = folder_name.replace("/", "_")
folder_name = " ".join(folder_name.split())
folder_name = folder_name.replace(". ", ".").replace(" ", "_")
folder_name = "_" + folder_name
return folder_name
def get_pythia_root():
from pythia.common.registry import registry
pythia_root = registry.get("pythia_root", no_warning=True)
if pythia_root is None:
pythia_root = os.path.dirname(os.path.abspath(__file__))
pythia_root = os.path.abspath(os.path.join(pythia_root, ".."))
registry.register("pythia_root", pythia_root)
return pythia_root
def download_file(url, output_dir=".", filename=""):
if len(filename) == 0:
filename = os.path.join(".", url.split("/")[-1])
os.makedirs(output_dir, exist_ok=True)
filename = os.path.join(output_dir, filename)
r = requests.get(url, stream=True)
file_size = int(r.headers["Content-Length"])
chunk_size = 1024 * 1024
num_bars = int(file_size / chunk_size)
with open(filename, "wb") as fh:
for chunk in tqdm.tqdm(
r.iter_content(chunk_size=chunk_size),
total=num_bars,
unit="MB",
desc=filename,
leave=True,
):
fh.write(chunk)
def get_optimizer_parameters(model, config):
parameters = model.parameters()
has_custom = hasattr(model, "get_optimizer_parameters")
if has_custom:
parameters = model.get_optimizer_parameters(config)
is_parallel = isinstance(model, nn.DataParallel)
if is_parallel and hasattr(model.module, "get_optimizer_parameters"):
parameters = model.module.get_optimizer_parameters(config)
return parameters
def dict_to_string(dictionary):
logs = []
if dictionary is None:
return ""
for key, val in dictionary.items():
if hasattr(val, "item"):
val = val.item()
logs.append("%s: %.4f" % (key, val))
return ", ".join(logs)
def get_overlap_score(candidate, target):
if len(candidate) < len(target):
temp = candidate
candidate = target
target = temp
overlap = 0.0
while len(target) >= 2:
if target in candidate:
overlap = len(target)
return overlap * 1.0 / len(candidate)
else:
target = target[:-1]
return 0.0
def updir(d, n):
ret_val = d
for _ in range(n):
ret_val = os.path.dirname(ret_val)
return ret_val
def print_cuda_usage():
print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024))
print("Max Memory Allocated:", torch.cuda.max_memory_allocated() / (1024 * 1024))
print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024))
print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024))
def get_current_tensors():
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (
hasattr(obj, "data") and torch.is_tensor(obj.data)
):
print(type(obj), obj.size())
except:
pass
| true | true |
f73eb5b6fefe38addcf70235bf748054ab734316 | 235 | py | Python | python-100-examples/test65.py | zengxianbin/Practice | d0ca7fe8c52f3c0b10fd44f9a52914d5359821fa | [
"MIT"
] | 2 | 2020-04-16T15:33:06.000Z | 2020-04-16T15:33:15.000Z | python-100-examples/test65.py | zengxianbin/Practice | d0ca7fe8c52f3c0b10fd44f9a52914d5359821fa | [
"MIT"
] | null | null | null | python-100-examples/test65.py | zengxianbin/Practice | d0ca7fe8c52f3c0b10fd44f9a52914d5359821fa | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# -*- coding: UTF-8 -*-
class Solution(object):
    def test65(self):
        """Exercise 65: "a most beautiful pattern" (placeholder, returns "")."""
        return ""


if __name__ == "__main__":
    Solution().test65()
| 19.583333 | 26 | 0.52766 |
class Solution(object):
def test65(self):
return ""
if __name__ == "__main__":
solution = Solution()
solution.test65()
| true | true |
f73eb7ef400a4a4bfb9b313a941d4c94b35f9a93 | 13,163 | py | Python | mmdet/models/detectors/two_stage_with_MetaEmbedding.py | Qianna00/mmdetection | 31e7dff4c61000002d27117543b85e68d2619b4c | [
"Apache-2.0"
] | 1 | 2021-08-01T08:44:35.000Z | 2021-08-01T08:44:35.000Z | mmdet/models/detectors/two_stage_with_MetaEmbedding.py | Qianna00/mmdetection | 31e7dff4c61000002d27117543b85e68d2619b4c | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/two_stage_with_MetaEmbedding.py | Qianna00/mmdetection | 31e7dff4c61000002d27117543b85e68d2619b4c | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from tqdm import tqdm
from mmdet.datasets import build_dataloader, build_dataset
from mmcv import Config
from mmdet.core import bbox2roi
from functools import partial
from torch.utils.data.dataloader import DataLoader
@DETECTORS.register_module()
class TwoStageDetectorMetaEmbedding(BaseDetector):
    """Base class for two-stage detectors.
    Two-stage detectors typically consisting of a region proposal network and a
    task-specific regression head.
    """
    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 init_centroids=False,
                 pretrained=None):
        # Backbone is mandatory; neck/rpn/roi heads are built only if given.
        super(TwoStageDetectorMetaEmbedding, self).__init__()
        self.backbone = build_backbone(backbone)
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.init_centroids = init_centroids
        if neck is not None:
            self.neck = build_neck(neck)
        if rpn_head is not None:
            # Propagate the rpn-specific train/test sub-configs into the head.
            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        """if self.init_centroids:
            for p in self.parameters():
                p.requires_grad = False"""
        if roi_head is not None:
            # update train and test cfg here for now
            # TODO: refactor assigner & sampler
            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)
            if self.init_centroids:
                self.centroids = self.roi_head.loss_feat.centroids.data
            else:
                self.centroids = None
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)
        if roi_head["type"] == "MetaEmbedding_RoIHead":
            # calculate init_centroids using training dataset
            if self.train_cfg is not None:
                if init_centroids:
                    # NOTE(review): hard-coded absolute config path; presumably
                    # only valid inside the project's container -- confirm.
                    cfg = Config.fromfile(
                        "/mmdetection/configs/faster_rcnn_meta/faster_rcnn_r50_c4_meta_smd_stage2.py")
                    dataset = build_dataset(cfg.centroids_cal)
                    # data = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=0, num_gpus=1, shuffle=False)
                    # print(data[0])
                    self.roi_head.loss_feat.centroids.data = self.centroids_cal(dataset)
    @property
    def with_rpn(self):
        # True when an RPN head was configured for this detector.
        return hasattr(self, 'rpn_head') and self.rpn_head is not None
    @property
    def with_roi_head(self):
        # True when a RoI head was configured for this detector.
        return hasattr(self, 'roi_head') and self.roi_head is not None
    def init_weights(self, pretrained=None):
        """Initialize backbone/neck/rpn/roi weights, optionally from ``pretrained``."""
        super(TwoStageDetectorMetaEmbedding, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)
    def extract_feat(self, img):
        """Directly extract features from the backbone+neck
        """
        x = self.backbone(img)
        if self.with_neck:
            x = self.neck(x)
        return x
    def forward_dummy(self, img):
        """Used for computing network flops.
        See `mmdetection/tools/get_flops.py`
        """
        outs = ()
        # backbone
        x = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = outs + (rpn_outs, )
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = outs + (roi_outs, )
        return outs
    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.
            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.
        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        x = self.extract_feat(img)
        losses = dict()
        # RPN forward and loss
        if self.with_rpn:
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            rpn_losses, proposal_list = self.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg)
            losses.update(rpn_losses)
        else:
            proposal_list = proposals
        """roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                                 gt_bboxes, gt_labels,
                                                 gt_bboxes_ignore, gt_masks,
                                                 **kwargs)"""
        # The RoI head is invoked directly; test=False selects the training
        # path and the meta-embedding centroids are passed through.
        roi_losses = self.roi_head(x,
                                   centroids=self.centroids,
                                   img_metas=img_metas,
                                   proposal_list=proposal_list,
                                   gt_bboxes=gt_bboxes,
                                   gt_labels=gt_labels,
                                   gt_bboxes_ignore=gt_bboxes_ignore,
                                   gt_masks=gt_masks,
                                   test=False,
                                   **kwargs)
        losses.update(roi_losses)
        return losses
    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if proposals is None:
            proposal_list = await self.rpn_head.async_simple_test_rpn(
                x, img_meta)
        else:
            proposal_list = proposals
        return await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale)
    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        # assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals
        # test=True selects the inference path of the meta-embedding RoI head.
        return self.roi_head(x,
                             centroids=self.centroids,
                             proposal_list=proposal_list,
                             img_metas=img_metas,
                             test=True)
    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.
        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        # recompute feats to save memory
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)
    def centroids_cal(self, data):
        """Compute per-class feature centroids over ``data`` (a training dataset).

        Returns a ``(num_classes, feat_dim, 14, 14)`` CUDA tensor holding the
        mean RoI feature of the ground-truth boxes of each class.
        NOTE(review): ``class_data_num`` hard-codes 6 classes and the repeat
        hard-codes feat_dim 1024 -- presumably tied to the SMD config; confirm
        before reusing with other datasets.
        """
        centroids = torch.zeros(self.roi_head.num_classes,
                                self.roi_head.feat_dim,
                                14,
                                14).cuda()
        print('Calculating centroids.')
        # Calculate initial centroids only on training data.
        with torch.set_grad_enabled(False):
            self.backbone.cuda()
            self.rpn_head.cuda()
            self.roi_head.cuda()
            class_data_num = [0, 0, 0, 0, 0, 0]
            # class_data_num = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            for i in tqdm(range(len(data))):
                """imgs, gt_labels, gt_bboxes, img_metas = inputs["img"], \
                                                        inputs["gt_labels"], \
                                                        inputs["gt_bboxes"],\
                                                        inputs["img_metas"]"""
                imgs, gt_labels, gt_bboxes, img_metas = \
                    torch.unsqueeze(data[i]['img'], 0).to(next(self.backbone.parameters()).device), \
                    [data[i]['gt_labels'].to(next(self.backbone.parameters()).device)], \
                    [data[i]['gt_bboxes'].to(next(self.backbone.parameters()).device)], \
                    [data[i]['img_metas']]
                # Calculate Features of each training data
                feats = self.backbone(imgs)
                """proposal_list = self.rpn_head.simple_test_rpn(feats, img_metas)
                num_imgs = len(img_metas)
                # if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
                sampling_results = []
                for i in range(num_imgs):
                    assign_result = self.roi_head.std_roi_head.bbox_assigner.assign(
                        proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
                        gt_labels[i])
                    sampling_result = self.roi_head.std_roi_head.bbox_sampler.sample(
                        assign_result,
                        proposal_list[i],
                        gt_bboxes[i],
                        gt_labels[i],
                        feats=[lvl_feat[i][None] for lvl_feat in feats])
                    sampling_results.append(sampling_result)
                rois = bbox2roi([res.bboxes for res in sampling_results])"""
                rois = bbox2roi(gt_bboxes)
                bbox_feats = self.roi_head.std_roi_head.bbox_roi_extractor(
                    feats[:self.roi_head.std_roi_head.bbox_roi_extractor.num_inputs], rois)
                """labels = self.roi_head.std_roi_head.bbox_head.get_targets(sampling_results, gt_bboxes,
                                                                          gt_labels, self.train_cfg.rcnn)[0]
                # Add all calculated features to center tensor
                for i in range(len(labels)):
                    label = labels[i]
                    if label < self.roi_head.num_classes:
                        centroids[label] += bbox_feats[i]
                        class_data_num[label] += 1"""
                for j in range(len(gt_labels[0])):
                    label = gt_labels[0][j]
                    centroids[label] += bbox_feats[j]
                    class_data_num[label] += 1
            for i in range(len(class_data_num)):
                if class_data_num[i] == 0:
                    class_data_num[i] = 1
        # Average summed features with class count
        centroids /= torch.tensor(class_data_num).float().unsqueeze(1).unsqueeze(2).\
            unsqueeze(3).repeat(1, 1024, 14, 14).cuda()
        return centroids
def class_count(data):
    """Return per-class sample counts for ``data.dataset.labels``.

    Counts are ordered by ascending (sorted-unique) label value.
    """
    labels = np.array(data.dataset.labels)
    return [int((labels == value).sum()) for value in np.unique(labels)]
import torch.nn as nn
import numpy as np
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
from tqdm import tqdm
from mmdet.datasets import build_dataloader, build_dataset
from mmcv import Config
from mmdet.core import bbox2roi
from functools import partial
from torch.utils.data.dataloader import DataLoader
@DETECTORS.register_module()
class TwoStageDetectorMetaEmbedding(BaseDetector):
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
init_centroids=False,
pretrained=None):
super(TwoStageDetectorMetaEmbedding, self).__init__()
self.backbone = build_backbone(backbone)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.init_centroids = init_centroids
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
self.roi_head = build_head(roi_head)
if self.init_centroids:
self.centroids = self.roi_head.loss_feat.centroids.data
else:
self.centroids = None
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
if roi_head["type"] == "MetaEmbedding_RoIHead":
if self.train_cfg is not None:
if init_centroids:
cfg = Config.fromfile(
"/mmdetection/configs/faster_rcnn_meta/faster_rcnn_r50_c4_meta_smd_stage2.py")
dataset = build_dataset(cfg.centroids_cal)
self.roi_head.loss_feat.centroids.data = self.centroids_cal(dataset)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
return hasattr(self, 'roi_head') and self.roi_head is not None
def init_weights(self, pretrained=None):
super(TwoStageDetectorMetaEmbedding, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_roi_head:
self.roi_head.init_weights(pretrained)
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
outs = ()
x = self.extract_feat(img)
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
x = self.extract_feat(img)
losses = dict()
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head(x,
centroids=self.centroids,
img_metas=img_metas,
proposal_list=proposal_list,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
test=False,
**kwargs)
losses.update(roi_losses)
return losses
async def async_simple_test(self,
img,
img_meta,
proposals=None,
rescale=False):
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = await self.rpn_head.async_simple_test_rpn(
x, img_meta)
else:
proposal_list = proposals
return await self.roi_head.async_simple_test(
x, proposal_list, img_meta, rescale=rescale)
def simple_test(self, img, img_metas, proposals=None, rescale=False):
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head(x,
centroids=self.centroids,
proposal_list=proposal_list,
img_metas=img_metas,
test=True)
def aug_test(self, imgs, img_metas, rescale=False):
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def centroids_cal(self, data):
centroids = torch.zeros(self.roi_head.num_classes,
self.roi_head.feat_dim,
14,
14).cuda()
print('Calculating centroids.')
with torch.set_grad_enabled(False):
self.backbone.cuda()
self.rpn_head.cuda()
self.roi_head.cuda()
class_data_num = [0, 0, 0, 0, 0, 0]
for i in tqdm(range(len(data))):
imgs, gt_labels, gt_bboxes, img_metas = \
torch.unsqueeze(data[i]['img'], 0).to(next(self.backbone.parameters()).device), \
[data[i]['gt_labels'].to(next(self.backbone.parameters()).device)], \
[data[i]['gt_bboxes'].to(next(self.backbone.parameters()).device)], \
[data[i]['img_metas']]
feats = self.backbone(imgs)
rois = bbox2roi(gt_bboxes)
bbox_feats = self.roi_head.std_roi_head.bbox_roi_extractor(
feats[:self.roi_head.std_roi_head.bbox_roi_extractor.num_inputs], rois)
for j in range(len(gt_labels[0])):
label = gt_labels[0][j]
centroids[label] += bbox_feats[j]
class_data_num[label] += 1
for i in range(len(class_data_num)):
if class_data_num[i] == 0:
class_data_num[i] = 1
centroids /= torch.tensor(class_data_num).float().unsqueeze(1).unsqueeze(2).\
unsqueeze(3).repeat(1, 1024, 14, 14).cuda()
return centroids
def class_count(data):
labels = np.array(data.dataset.labels)
class_data_num = []
for l in np.unique(labels):
class_data_num.append(len(labels[labels == l]))
return class_data_num | true | true |
f73eb7f19db30babb245f80d3076eac60dbc1cc6 | 4,343 | py | Python | tests/responses/test_fileresponse.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/responses/test_fileresponse.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | tests/responses/test_fileresponse.py | peteralexandercharles/django | 61c7350f41f2534daf3888709f3c987b7d779a29 | [
"BSD-3-Clause",
"0BSD"
] | null | null | null | import io
import os
import sys
import tempfile
from unittest import skipIf
from django.core.files.base import ContentFile
from django.http import FileResponse
from django.test import SimpleTestCase
class FileResponseTests(SimpleTestCase):
def test_file_from_disk_response(self):
response = FileResponse(open(__file__, "rb"))
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__))
)
self.assertIn(response.headers["Content-Type"], ["text/x-python", "text/plain"])
self.assertEqual(
response.headers["Content-Disposition"],
'inline; filename="test_fileresponse.py"',
)
response.close()
def test_file_from_buffer_response(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(response.headers["Content-Length"], "14")
self.assertEqual(response.headers["Content-Type"], "application/octet-stream")
self.assertFalse(response.has_header("Content-Disposition"))
self.assertEqual(list(response), [b"binary content"])
def test_file_from_buffer_unnamed_attachment(self):
response = FileResponse(io.BytesIO(b"binary content"), as_attachment=True)
self.assertEqual(response.headers["Content-Length"], "14")
self.assertEqual(response.headers["Content-Type"], "application/octet-stream")
self.assertEqual(response.headers["Content-Disposition"], "attachment")
self.assertEqual(list(response), [b"binary content"])
@skipIf(sys.platform == "win32", "Named pipes are Unix-only.")
def test_file_from_named_pipe_response(self):
with tempfile.TemporaryDirectory() as temp_dir:
pipe_file = os.path.join(temp_dir, "named_pipe")
os.mkfifo(pipe_file)
pipe_for_read = os.open(pipe_file, os.O_RDONLY | os.O_NONBLOCK)
with open(pipe_file, "wb") as pipe_for_write:
pipe_for_write.write(b"binary content")
response = FileResponse(os.fdopen(pipe_for_read, mode="rb"))
self.assertEqual(list(response), [b"binary content"])
response.close()
self.assertFalse(response.has_header("Content-Length"))
def test_file_from_disk_as_attachment(self):
response = FileResponse(open(__file__, "rb"), as_attachment=True)
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__))
)
self.assertIn(response.headers["Content-Type"], ["text/x-python", "text/plain"])
self.assertEqual(
response.headers["Content-Disposition"],
'attachment; filename="test_fileresponse.py"',
)
response.close()
def test_compressed_response(self):
"""
If compressed responses are served with the uncompressed Content-Type
and a compression Content-Encoding, browsers might automatically
uncompress the file, which is most probably not wanted.
"""
test_tuples = (
(".tar.gz", "application/gzip"),
(".tar.bz2", "application/x-bzip"),
(".tar.xz", "application/x-xz"),
)
for extension, mimetype in test_tuples:
with self.subTest(ext=extension):
with tempfile.NamedTemporaryFile(suffix=extension) as tmp:
response = FileResponse(tmp)
self.assertEqual(response.headers["Content-Type"], mimetype)
self.assertFalse(response.has_header("Content-Encoding"))
def test_unicode_attachment(self):
response = FileResponse(
ContentFile(b"binary content", name="祝您平安.odt"),
as_attachment=True,
content_type="application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Type"],
"application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Disposition"],
"attachment; filename*=utf-8''%E7%A5%9D%E6%82%A8%E5%B9%B3%E5%AE%89.odt",
)
def test_repr(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(
repr(response),
'<FileResponse status_code=200, "application/octet-stream">',
)
| 41.759615 | 88 | 0.643564 | import io
import os
import sys
import tempfile
from unittest import skipIf
from django.core.files.base import ContentFile
from django.http import FileResponse
from django.test import SimpleTestCase
class FileResponseTests(SimpleTestCase):
def test_file_from_disk_response(self):
response = FileResponse(open(__file__, "rb"))
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__))
)
self.assertIn(response.headers["Content-Type"], ["text/x-python", "text/plain"])
self.assertEqual(
response.headers["Content-Disposition"],
'inline; filename="test_fileresponse.py"',
)
response.close()
def test_file_from_buffer_response(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(response.headers["Content-Length"], "14")
self.assertEqual(response.headers["Content-Type"], "application/octet-stream")
self.assertFalse(response.has_header("Content-Disposition"))
self.assertEqual(list(response), [b"binary content"])
def test_file_from_buffer_unnamed_attachment(self):
response = FileResponse(io.BytesIO(b"binary content"), as_attachment=True)
self.assertEqual(response.headers["Content-Length"], "14")
self.assertEqual(response.headers["Content-Type"], "application/octet-stream")
self.assertEqual(response.headers["Content-Disposition"], "attachment")
self.assertEqual(list(response), [b"binary content"])
@skipIf(sys.platform == "win32", "Named pipes are Unix-only.")
def test_file_from_named_pipe_response(self):
with tempfile.TemporaryDirectory() as temp_dir:
pipe_file = os.path.join(temp_dir, "named_pipe")
os.mkfifo(pipe_file)
pipe_for_read = os.open(pipe_file, os.O_RDONLY | os.O_NONBLOCK)
with open(pipe_file, "wb") as pipe_for_write:
pipe_for_write.write(b"binary content")
response = FileResponse(os.fdopen(pipe_for_read, mode="rb"))
self.assertEqual(list(response), [b"binary content"])
response.close()
self.assertFalse(response.has_header("Content-Length"))
def test_file_from_disk_as_attachment(self):
response = FileResponse(open(__file__, "rb"), as_attachment=True)
self.assertEqual(
response.headers["Content-Length"], str(os.path.getsize(__file__))
)
self.assertIn(response.headers["Content-Type"], ["text/x-python", "text/plain"])
self.assertEqual(
response.headers["Content-Disposition"],
'attachment; filename="test_fileresponse.py"',
)
response.close()
def test_compressed_response(self):
test_tuples = (
(".tar.gz", "application/gzip"),
(".tar.bz2", "application/x-bzip"),
(".tar.xz", "application/x-xz"),
)
for extension, mimetype in test_tuples:
with self.subTest(ext=extension):
with tempfile.NamedTemporaryFile(suffix=extension) as tmp:
response = FileResponse(tmp)
self.assertEqual(response.headers["Content-Type"], mimetype)
self.assertFalse(response.has_header("Content-Encoding"))
def test_unicode_attachment(self):
response = FileResponse(
ContentFile(b"binary content", name="祝您平安.odt"),
as_attachment=True,
content_type="application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Type"],
"application/vnd.oasis.opendocument.text",
)
self.assertEqual(
response.headers["Content-Disposition"],
"attachment; filename*=utf-8''%E7%A5%9D%E6%82%A8%E5%B9%B3%E5%AE%89.odt",
)
def test_repr(self):
response = FileResponse(io.BytesIO(b"binary content"))
self.assertEqual(
repr(response),
'<FileResponse status_code=200, "application/octet-stream">',
)
| true | true |
f73eb96c048c0b4013cc5763dce4816837a36598 | 47,661 | py | Python | test/dialect/oracle/test_compiler.py | aadel/sqlalchemy | 380f4389922004589bfa7cb4f9b8c8208aa68659 | [
"MIT"
] | 6 | 2019-02-18T12:42:44.000Z | 2020-11-11T23:10:17.000Z | test/dialect/oracle/test_compiler.py | oladimejiala/sqlalchemy | cbfa1363d7201848a56e7209146e81b9c51aa8af | [
"MIT"
] | null | null | null | test/dialect/oracle/test_compiler.py | oladimejiala/sqlalchemy | cbfa1363d7201848a56e7209146e81b9c51aa8af | [
"MIT"
] | 2 | 2016-11-02T04:59:02.000Z | 2019-05-11T06:01:30.000Z | # coding: utf-8
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Computed
from sqlalchemy import exc
from sqlalchemy import except_
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.oracle import cx_oracle
from sqlalchemy.engine import default
from sqlalchemy.sql import column
from sqlalchemy.sql import quoted_name
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "oracle"
def test_true_false(self):
    """Oracle has no boolean literals; false/true compile to 0/1."""
    for expr, expected in [(sql.false(), "0"), (sql.true(), "1")]:
        self.assert_compile(expr, expected)
def test_owner(self):
    """Schema-qualified ("owner"-qualified) tables keep the qualifier in
    join SQL, including inside the ON clause."""
    metadata = MetaData()
    parent_t = Table(
        "parent",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
        schema="ed",
    )
    child_t = Table(
        "child",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("parent_id", Integer, ForeignKey("ed.parent.id")),
        schema="ed",
    )
    joined = parent_t.join(child_t)
    self.assert_compile(
        joined,
        "ed.parent JOIN ed.child ON ed.parent.id = " "ed.child.parent_id",
    )
def test_subquery(self):
    """Selecting from an anonymous subquery renders the Oracle-style
    alias without the AS keyword (``(...) anon_1``)."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).subquery()
    s = select([s.c.col1, s.c.col2])
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 "
        "AS col2 FROM sometable) anon_1",
    )
def test_bindparam_quote(self):
    """Bound parameters named after reserved words, or for columns with
    the quote flag enabled, render with quoted parameter names."""
    # note: this is only in cx_oracle at the moment. not sure
    # what other hypothetical oracle dialects might need
    self.assert_compile(bindparam("option"), ':"option"')
    self.assert_compile(bindparam("plain"), ":plain")
    # quote=True on the column propagates to the generated bind name
    t = Table("s", MetaData(), Column("plain", Integer, quote=True))
    self.assert_compile(
        t.insert().values(plain=5),
        'INSERT INTO s ("plain") VALUES (:"plain")',
    )
    self.assert_compile(
        t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
    )
def test_bindparam_quote_works_on_expanding(self):
    """An expanding (IN-style) bindparam keeps its postcompile
    placeholder form under the cx_oracle dialect."""
    param = bindparam("uid", expanding=True)
    self.assert_compile(
        param,
        "([POSTCOMPILE_uid])",
        dialect=cx_oracle.dialect(),
    )
def test_cte(self):
    """A recursive CTE with Oracle-specific SEARCH/CYCLE suffixes
    (applied via suffix_with(dialect="oracle")) renders in full."""
    part = table(
        "part", column("part"), column("sub_part"), column("quantity")
    )
    included_parts = (
        select([part.c.sub_part, part.c.part, part.c.quantity])
        .where(part.c.part == "p1")
        .cte(name="included_parts", recursive=True)
        .suffix_with(
            "search depth first by part set ord1",
            "cycle part set y_cycle to 1 default 0",
            dialect="oracle",
        )
    )
    incl_alias = included_parts.alias("pr1")
    parts_alias = part.alias("p")
    # recursive member references the CTE through its alias
    included_parts = included_parts.union_all(
        select(
            [
                parts_alias.c.sub_part,
                parts_alias.c.part,
                parts_alias.c.quantity,
            ]
        ).where(parts_alias.c.part == incl_alias.c.sub_part)
    )
    q = select(
        [
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).label("total_quantity"),
        ]
    ).group_by(included_parts.c.sub_part)
    self.assert_compile(
        q,
        "WITH included_parts(sub_part, part, quantity) AS "
        "(SELECT part.sub_part AS sub_part, part.part AS part, "
        "part.quantity AS quantity FROM part WHERE part.part = :part_1 "
        "UNION ALL SELECT p.sub_part AS sub_part, p.part AS part, "
        "p.quantity AS quantity FROM part p, included_parts pr1 "
        "WHERE p.part = pr1.sub_part) "
        "search depth first by part set ord1 cycle part set "
        "y_cycle to 1 default 0 "
        "SELECT included_parts.sub_part, sum(included_parts.quantity) "
        "AS total_quantity FROM included_parts "
        "GROUP BY included_parts.sub_part",
    )
def test_limit_one(self):
    """limit+offset is emulated with nested ROWNUM subqueries, and the
    result map still resolves the original table columns."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t])
    c = s.compile(dialect=oracle.OracleDialect())
    # plain select: column resolves directly
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
    s = select([t]).limit(10).offset(20)
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, "
        "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable) anon_2 WHERE ROWNUM <= "
        "[POSTCOMPILE_param_1]) anon_1 WHERE ora_rn > "
        "[POSTCOMPILE_param_2]",
        checkparams={"param_1": 30, "param_2": 20},
    )
    c = s.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    # the wrapped select still maps back to the original column
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_one_firstrows(self):
    """With optimize_limits=True, the ROWNUM-limited subquery gets an
    Oracle ``/*+ FIRST_ROWS(n) */`` optimizer hint.

    Note: the redundant ``s = select([t])`` assignment present in
    earlier revisions was removed; unlike test_limit_one, that plain
    select was never compiled here, so it was dead code.
    """
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).limit(10).offset(20)
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT /*+ FIRST_ROWS([POSTCOMPILE_ora_frow_1]) */ "
        "anon_2.col1 AS col1, "
        "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable) anon_2 WHERE ROWNUM <= "
        "[POSTCOMPILE_param_1]) anon_1 WHERE ora_rn > "
        "[POSTCOMPILE_param_2]",
        checkparams={"ora_frow_1": 10, "param_1": 30, "param_2": 20},
        dialect=oracle.OracleDialect(optimize_limits=True),
    )
def test_limit_two(self):
    """Selecting from a limited/offset subquery nests three levels of
    ROWNUM wrapping; the compile is asserted twice to verify it is
    stable across repeated compilation."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).limit(10).offset(20).subquery()
    s2 = select([s.c.col1, s.c.col2])
    self.assert_compile(
        s2,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, "
        "anon_2.col2 AS col2 "
        "FROM (SELECT anon_3.col1 AS col1, anon_3.col2 AS col2, "
        "ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) anon_3 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_2 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1",
        checkparams={"param_1": 30, "param_2": 20},
    )
    # second compile of the same statement produces identical SQL
    self.assert_compile(
        s2,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, "
        "anon_2.col2 AS col2 "
        "FROM (SELECT anon_3.col1 AS col1, anon_3.col2 AS col2, "
        "ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) anon_3 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_2 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1",
    )
    c = s2.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    assert s.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_three(self):
    """ORDER BY is applied in the innermost subquery so the ROWNUM
    emulation limits the *sorted* rows."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).limit(10).offset(20).order_by(t.c.col2)
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, "
        "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) anon_2 WHERE ROWNUM <= "
        "[POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]",
        checkparams={"param_1": 30, "param_2": 20},
    )
    c = s.compile(dialect=oracle.OracleDialect())
    eq_(len(c._result_columns), 2)
    assert t.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_four(self):
    """FOR UPDATE stays on the outermost query, outside the ROWNUM
    limiting subquery (limit only, no offset)."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).with_for_update().limit(10).order_by(t.c.col2)
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] "
        "FOR UPDATE",
        checkparams={"param_1": 10},
    )
def test_limit_four_firstrows(self):
    """Same as test_limit_four but with optimize_limits=True, adding
    the FIRST_ROWS hint while keeping FOR UPDATE outermost."""
    t = table("sometable", column("col1"), column("col2"))
    s = select([t]).with_for_update().limit(10).order_by(t.c.col2)
    self.assert_compile(
        s,
        "SELECT /*+ FIRST_ROWS([POSTCOMPILE_ora_frow_1]) */ "
        "anon_1.col1, anon_1.col2 FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] "
        "FOR UPDATE",
        checkparams={"param_1": 10, "ora_frow_1": 10},
        dialect=oracle.OracleDialect(optimize_limits=True),
    )
def test_limit_five(self):
    """limit + offset + FOR UPDATE: FOR UPDATE is appended after the
    full two-level ROWNUM emulation."""
    t = table("sometable", column("col1"), column("col2"))
    s = (
        select([t])
        .with_for_update()
        .limit(10)
        .offset(20)
        .order_by(t.c.col2)
    )
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, "
        "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
        "sometable.col1 AS col1, sometable.col2 AS "
        "col2 FROM sometable ORDER BY "
        "sometable.col2) anon_2 WHERE ROWNUM <= "
        "[POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2] FOR "
        "UPDATE",
        checkparams={"param_1": 30, "param_2": 20},
    )
def test_limit_six(self):
    """An expression-valued offset cannot be postcompiled, so plain
    bound parameters are rendered and the limit becomes
    offset-expression + limit."""
    t = table("sometable", column("col1"), column("col2"))
    s = (
        select([t])
        .limit(10)
        .offset(literal(10) + literal(20))
        .order_by(t.c.col2)
    )
    self.assert_compile(
        s,
        "SELECT anon_1.col1, anon_1.col2 FROM (SELECT anon_2.col1 AS "
        "col1, anon_2.col2 AS col2, ROWNUM AS ora_rn FROM "
        "(SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable ORDER BY sometable.col2) anon_2 WHERE "
        "ROWNUM <= :param_1 + :param_2 + :param_3) anon_1 "
        "WHERE ora_rn > :param_2 + :param_3",
        checkparams={"param_1": 10, "param_2": 10, "param_3": 20},
    )
def test_limit_special_quoting(self):
    """Oracle-specific test for #4730.

    Even though this issue is generic, test the originally reported
    Oracle use case: labels containing special characters (here,
    parentheses) must be quoted consistently between the inner label
    and the outer reference, whether the label name is a plain string
    or an explicitly quoted_name.
    """
    # (label argument, how the label renders in the SQL)
    cases = [
        ("SUM(ABC)", '"SUM(ABC)"'),
        (quoted_name("SUM(ABC)", True), '"SUM(ABC)"'),
        ("SUM(ABC)_", '"SUM(ABC)_"'),
        (quoted_name("SUM(ABC)_", True), '"SUM(ABC)_"'),
    ]
    for label_arg, rendered in cases:
        col = literal_column("SUM(ABC)").label(label_arg)
        tbl = table("my_table")
        query = select([col]).select_from(tbl).order_by(col).limit(100)
        expected = (
            "SELECT anon_1.{q} FROM "
            "(SELECT SUM(ABC) AS {q} "
            "FROM my_table ORDER BY SUM(ABC)) anon_1 "
            "WHERE ROWNUM <= [POSTCOMPILE_param_1]"
        ).format(q=rendered)
        self.assert_compile(query, expected)
def test_for_update(self):
    """Exercises the FOR UPDATE variants: plain, OF <cols>, NOWAIT,
    SKIP LOCKED, and the Oracle-ignored read/key_share flags; also
    verifies OF columns track an aliased table."""
    table1 = table(
        "mytable", column("myid"), column("name"), column("description")
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            of=table1.c.myid
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 "
        "FOR UPDATE OF mytable.myid",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(nowait=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            nowait=True, of=table1.c.myid
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 "
        "FOR UPDATE OF mytable.myid NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            nowait=True, of=[table1.c.myid, table1.c.name]
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
        "mytable.myid, mytable.name NOWAIT",
    )
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            skip_locked=True, of=[table1.c.myid, table1.c.name]
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
        "mytable.myid, mytable.name SKIP LOCKED",
    )
    # key_share has no effect
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(key_share=True),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    # read has no effect
    self.assert_compile(
        table1.select(table1.c.myid == 7).with_for_update(
            read=True, key_share=True
        ),
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
    )
    # OF columns follow the alias name when the table is aliased
    ta = table1.alias()
    self.assert_compile(
        ta.select(ta.c.myid == 7).with_for_update(
            of=[ta.c.myid, ta.c.name]
        ),
        "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
        "FROM mytable mytable_1 "
        "WHERE mytable_1.myid = :myid_1 FOR UPDATE OF "
        "mytable_1.myid, mytable_1.name",
    )
def test_for_update_of_w_limit_adaption_col_present(self):
    """FOR UPDATE OF columns are adapted to the ROWNUM wrapper's alias
    when the OF column is also in the SELECT list."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid, table1.c.name])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10),
        "SELECT anon_1.myid, anon_1.name FROM "
        "(SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) anon_1 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1] "
        "FOR UPDATE OF anon_1.name NOWAIT",
        checkparams={"param_1": 10, "myid_1": 7},
    )
def test_for_update_of_w_limit_adaption_col_unpresent(self):
    """When the OF column is not in the SELECT list, it is still added
    to the inner subquery so the outer OF clause can reference it."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10),
        "SELECT anon_1.myid FROM "
        "(SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) anon_1 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1] "
        "FOR UPDATE OF anon_1.name NOWAIT",
    )
def test_for_update_of_w_limit_offset_adaption_col_present(self):
    """limit + offset: the OF column present in the SELECT list is
    carried through both levels of ROWNUM wrapping."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid, table1.c.name])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10)
        .offset(50),
        "SELECT anon_1.myid, anon_1.name FROM "
        "(SELECT anon_2.myid AS myid, anon_2.name AS name, "
        "ROWNUM AS ora_rn "
        "FROM (SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2] "
        "FOR UPDATE OF anon_1.name NOWAIT",
        checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
    )
def test_for_update_of_w_limit_offset_adaption_col_unpresent(self):
    """limit + offset with the OF column absent from the SELECT list:
    the column is appended to the intermediate subqueries only."""
    table1 = table("mytable", column("myid"), column("name"))
    self.assert_compile(
        select([table1.c.myid])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=table1.c.name)
        .limit(10)
        .offset(50),
        "SELECT anon_1.myid FROM (SELECT anon_2.myid AS myid, "
        "ROWNUM AS ora_rn, anon_2.name AS name "
        "FROM (SELECT mytable.myid AS myid, mytable.name AS name "
        "FROM mytable WHERE mytable.myid = :myid_1) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2] "
        "FOR UPDATE OF anon_1.name NOWAIT",
        checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
    )
def test_for_update_of_w_limit_offset_adaption_partial_col_unpresent(self):
    """limit + offset where only some OF columns are in the SELECT
    list: present and absent OF columns are both adapted correctly."""
    table1 = table("mytable", column("myid"), column("foo"), column("bar"))
    self.assert_compile(
        select([table1.c.myid, table1.c.bar])
        .where(table1.c.myid == 7)
        .with_for_update(nowait=True, of=[table1.c.foo, table1.c.bar])
        .limit(10)
        .offset(50),
        "SELECT anon_1.myid, anon_1.bar FROM (SELECT anon_2.myid AS myid, "
        "anon_2.bar AS bar, ROWNUM AS ora_rn, "
        "anon_2.foo AS foo FROM (SELECT mytable.myid AS myid, "
        "mytable.bar AS bar, "
        "mytable.foo AS foo FROM mytable "
        "WHERE mytable.myid = :myid_1) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2] "
        "FOR UPDATE OF anon_1.foo, anon_1.bar NOWAIT",
        checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
    )
def test_limit_preserves_typing_information(self):
    """The ROWNUM limit wrapping must not lose a column's custom
    TypeDecorator in the compiled result map."""
    class MyType(TypeDecorator):
        impl = Integer
    stmt = select([type_coerce(column("x"), MyType).label("foo")]).limit(1)
    dialect = oracle.dialect()
    compiled = stmt.compile(dialect=dialect)
    # last element of the result-map entry is the column's type object
    assert isinstance(compiled._create_result_map()["foo"][-1], MyType)
def test_use_binds_for_limits_disabled_one(self):
    """use_binds_for_limits=False is deprecated and a no-op: limit
    still renders as a postcompile parameter."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=False)
    self.assert_compile(
        select([t]).limit(10),
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) anon_1 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]",
        dialect=dialect,
    )
def test_use_binds_for_limits_disabled_two(self):
    """Deprecated use_binds_for_limits=False with offset only: offset
    still renders as a postcompile parameter."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=False)
    self.assert_compile(
        select([t]).offset(10),
        "SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
        "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) anon_2) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_1]",
        dialect=dialect,
    )
def test_use_binds_for_limits_disabled_three(self):
    """Deprecated use_binds_for_limits=False with limit + offset:
    both render as postcompile parameters."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=False)
    self.assert_compile(
        select([t]).limit(10).offset(10),
        "SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
        "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]",
        dialect=dialect,
    )
def test_use_binds_for_limits_enabled_one(self):
    """use_binds_for_limits=True is likewise deprecated and a no-op
    (limit case)."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=True)
    self.assert_compile(
        select([t]).limit(10),
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT sometable.col1 AS col1, "
        "sometable.col2 AS col2 FROM sometable) anon_1 WHERE ROWNUM "
        "<= [POSTCOMPILE_param_1]",
        dialect=dialect,
    )
def test_use_binds_for_limits_enabled_two(self):
    """Deprecated use_binds_for_limits=True (offset case)."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=True)
    self.assert_compile(
        select([t]).offset(10),
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, anon_2.col2 AS col2, "
        "ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) anon_2) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_1]",
        dialect=dialect,
    )
def test_use_binds_for_limits_enabled_three(self):
    """Deprecated use_binds_for_limits=True (limit + offset case),
    also verifying the computed parameter values."""
    t = table("sometable", column("col1"), column("col2"))
    with testing.expect_deprecated(
        "The ``use_binds_for_limits`` Oracle dialect parameter is "
        "deprecated."
    ):
        dialect = oracle.OracleDialect(use_binds_for_limits=True)
    self.assert_compile(
        select([t]).limit(10).offset(10),
        "SELECT anon_1.col1, anon_1.col2 FROM "
        "(SELECT anon_2.col1 AS col1, anon_2.col2 AS col2, "
        "ROWNUM AS ora_rn "
        "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
        "FROM sometable) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]",
        dialect=dialect,
        checkparams={"param_1": 20, "param_2": 10},
    )
def test_long_labels_legacy_ident_length(self):
    """With a 30-character identifier limit, generated labels are
    truncated; the Oracle dialect additionally drops the AS keyword
    for table aliases (compare the two expected strings)."""
    dialect = default.DefaultDialect()
    dialect.max_identifier_length = 30
    ora_dialect = oracle.dialect(max_identifier_length=30)
    m = MetaData()
    a_table = Table(
        "thirty_characters_table_xxxxxx",
        m,
        Column("id", Integer, primary_key=True),
    )
    other_table = Table(
        "other_thirty_characters_table_",
        m,
        Column(
            "id", Integer, primary_key=True),
        Column(
            "thirty_characters_table_id",
            Integer,
            ForeignKey("thirty_characters_table_xxxxxx.id"),
            primary_key=True,
        ),
    )
    anon = a_table.alias()
    # generic dialect: "... LEFT OUTER JOIN ... AS thirty_characters_table__1"
    self.assert_compile(
        select([other_table, anon])
        .select_from(other_table.outerjoin(anon))
        .apply_labels(),
        "SELECT other_thirty_characters_table_.id "
        "AS other_thirty_characters__1, "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id AS other_thirty_characters"
        "__2, thirty_characters_table__1.id AS "
        "thirty_characters_table__3 FROM "
        "other_thirty_characters_table_ LEFT OUTER "
        "JOIN thirty_characters_table_xxxxxx AS "
        "thirty_characters_table__1 ON "
        "thirty_characters_table__1.id = "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id",
        dialect=dialect,
    )
    # Oracle dialect: same SQL except the table alias has no AS
    self.assert_compile(
        select([other_table, anon])
        .select_from(other_table.outerjoin(anon))
        .apply_labels(),
        "SELECT other_thirty_characters_table_.id "
        "AS other_thirty_characters__1, "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id AS other_thirty_characters"
        "__2, thirty_characters_table__1.id AS "
        "thirty_characters_table__3 FROM "
        "other_thirty_characters_table_ LEFT OUTER "
        "JOIN thirty_characters_table_xxxxxx "
        "thirty_characters_table__1 ON "
        "thirty_characters_table__1.id = "
        "other_thirty_characters_table_.thirty_char"
        "acters_table_id",
        dialect=ora_dialect,
    )
def _test_outer_join_fixture(self):
    """Build and return the three ad-hoc tables (mytable, myothertable,
    thirdtable) shared by the outer-join tests."""
    mytable = table(
        "mytable",
        column("myid", Integer),
        column("name", String),
        column("description", String),
    )
    myothertable = table(
        "myothertable",
        column("otherid", Integer),
        column("othername", String),
    )
    thirdtable = table(
        "thirdtable",
        column("userid", Integer),
        column("otherstuff", String),
    )
    return mytable, myothertable, thirdtable
def test_outer_join_one(self):
    """use_ansi=False renders LEFT OUTER JOIN as Oracle's legacy
    ``(+)`` operator in the WHERE clause."""
    table1, table2, table3 = self._test_outer_join_fixture()
    query = select(
        [table1, table2],
        or_(
            table1.c.name == "fred",
            table1.c.myid == 10,
            table2.c.othername != "jack",
            text("EXISTS (select yay from foo where boo = lar)"),
        ),
        from_obj=[
            outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
        ],
    )
    self.assert_compile(
        query,
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername FROM mytable, "
        "myothertable WHERE (mytable.name = "
        ":name_1 OR mytable.myid = :myid_1 OR "
        "myothertable.othername != :othername_1 OR "
        "EXISTS (select yay from foo where boo = "
        "lar)) AND mytable.myid = "
        "myothertable.otherid(+)",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
def test_outer_join_two(self):
    """With the default use_ansi=True, chained outer joins render as
    standard LEFT OUTER JOIN syntax."""
    table1, table2, table3 = self._test_outer_join_fixture()
    query = table1.outerjoin(
        table2, table1.c.myid == table2.c.otherid
    ).outerjoin(table3, table3.c.userid == table2.c.otherid)
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable LEFT OUTER JOIN myothertable "
        "ON mytable.myid = myothertable.otherid "
        "LEFT OUTER JOIN thirdtable ON "
        "thirdtable.userid = myothertable.otherid",
    )
def test_outer_join_three(self):
    """The same chained outer joins as test_outer_join_two, but with
    use_ansi=False: both joins become ``(+)`` predicates."""
    table1, table2, table3 = self._test_outer_join_fixture()
    query = table1.outerjoin(
        table2, table1.c.myid == table2.c.otherid
    ).outerjoin(table3, table3.c.userid == table2.c.otherid)
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable, myothertable, thirdtable "
        "WHERE thirdtable.userid(+) = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid(+)",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_outer_join_four(self):
    """Inner joins under use_ansi=False become plain WHERE equality,
    with no ``(+)`` markers."""
    table1, table2, table3 = self._test_outer_join_fixture()
    query = table1.join(table2, table1.c.myid == table2.c.otherid).join(
        table3, table3.c.userid == table2.c.otherid
    )
    self.assert_compile(
        query.select(),
        "SELECT mytable.myid, mytable.name, "
        "mytable.description, myothertable.otherid,"
        " myothertable.othername, "
        "thirdtable.userid, thirdtable.otherstuff "
        "FROM mytable, myothertable, thirdtable "
        "WHERE thirdtable.userid = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_outer_join_five(self):
    """Mixed inner/outer join under use_ansi=False combined with the
    ROWNUM limit/offset emulation: only the outer join gets ``(+)``."""
    table1, table2, table3 = self._test_outer_join_fixture()
    query = table1.join(
        table2, table1.c.myid == table2.c.otherid
    ).outerjoin(table3, table3.c.userid == table2.c.otherid)
    self.assert_compile(
        query.select().order_by(table1.c.name).limit(10).offset(5),
        "SELECT anon_1.myid, anon_1.name, anon_1.description, "
        "anon_1.otherid, "
        "anon_1.othername, anon_1.userid, anon_1.otherstuff FROM "
        "(SELECT anon_2.myid AS myid, anon_2.name AS name, "
        "anon_2.description AS description, anon_2.otherid AS otherid, "
        "anon_2.othername AS othername, anon_2.userid AS userid, "
        "anon_2.otherstuff AS otherstuff, ROWNUM AS "
        "ora_rn FROM (SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description, myothertable.otherid AS "
        "otherid, myothertable.othername AS "
        "othername, thirdtable.userid AS userid, "
        "thirdtable.otherstuff AS otherstuff FROM "
        "mytable, myothertable, thirdtable WHERE "
        "thirdtable.userid(+) = "
        "myothertable.otherid AND mytable.myid = "
        "myothertable.otherid ORDER BY mytable.name) anon_2 "
        "WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
        "WHERE ora_rn > [POSTCOMPILE_param_2]",
        checkparams={"param_1": 15, "param_2": 5},
        dialect=oracle.dialect(use_ansi=False),
    )
def test_outer_join_six(self):
    """An outer join against an aliased subquery that itself contains
    an outer join, asserted under both ANSI and non-ANSI modes."""
    table1, table2, table3 = self._test_outer_join_fixture()
    subq = (
        select([table1])
        .select_from(
            table1.outerjoin(table2, table1.c.myid == table2.c.otherid)
        )
        .alias()
    )
    q = select([table3]).select_from(
        table3.outerjoin(subq, table3.c.userid == subq.c.myid)
    )
    self.assert_compile(
        q,
        "SELECT thirdtable.userid, "
        "thirdtable.otherstuff FROM thirdtable "
        "LEFT OUTER JOIN (SELECT mytable.myid AS "
        "myid, mytable.name AS name, "
        "mytable.description AS description FROM "
        "mytable LEFT OUTER JOIN myothertable ON "
        "mytable.myid = myothertable.otherid) "
        "anon_1 ON thirdtable.userid = anon_1.myid",
        dialect=oracle.dialect(use_ansi=True),
    )
    # non-ANSI: both the inner and outer joins use (+)
    self.assert_compile(
        q,
        "SELECT thirdtable.userid, "
        "thirdtable.otherstuff FROM thirdtable, "
        "(SELECT mytable.myid AS myid, "
        "mytable.name AS name, mytable.description "
        "AS description FROM mytable, myothertable "
        "WHERE mytable.myid = myothertable.otherid("
        "+)) anon_1 WHERE thirdtable.userid = "
        "anon_1.myid(+)",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_outer_join_seven(self):
    """A plain SELECT with no join is unaffected by use_ansi=False."""
    table1, table2, table3 = self._test_outer_join_fixture()
    q = select([table1.c.name]).where(table1.c.name == "foo")
    self.assert_compile(
        q,
        "SELECT mytable.name FROM mytable WHERE " "mytable.name = :name_1",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_outer_join_eight(self):
    """A correlated scalar subquery in the columns clause compiles
    normally under use_ansi=False (no join rewriting applies)."""
    table1, table2, table3 = self._test_outer_join_fixture()
    subq = (
        select([table3.c.otherstuff])
        .where(table3.c.otherstuff == table1.c.name)
        .label("bar")
    )
    q = select([table1.c.name, subq])
    self.assert_compile(
        q,
        "SELECT mytable.name, (SELECT "
        "thirdtable.otherstuff FROM thirdtable "
        "WHERE thirdtable.otherstuff = "
        "mytable.name) AS bar FROM mytable",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_nonansi_plusses_everthing_in_the_condition(self):
    """In non-ANSI mode every reference to the outer-joined table in
    the ON clause gets a ``(+)``, including comparisons against
    literals and IS NULL; columns of the left table do not.

    (NOTE: "everthing" in the name is a long-standing typo, kept to
    preserve the public test identifier.)
    """
    table1 = table(
        "mytable",
        column("myid", Integer),
        column("name", String),
        column("description", String),
    )
    table2 = table(
        "myothertable",
        column("otherid", Integer),
        column("othername", String),
    )
    stmt = select([table1]).select_from(
        table1.outerjoin(
            table2,
            and_(
                table1.c.myid == table2.c.otherid,
                table2.c.othername > 5,
                table1.c.name == "foo",
            ),
        )
    )
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable, myothertable WHERE mytable.myid = "
        "myothertable.otherid(+) AND myothertable.othername(+) > "
        ":othername_1 AND mytable.name = :name_1",
        dialect=oracle.dialect(use_ansi=False),
    )
    # IS NULL comparisons receive the (+) marker too
    stmt = select([table1]).select_from(
        table1.outerjoin(
            table2,
            and_(
                table1.c.myid == table2.c.otherid,
                table2.c.othername == None,
                table1.c.name == None,
            ),
        )
    )
    self.assert_compile(
        stmt,
        "SELECT mytable.myid, mytable.name, mytable.description "
        "FROM mytable, myothertable WHERE mytable.myid = "
        "myothertable.otherid(+) AND myothertable.othername(+) IS NULL "
        "AND mytable.name IS NULL",
        dialect=oracle.dialect(use_ansi=False),
    )
def test_nonansi_nested_right_join(self):
    """Nested right-hand joins in non-ANSI mode: (+) lands only on the
    predicate of whichever join is actually outer."""
    a = table("a", column("a"))
    b = table("b", column("b"))
    c = table("c", column("c"))
    # inner join to an inner-joined pair: no (+) at all
    j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b AND b.b = c.c",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
    # outer join on the top-level predicate only
    j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b(+) AND b.b = c.c",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
    # outer join on the nested predicate only
    j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
    self.assert_compile(
        select([j]),
        "SELECT a.a, b.b, c.c FROM a, b, c "
        "WHERE a.a = b.b AND b.b = c.c(+)",
        dialect=oracle.OracleDialect(use_ansi=False),
    )
def test_alias_outer_join(self):
    """Outer join to an aliased table: the alias renders without AS,
    and aliased vs. original columns stay distinct in ORDER BY."""
    address_types = table("address_types", column("id"), column("name"))
    addresses = table(
        "addresses",
        column("id"),
        column("user_id"),
        column("address_type_id"),
        column("email_address"),
    )
    at_alias = address_types.alias()
    s = (
        select([at_alias, addresses])
        .select_from(
            addresses.outerjoin(
                at_alias, addresses.c.address_type_id == at_alias.c.id
            )
        )
        .where(addresses.c.user_id == 7)
        # note: deliberately orders by the *unaliased* address_types.id
        .order_by(addresses.c.id, address_types.c.id)
    )
    self.assert_compile(
        s,
        "SELECT address_types_1.id, "
        "address_types_1.name, addresses.id, "
        "addresses.user_id, addresses.address_type_"
        "id, addresses.email_address FROM "
        "addresses LEFT OUTER JOIN address_types "
        "address_types_1 ON addresses.address_type_"
        "id = address_types_1.id WHERE "
        "addresses.user_id = :user_id_1 ORDER BY "
        "addresses.id, address_types.id",
    )
def test_returning_insert(self):
    """INSERT ... RETURNING compiles to Oracle's RETURNING ... INTO
    form with positional out-parameters."""
    t1 = table("t1", column("c1"), column("c2"), column("c3"))
    stmt = t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3)
    self.assert_compile(
        stmt,
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "t1.c2, t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_functional(self):
    """A SQL function in the RETURNING list renders inline and is
    registered in the result map with the function's type."""
    t1 = table(
        "t1", column("c1"), column("c2", String()), column("c3", String())
    )
    fn = func.lower(t1.c.c2, type_=String())
    stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
    compiled = stmt.compile(dialect=oracle.dialect())
    eq_(
        compiled._create_result_map(),
        {
            "c3": ("c3", (t1.c.c3, "c3", "c3"), t1.c.c3.type),
            "lower": ("lower", (fn, "lower", None), fn.type),
        },
    )
    self.assert_compile(
        stmt,
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "lower(t1.c2), t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_labeled(self):
    """Labels on RETURNING columns are dropped in the rendered SQL
    (Oracle RETURNING does not accept AS aliases)."""
    t1 = table("t1", column("c1"), column("c2"), column("c3"))
    self.assert_compile(
        t1.insert()
        .values(c1=1)
        .returning(t1.c.c2.label("c2_l"), t1.c.c3.label("c3_l")),
        "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
        "t1.c2, t1.c3 INTO :ret_0, :ret_1",
    )
def test_returning_insert_computed(self):
    """A Computed (generated) column may be RETURNed from INSERT; the
    column itself is excluded from the VALUES list."""
    m = MetaData()
    t1 = Table(
        "t1",
        m,
        Column("id", Integer, primary_key=True),
        Column("foo", Integer),
        Column("bar", Integer, Computed("foo + 42")),
    )
    self.assert_compile(
        t1.insert().values(id=1, foo=5).returning(t1.c.bar),
        "INSERT INTO t1 (id, foo) VALUES (:id, :foo) "
        "RETURNING t1.bar INTO :ret_0",
    )
def test_returning_update_computed_warning(self):
    """UPDATE ... RETURNING a Computed column compiles, but emits a
    warning because Oracle returns the pre-UPDATE value."""
    m = MetaData()
    t1 = Table(
        "t1",
        m,
        Column("id", Integer, primary_key=True),
        Column("foo", Integer),
        Column("bar", Integer, Computed("foo + 42")),
    )
    with testing.expect_warnings(
        "Computed columns don't work with Oracle UPDATE"
    ):
        self.assert_compile(
            t1.update().values(id=1, foo=5).returning(t1.c.bar),
            "UPDATE t1 SET id=:id, foo=:foo RETURNING t1.bar INTO :ret_0",
        )
def test_compound(self):
    """Compound selects: UNION renders as-is, while EXCEPT becomes
    Oracle's MINUS keyword."""
    t1 = table("t1", column("c1"), column("c2"), column("c3"))
    t2 = table("t2", column("c1"), column("c2"), column("c3"))
    self.assert_compile(
        union(t1.select(), t2.select()),
        "SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION "
        "SELECT t2.c1, t2.c2, t2.c3 FROM t2",
    )
    self.assert_compile(
        except_(t1.select(), t2.select()),
        "SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS "
        "SELECT t2.c1, t2.c2, t2.c3 FROM t2",
    )
def test_no_paren_fns(self):
for fn, expected in [
(func.uid(), "uid"),
(func.UID(), "UID"),
(func.sysdate(), "sysdate"),
(func.row_number(), "row_number()"),
(func.rank(), "rank()"),
(func.now(), "CURRENT_TIMESTAMP"),
(func.current_timestamp(), "CURRENT_TIMESTAMP"),
(func.user(), "USER"),
]:
self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer), schema="alt_schema")
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)",
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer))
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)",
)
def test_table_options(self):
m = MetaData()
t = Table(
"foo",
m,
Column("x", Integer),
prefixes=["GLOBAL TEMPORARY"],
oracle_on_commit="PRESERVE ROWS",
)
self.assert_compile(
schema.CreateTable(t),
"CREATE GLOBAL TEMPORARY TABLE "
"foo (x INTEGER) ON COMMIT PRESERVE ROWS",
)
def test_create_table_compress(self):
m = MetaData()
tbl1 = Table(
"testtbl1", m, Column("data", Integer), oracle_compress=True
)
tbl2 = Table(
"testtbl2", m, Column("data", Integer), oracle_compress="OLTP"
)
self.assert_compile(
schema.CreateTable(tbl1),
"CREATE TABLE testtbl1 (data INTEGER) COMPRESS",
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE testtbl2 (data INTEGER) " "COMPRESS FOR OLTP",
)
def test_create_index_bitmap_compress(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("idx1", tbl.c.data, oracle_compress=True)
idx2 = Index("idx2", tbl.c.data, oracle_compress=1)
idx3 = Index("idx3", tbl.c.data, oracle_bitmap=True)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX idx1 ON testtbl (data) COMPRESS",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX idx2 ON testtbl (data) COMPRESS 1",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE BITMAP INDEX idx3 ON testtbl (data)",
)
@testing.combinations(
("no_persisted", "", "ignore"),
("persisted_none", "", None),
("persisted_false", " VIRTUAL", False),
id_="iaa",
)
def test_column_computed(self, text, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
"ALWAYS AS (x + 2)%s)" % text,
)
def test_column_computed_persisted_true(self):
m = MetaData()
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", persisted=True)),
)
assert_raises_message(
exc.CompileError,
r".*Oracle computed columns do not support 'stored' ",
schema.CreateTable(t).compile,
dialect=oracle.dialect(),
)
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
def test_basic(self):
seq = Sequence("my_seq_no_schema")
dialect = oracle.OracleDialect()
assert (
dialect.identifier_preparer.format_sequence(seq)
== "my_seq_no_schema"
)
seq = Sequence("my_seq", schema="some_schema")
assert (
dialect.identifier_preparer.format_sequence(seq)
== "some_schema.my_seq"
)
seq = Sequence("My_Seq", schema="Some_Schema")
assert (
dialect.identifier_preparer.format_sequence(seq)
== '"Some_Schema"."My_Seq"'
)
| 37.206089 | 79 | 0.560081 |
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Computed
from sqlalchemy import exc
from sqlalchemy import except_
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import outerjoin
from sqlalchemy import schema
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import type_coerce
from sqlalchemy import TypeDecorator
from sqlalchemy import union
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.dialects.oracle import cx_oracle
from sqlalchemy.engine import default
from sqlalchemy.sql import column
from sqlalchemy.sql import quoted_name
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "oracle"
def test_true_false(self):
self.assert_compile(sql.false(), "0")
self.assert_compile(sql.true(), "1")
def test_owner(self):
meta = MetaData()
parent = Table(
"parent",
meta,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
schema="ed",
)
child = Table(
"child",
meta,
Column("id", Integer, primary_key=True),
Column("parent_id", Integer, ForeignKey("ed.parent.id")),
schema="ed",
)
self.assert_compile(
parent.join(child),
"ed.parent JOIN ed.child ON ed.parent.id = " "ed.child.parent_id",
)
def test_subquery(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t]).subquery()
s = select([s.c.col1, s.c.col2])
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 "
"AS col2 FROM sometable) anon_1",
)
def test_bindparam_quote(self):
self.assert_compile(bindparam("option"), ':"option"')
self.assert_compile(bindparam("plain"), ":plain")
t = Table("s", MetaData(), Column("plain", Integer, quote=True))
self.assert_compile(
t.insert().values(plain=5),
'INSERT INTO s ("plain") VALUES (:"plain")',
)
self.assert_compile(
t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"'
)
def test_bindparam_quote_works_on_expanding(self):
self.assert_compile(
bindparam("uid", expanding=True),
"([POSTCOMPILE_uid])",
dialect=cx_oracle.dialect(),
)
def test_cte(self):
part = table(
"part", column("part"), column("sub_part"), column("quantity")
)
included_parts = (
select([part.c.sub_part, part.c.part, part.c.quantity])
.where(part.c.part == "p1")
.cte(name="included_parts", recursive=True)
.suffix_with(
"search depth first by part set ord1",
"cycle part set y_cycle to 1 default 0",
dialect="oracle",
)
)
incl_alias = included_parts.alias("pr1")
parts_alias = part.alias("p")
included_parts = included_parts.union_all(
select(
[
parts_alias.c.sub_part,
parts_alias.c.part,
parts_alias.c.quantity,
]
).where(parts_alias.c.part == incl_alias.c.sub_part)
)
q = select(
[
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).label("total_quantity"),
]
).group_by(included_parts.c.sub_part)
self.assert_compile(
q,
"WITH included_parts(sub_part, part, quantity) AS "
"(SELECT part.sub_part AS sub_part, part.part AS part, "
"part.quantity AS quantity FROM part WHERE part.part = :part_1 "
"UNION ALL SELECT p.sub_part AS sub_part, p.part AS part, "
"p.quantity AS quantity FROM part p, included_parts pr1 "
"WHERE p.part = pr1.sub_part) "
"search depth first by part set ord1 cycle part set "
"y_cycle to 1 default 0 "
"SELECT included_parts.sub_part, sum(included_parts.quantity) "
"AS total_quantity FROM included_parts "
"GROUP BY included_parts.sub_part",
)
def test_limit_one(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t])
c = s.compile(dialect=oracle.OracleDialect())
assert t.c.col1 in set(c._create_result_map()["col1"][1])
s = select([t]).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, "
"anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable) anon_2 WHERE ROWNUM <= "
"[POSTCOMPILE_param_1]) anon_1 WHERE ora_rn > "
"[POSTCOMPILE_param_2]",
checkparams={"param_1": 30, "param_2": 20},
)
c = s.compile(dialect=oracle.OracleDialect())
eq_(len(c._result_columns), 2)
assert t.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_one_firstrows(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t])
s = select([t]).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT /*+ FIRST_ROWS([POSTCOMPILE_ora_frow_1]) */ "
"anon_2.col1 AS col1, "
"anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable) anon_2 WHERE ROWNUM <= "
"[POSTCOMPILE_param_1]) anon_1 WHERE ora_rn > "
"[POSTCOMPILE_param_2]",
checkparams={"ora_frow_1": 10, "param_1": 30, "param_2": 20},
dialect=oracle.OracleDialect(optimize_limits=True),
)
def test_limit_two(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t]).limit(10).offset(20).subquery()
s2 = select([s.c.col1, s.c.col2])
self.assert_compile(
s2,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, "
"anon_2.col2 AS col2 "
"FROM (SELECT anon_3.col1 AS col1, anon_3.col2 AS col2, "
"ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) anon_3 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_2 "
"WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1",
checkparams={"param_1": 30, "param_2": 20},
)
self.assert_compile(
s2,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, "
"anon_2.col2 AS col2 "
"FROM (SELECT anon_3.col1 AS col1, anon_3.col2 AS col2, "
"ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) anon_3 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_2 "
"WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1",
)
c = s2.compile(dialect=oracle.OracleDialect())
eq_(len(c._result_columns), 2)
assert s.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_three(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t]).limit(10).offset(20).order_by(t.c.col2)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, "
"anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable ORDER BY "
"sometable.col2) anon_2 WHERE ROWNUM <= "
"[POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2]",
checkparams={"param_1": 30, "param_2": 20},
)
c = s.compile(dialect=oracle.OracleDialect())
eq_(len(c._result_columns), 2)
assert t.c.col1 in set(c._create_result_map()["col1"][1])
def test_limit_four(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t]).with_for_update().limit(10).order_by(t.c.col2)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable ORDER BY "
"sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] "
"FOR UPDATE",
checkparams={"param_1": 10},
)
def test_limit_four_firstrows(self):
t = table("sometable", column("col1"), column("col2"))
s = select([t]).with_for_update().limit(10).order_by(t.c.col2)
self.assert_compile(
s,
"SELECT /*+ FIRST_ROWS([POSTCOMPILE_ora_frow_1]) */ "
"anon_1.col1, anon_1.col2 FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable ORDER BY "
"sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] "
"FOR UPDATE",
checkparams={"param_1": 10, "ora_frow_1": 10},
dialect=oracle.OracleDialect(optimize_limits=True),
)
def test_limit_five(self):
t = table("sometable", column("col1"), column("col2"))
s = (
select([t])
.with_for_update()
.limit(10)
.offset(20)
.order_by(t.c.col2)
)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, "
"anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT "
"sometable.col1 AS col1, sometable.col2 AS "
"col2 FROM sometable ORDER BY "
"sometable.col2) anon_2 WHERE ROWNUM <= "
"[POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2] FOR "
"UPDATE",
checkparams={"param_1": 30, "param_2": 20},
)
def test_limit_six(self):
t = table("sometable", column("col1"), column("col2"))
s = (
select([t])
.limit(10)
.offset(literal(10) + literal(20))
.order_by(t.c.col2)
)
self.assert_compile(
s,
"SELECT anon_1.col1, anon_1.col2 FROM (SELECT anon_2.col1 AS "
"col1, anon_2.col2 AS col2, ROWNUM AS ora_rn FROM "
"(SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable ORDER BY sometable.col2) anon_2 WHERE "
"ROWNUM <= :param_1 + :param_2 + :param_3) anon_1 "
"WHERE ora_rn > :param_2 + :param_3",
checkparams={"param_1": 10, "param_2": 10, "param_3": 20},
)
def test_limit_special_quoting(self):
col = literal_column("SUM(ABC)").label("SUM(ABC)")
tbl = table("my_table")
query = select([col]).select_from(tbl).order_by(col).limit(100)
self.assert_compile(
query,
'SELECT anon_1."SUM(ABC)" FROM '
'(SELECT SUM(ABC) AS "SUM(ABC)" '
"FROM my_table ORDER BY SUM(ABC)) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]",
)
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True))
tbl = table("my_table")
query = select([col]).select_from(tbl).order_by(col).limit(100)
self.assert_compile(
query,
'SELECT anon_1."SUM(ABC)" FROM '
'(SELECT SUM(ABC) AS "SUM(ABC)" '
"FROM my_table ORDER BY SUM(ABC)) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]",
)
col = literal_column("SUM(ABC)").label("SUM(ABC)_")
tbl = table("my_table")
query = select([col]).select_from(tbl).order_by(col).limit(100)
self.assert_compile(
query,
'SELECT anon_1."SUM(ABC)_" FROM '
'(SELECT SUM(ABC) AS "SUM(ABC)_" '
"FROM my_table ORDER BY SUM(ABC)) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]",
)
col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True))
tbl = table("my_table")
query = select([col]).select_from(tbl).order_by(col).limit(100)
self.assert_compile(
query,
'SELECT anon_1."SUM(ABC)_" FROM '
'(SELECT SUM(ABC) AS "SUM(ABC)_" '
"FROM my_table ORDER BY SUM(ABC)) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]",
)
def test_for_update(self):
table1 = table(
"mytable", column("myid"), column("name"), column("description")
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
of=table1.c.myid
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 "
"FOR UPDATE OF mytable.myid",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
nowait=True, of=table1.c.myid
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 "
"FOR UPDATE OF mytable.myid NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
nowait=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
"mytable.myid, mytable.name NOWAIT",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
skip_locked=True, of=[table1.c.myid, table1.c.name]
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE OF "
"mytable.myid, mytable.name SKIP LOCKED",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(key_share=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(
read=True, key_share=True
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE",
)
ta = table1.alias()
self.assert_compile(
ta.select(ta.c.myid == 7).with_for_update(
of=[ta.c.myid, ta.c.name]
),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable mytable_1 "
"WHERE mytable_1.myid = :myid_1 FOR UPDATE OF "
"mytable_1.myid, mytable_1.name",
)
def test_for_update_of_w_limit_adaption_col_present(self):
table1 = table("mytable", column("myid"), column("name"))
self.assert_compile(
select([table1.c.myid, table1.c.name])
.where(table1.c.myid == 7)
.with_for_update(nowait=True, of=table1.c.name)
.limit(10),
"SELECT anon_1.myid, anon_1.name FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name "
"FROM mytable WHERE mytable.myid = :myid_1) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1] "
"FOR UPDATE OF anon_1.name NOWAIT",
checkparams={"param_1": 10, "myid_1": 7},
)
def test_for_update_of_w_limit_adaption_col_unpresent(self):
table1 = table("mytable", column("myid"), column("name"))
self.assert_compile(
select([table1.c.myid])
.where(table1.c.myid == 7)
.with_for_update(nowait=True, of=table1.c.name)
.limit(10),
"SELECT anon_1.myid FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name "
"FROM mytable WHERE mytable.myid = :myid_1) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1] "
"FOR UPDATE OF anon_1.name NOWAIT",
)
def test_for_update_of_w_limit_offset_adaption_col_present(self):
table1 = table("mytable", column("myid"), column("name"))
self.assert_compile(
select([table1.c.myid, table1.c.name])
.where(table1.c.myid == 7)
.with_for_update(nowait=True, of=table1.c.name)
.limit(10)
.offset(50),
"SELECT anon_1.myid, anon_1.name FROM "
"(SELECT anon_2.myid AS myid, anon_2.name AS name, "
"ROWNUM AS ora_rn "
"FROM (SELECT mytable.myid AS myid, mytable.name AS name "
"FROM mytable WHERE mytable.myid = :myid_1) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2] "
"FOR UPDATE OF anon_1.name NOWAIT",
checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
)
def test_for_update_of_w_limit_offset_adaption_col_unpresent(self):
table1 = table("mytable", column("myid"), column("name"))
self.assert_compile(
select([table1.c.myid])
.where(table1.c.myid == 7)
.with_for_update(nowait=True, of=table1.c.name)
.limit(10)
.offset(50),
"SELECT anon_1.myid FROM (SELECT anon_2.myid AS myid, "
"ROWNUM AS ora_rn, anon_2.name AS name "
"FROM (SELECT mytable.myid AS myid, mytable.name AS name "
"FROM mytable WHERE mytable.myid = :myid_1) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2] "
"FOR UPDATE OF anon_1.name NOWAIT",
checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
)
def test_for_update_of_w_limit_offset_adaption_partial_col_unpresent(self):
table1 = table("mytable", column("myid"), column("foo"), column("bar"))
self.assert_compile(
select([table1.c.myid, table1.c.bar])
.where(table1.c.myid == 7)
.with_for_update(nowait=True, of=[table1.c.foo, table1.c.bar])
.limit(10)
.offset(50),
"SELECT anon_1.myid, anon_1.bar FROM (SELECT anon_2.myid AS myid, "
"anon_2.bar AS bar, ROWNUM AS ora_rn, "
"anon_2.foo AS foo FROM (SELECT mytable.myid AS myid, "
"mytable.bar AS bar, "
"mytable.foo AS foo FROM mytable "
"WHERE mytable.myid = :myid_1) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2] "
"FOR UPDATE OF anon_1.foo, anon_1.bar NOWAIT",
checkparams={"param_1": 60, "param_2": 50, "myid_1": 7},
)
def test_limit_preserves_typing_information(self):
class MyType(TypeDecorator):
impl = Integer
stmt = select([type_coerce(column("x"), MyType).label("foo")]).limit(1)
dialect = oracle.dialect()
compiled = stmt.compile(dialect=dialect)
assert isinstance(compiled._create_result_map()["foo"][-1], MyType)
def test_use_binds_for_limits_disabled_one(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(
select([t]).limit(10),
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) anon_1 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]",
dialect=dialect,
)
def test_use_binds_for_limits_disabled_two(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(
select([t]).offset(10),
"SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
"anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) anon_2) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_1]",
dialect=dialect,
)
def test_use_binds_for_limits_disabled_three(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=False)
self.assert_compile(
select([t]).limit(10).offset(10),
"SELECT anon_1.col1, anon_1.col2 FROM (SELECT "
"anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2]",
dialect=dialect,
)
def test_use_binds_for_limits_enabled_one(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(
select([t]).limit(10),
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT sometable.col1 AS col1, "
"sometable.col2 AS col2 FROM sometable) anon_1 WHERE ROWNUM "
"<= [POSTCOMPILE_param_1]",
dialect=dialect,
)
def test_use_binds_for_limits_enabled_two(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(
select([t]).offset(10),
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, anon_2.col2 AS col2, "
"ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) anon_2) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_1]",
dialect=dialect,
)
def test_use_binds_for_limits_enabled_three(self):
t = table("sometable", column("col1"), column("col2"))
with testing.expect_deprecated(
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated."
):
dialect = oracle.OracleDialect(use_binds_for_limits=True)
self.assert_compile(
select([t]).limit(10).offset(10),
"SELECT anon_1.col1, anon_1.col2 FROM "
"(SELECT anon_2.col1 AS col1, anon_2.col2 AS col2, "
"ROWNUM AS ora_rn "
"FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 "
"FROM sometable) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2]",
dialect=dialect,
checkparams={"param_1": 20, "param_2": 10},
)
def test_long_labels_legacy_ident_length(self):
dialect = default.DefaultDialect()
dialect.max_identifier_length = 30
ora_dialect = oracle.dialect(max_identifier_length=30)
m = MetaData()
a_table = Table(
"thirty_characters_table_xxxxxx",
m,
Column("id", Integer, primary_key=True),
)
other_table = Table(
"other_thirty_characters_table_",
m,
Column("id", Integer, primary_key=True),
Column(
"thirty_characters_table_id",
Integer,
ForeignKey("thirty_characters_table_xxxxxx.id"),
primary_key=True,
),
)
anon = a_table.alias()
self.assert_compile(
select([other_table, anon])
.select_from(other_table.outerjoin(anon))
.apply_labels(),
"SELECT other_thirty_characters_table_.id "
"AS other_thirty_characters__1, "
"other_thirty_characters_table_.thirty_char"
"acters_table_id AS other_thirty_characters"
"__2, thirty_characters_table__1.id AS "
"thirty_characters_table__3 FROM "
"other_thirty_characters_table_ LEFT OUTER "
"JOIN thirty_characters_table_xxxxxx AS "
"thirty_characters_table__1 ON "
"thirty_characters_table__1.id = "
"other_thirty_characters_table_.thirty_char"
"acters_table_id",
dialect=dialect,
)
self.assert_compile(
select([other_table, anon])
.select_from(other_table.outerjoin(anon))
.apply_labels(),
"SELECT other_thirty_characters_table_.id "
"AS other_thirty_characters__1, "
"other_thirty_characters_table_.thirty_char"
"acters_table_id AS other_thirty_characters"
"__2, thirty_characters_table__1.id AS "
"thirty_characters_table__3 FROM "
"other_thirty_characters_table_ LEFT OUTER "
"JOIN thirty_characters_table_xxxxxx "
"thirty_characters_table__1 ON "
"thirty_characters_table__1.id = "
"other_thirty_characters_table_.thirty_char"
"acters_table_id",
dialect=ora_dialect,
)
def _test_outer_join_fixture(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable",
column("otherid", Integer),
column("othername", String),
)
table3 = table(
"thirdtable",
column("userid", Integer),
column("otherstuff", String),
)
return table1, table2, table3
def test_outer_join_one(self):
table1, table2, table3 = self._test_outer_join_fixture()
query = select(
[table1, table2],
or_(
table1.c.name == "fred",
table1.c.myid == 10,
table2.c.othername != "jack",
text("EXISTS (select yay from foo where boo = lar)"),
),
from_obj=[
outerjoin(table1, table2, table1.c.myid == table2.c.otherid)
],
)
self.assert_compile(
query,
"SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable, "
"myothertable WHERE (mytable.name = "
":name_1 OR mytable.myid = :myid_1 OR "
"myothertable.othername != :othername_1 OR "
"EXISTS (select yay from foo where boo = "
"lar)) AND mytable.myid = "
"myothertable.otherid(+)",
dialect=oracle.OracleDialect(use_ansi=False),
)
def test_outer_join_two(self):
table1, table2, table3 = self._test_outer_join_fixture()
query = table1.outerjoin(
table2, table1.c.myid == table2.c.otherid
).outerjoin(table3, table3.c.userid == table2.c.otherid)
self.assert_compile(
query.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid,"
" myothertable.othername, "
"thirdtable.userid, thirdtable.otherstuff "
"FROM mytable LEFT OUTER JOIN myothertable "
"ON mytable.myid = myothertable.otherid "
"LEFT OUTER JOIN thirdtable ON "
"thirdtable.userid = myothertable.otherid",
)
def test_outer_join_three(self):
table1, table2, table3 = self._test_outer_join_fixture()
query = table1.outerjoin(
table2, table1.c.myid == table2.c.otherid
).outerjoin(table3, table3.c.userid == table2.c.otherid)
self.assert_compile(
query.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid,"
" myothertable.othername, "
"thirdtable.userid, thirdtable.otherstuff "
"FROM mytable, myothertable, thirdtable "
"WHERE thirdtable.userid(+) = "
"myothertable.otherid AND mytable.myid = "
"myothertable.otherid(+)",
dialect=oracle.dialect(use_ansi=False),
)
def test_outer_join_four(self):
table1, table2, table3 = self._test_outer_join_fixture()
query = table1.join(table2, table1.c.myid == table2.c.otherid).join(
table3, table3.c.userid == table2.c.otherid
)
self.assert_compile(
query.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid,"
" myothertable.othername, "
"thirdtable.userid, thirdtable.otherstuff "
"FROM mytable, myothertable, thirdtable "
"WHERE thirdtable.userid = "
"myothertable.otherid AND mytable.myid = "
"myothertable.otherid",
dialect=oracle.dialect(use_ansi=False),
)
def test_outer_join_five(self):
table1, table2, table3 = self._test_outer_join_fixture()
query = table1.join(
table2, table1.c.myid == table2.c.otherid
).outerjoin(table3, table3.c.userid == table2.c.otherid)
self.assert_compile(
query.select().order_by(table1.c.name).limit(10).offset(5),
"SELECT anon_1.myid, anon_1.name, anon_1.description, "
"anon_1.otherid, "
"anon_1.othername, anon_1.userid, anon_1.otherstuff FROM "
"(SELECT anon_2.myid AS myid, anon_2.name AS name, "
"anon_2.description AS description, anon_2.otherid AS otherid, "
"anon_2.othername AS othername, anon_2.userid AS userid, "
"anon_2.otherstuff AS otherstuff, ROWNUM AS "
"ora_rn FROM (SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description, myothertable.otherid AS "
"otherid, myothertable.othername AS "
"othername, thirdtable.userid AS userid, "
"thirdtable.otherstuff AS otherstuff FROM "
"mytable, myothertable, thirdtable WHERE "
"thirdtable.userid(+) = "
"myothertable.otherid AND mytable.myid = "
"myothertable.otherid ORDER BY mytable.name) anon_2 "
"WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 "
"WHERE ora_rn > [POSTCOMPILE_param_2]",
checkparams={"param_1": 15, "param_2": 5},
dialect=oracle.dialect(use_ansi=False),
)
def test_outer_join_six(self):
table1, table2, table3 = self._test_outer_join_fixture()
subq = (
select([table1])
.select_from(
table1.outerjoin(table2, table1.c.myid == table2.c.otherid)
)
.alias()
)
q = select([table3]).select_from(
table3.outerjoin(subq, table3.c.userid == subq.c.myid)
)
self.assert_compile(
q,
"SELECT thirdtable.userid, "
"thirdtable.otherstuff FROM thirdtable "
"LEFT OUTER JOIN (SELECT mytable.myid AS "
"myid, mytable.name AS name, "
"mytable.description AS description FROM "
"mytable LEFT OUTER JOIN myothertable ON "
"mytable.myid = myothertable.otherid) "
"anon_1 ON thirdtable.userid = anon_1.myid",
dialect=oracle.dialect(use_ansi=True),
)
self.assert_compile(
q,
"SELECT thirdtable.userid, "
"thirdtable.otherstuff FROM thirdtable, "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid("
"+)) anon_1 WHERE thirdtable.userid = "
"anon_1.myid(+)",
dialect=oracle.dialect(use_ansi=False),
)
def test_outer_join_seven(self):
table1, table2, table3 = self._test_outer_join_fixture()
q = select([table1.c.name]).where(table1.c.name == "foo")
self.assert_compile(
q,
"SELECT mytable.name FROM mytable WHERE " "mytable.name = :name_1",
dialect=oracle.dialect(use_ansi=False),
)
def test_outer_join_eight(self):
table1, table2, table3 = self._test_outer_join_fixture()
subq = (
select([table3.c.otherstuff])
.where(table3.c.otherstuff == table1.c.name)
.label("bar")
)
q = select([table1.c.name, subq])
self.assert_compile(
q,
"SELECT mytable.name, (SELECT "
"thirdtable.otherstuff FROM thirdtable "
"WHERE thirdtable.otherstuff = "
"mytable.name) AS bar FROM mytable",
dialect=oracle.dialect(use_ansi=False),
)
def test_nonansi_plusses_everthing_in_the_condition(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable",
column("otherid", Integer),
column("othername", String),
)
stmt = select([table1]).select_from(
table1.outerjoin(
table2,
and_(
table1.c.myid == table2.c.otherid,
table2.c.othername > 5,
table1.c.name == "foo",
),
)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = "
"myothertable.otherid(+) AND myothertable.othername(+) > "
":othername_1 AND mytable.name = :name_1",
dialect=oracle.dialect(use_ansi=False),
)
stmt = select([table1]).select_from(
table1.outerjoin(
table2,
and_(
table1.c.myid == table2.c.otherid,
table2.c.othername == None,
table1.c.name == None,
),
)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = "
"myothertable.otherid(+) AND myothertable.othername(+) IS NULL "
"AND mytable.name IS NULL",
dialect=oracle.dialect(use_ansi=False),
)
def test_nonansi_nested_right_join(self):
a = table("a", column("a"))
b = table("b", column("b"))
c = table("c", column("c"))
j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False),
)
j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b(+) AND b.b = c.c",
dialect=oracle.OracleDialect(use_ansi=False),
)
j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)
self.assert_compile(
select([j]),
"SELECT a.a, b.b, c.c FROM a, b, c "
"WHERE a.a = b.b AND b.b = c.c(+)",
dialect=oracle.OracleDialect(use_ansi=False),
)
    def test_alias_outer_join(self):
        """LEFT OUTER JOIN against an anonymous alias renders the generated
        alias name (address_types_1) in both the join clause and the
        column list.
        """
        address_types = table("address_types", column("id"), column("name"))
        addresses = table(
            "addresses",
            column("id"),
            column("user_id"),
            column("address_type_id"),
            column("email_address"),
        )
        at_alias = address_types.alias()
        # NOTE(review): the ORDER BY uses the *unaliased* address_types.c.id,
        # so the expected SQL ends with "address_types.id" rather than the
        # alias -- presumably intentional to pin that behaviour; confirm.
        s = (
            select([at_alias, addresses])
            .select_from(
                addresses.outerjoin(
                    at_alias, addresses.c.address_type_id == at_alias.c.id
                )
            )
            .where(addresses.c.user_id == 7)
            .order_by(addresses.c.id, address_types.c.id)
        )
        self.assert_compile(
            s,
            "SELECT address_types_1.id, "
            "address_types_1.name, addresses.id, "
            "addresses.user_id, addresses.address_type_"
            "id, addresses.email_address FROM "
            "addresses LEFT OUTER JOIN address_types "
            "address_types_1 ON addresses.address_type_"
            "id = address_types_1.id WHERE "
            "addresses.user_id = :user_id_1 ORDER BY "
            "addresses.id, address_types.id",
        )
def test_returning_insert(self):
t1 = table("t1", column("c1"), column("c2"), column("c3"))
self.assert_compile(
t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3),
"INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
"t1.c2, t1.c3 INTO :ret_0, :ret_1",
)
    def test_returning_insert_functional(self):
        """A SQL function expression in RETURNING compiles, and is keyed by
        its function name ("lower") in the compiled result map.
        """
        t1 = table(
            "t1", column("c1"), column("c2", String()), column("c3", String())
        )
        fn = func.lower(t1.c.c2, type_=String())
        stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3)
        compiled = stmt.compile(dialect=oracle.dialect())
        eq_(
            compiled._create_result_map(),
            {
                "c3": ("c3", (t1.c.c3, "c3", "c3"), t1.c.c3.type),
                "lower": ("lower", (fn, "lower", None), fn.type),
            },
        )
        self.assert_compile(
            stmt,
            "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
            "lower(t1.c2), t1.c3 INTO :ret_0, :ret_1",
        )
    def test_returning_insert_labeled(self):
        """Labels on RETURNING columns are dropped in the rendered SQL: the
        expected statement names the raw columns, not c2_l/c3_l.
        """
        t1 = table("t1", column("c1"), column("c2"), column("c3"))
        self.assert_compile(
            t1.insert()
            .values(c1=1)
            .returning(t1.c.c2.label("c2_l"), t1.c.c3.label("c3_l")),
            "INSERT INTO t1 (c1) VALUES (:c1) RETURNING "
            "t1.c2, t1.c3 INTO :ret_0, :ret_1",
        )
    def test_returning_insert_computed(self):
        """A Computed (generated) column may appear in INSERT..RETURNING."""
        m = MetaData()
        t1 = Table(
            "t1",
            m,
            Column("id", Integer, primary_key=True),
            Column("foo", Integer),
            Column("bar", Integer, Computed("foo + 42")),
        )
        self.assert_compile(
            t1.insert().values(id=1, foo=5).returning(t1.c.bar),
            "INSERT INTO t1 (id, foo) VALUES (:id, :foo) "
            "RETURNING t1.bar INTO :ret_0",
        )
    def test_returning_update_computed_warning(self):
        """UPDATE..RETURNING a Computed column still compiles, but emits a
        warning that computed columns don't work with Oracle UPDATE.
        """
        m = MetaData()
        t1 = Table(
            "t1",
            m,
            Column("id", Integer, primary_key=True),
            Column("foo", Integer),
            Column("bar", Integer, Computed("foo + 42")),
        )
        with testing.expect_warnings(
            "Computed columns don't work with Oracle UPDATE"
        ):
            self.assert_compile(
                t1.update().values(id=1, foo=5).returning(t1.c.bar),
                "UPDATE t1 SET id=:id, foo=:foo RETURNING t1.bar INTO :ret_0",
            )
def test_compound(self):
t1 = table("t1", column("c1"), column("c2"), column("c3"))
t2 = table("t2", column("c1"), column("c2"), column("c3"))
self.assert_compile(
union(t1.select(), t2.select()),
"SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION "
"SELECT t2.c1, t2.c2, t2.c3 FROM t2",
)
self.assert_compile(
except_(t1.select(), t2.select()),
"SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS "
"SELECT t2.c1, t2.c2, t2.c3 FROM t2",
)
    def test_no_paren_fns(self):
        """Certain niladic functions render without parentheses on Oracle
        (uid, sysdate, USER, CURRENT_TIMESTAMP), while window/rank
        functions keep their empty parens.
        """
        for fn, expected in [
            (func.uid(), "uid"),
            (func.UID(), "UID"),
            (func.sysdate(), "sysdate"),
            (func.row_number(), "row_number()"),
            (func.rank(), "rank()"),
            (func.now(), "CURRENT_TIMESTAMP"),
            (func.current_timestamp(), "CURRENT_TIMESTAMP"),
            (func.user(), "USER"),
        ]:
            self.assert_compile(fn, expected)
def test_create_index_alt_schema(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer), schema="alt_schema")
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x)),
"CREATE INDEX alt_schema.bar ON alt_schema.foo (x)",
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer))
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)",
)
    def test_table_options(self):
        """prefixes + oracle_on_commit render GLOBAL TEMPORARY ... ON COMMIT."""
        m = MetaData()
        t = Table(
            "foo",
            m,
            Column("x", Integer),
            prefixes=["GLOBAL TEMPORARY"],
            oracle_on_commit="PRESERVE ROWS",
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE GLOBAL TEMPORARY TABLE "
            "foo (x INTEGER) ON COMMIT PRESERVE ROWS",
        )
def test_create_table_compress(self):
m = MetaData()
tbl1 = Table(
"testtbl1", m, Column("data", Integer), oracle_compress=True
)
tbl2 = Table(
"testtbl2", m, Column("data", Integer), oracle_compress="OLTP"
)
self.assert_compile(
schema.CreateTable(tbl1),
"CREATE TABLE testtbl1 (data INTEGER) COMPRESS",
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE testtbl2 (data INTEGER) " "COMPRESS FOR OLTP",
)
    def test_create_index_bitmap_compress(self):
        """oracle_compress renders COMPRESS (bool) or COMPRESS <n> (int);
        oracle_bitmap renders CREATE BITMAP INDEX.
        """
        m = MetaData()
        tbl = Table("testtbl", m, Column("data", Integer))
        idx1 = Index("idx1", tbl.c.data, oracle_compress=True)
        idx2 = Index("idx2", tbl.c.data, oracle_compress=1)
        idx3 = Index("idx3", tbl.c.data, oracle_bitmap=True)
        self.assert_compile(
            schema.CreateIndex(idx1),
            "CREATE INDEX idx1 ON testtbl (data) COMPRESS",
        )
        self.assert_compile(
            schema.CreateIndex(idx2),
            "CREATE INDEX idx2 ON testtbl (data) COMPRESS 1",
        )
        self.assert_compile(
            schema.CreateIndex(idx3),
            "CREATE BITMAP INDEX idx3 ON testtbl (data)",
        )
    @testing.combinations(
        ("no_persisted", "", "ignore"),
        ("persisted_none", "", None),
        ("persisted_false", " VIRTUAL", False),
        id_="iaa",
    )
    def test_column_computed(self, text, persisted):
        """Computed columns render GENERATED ALWAYS AS (...), with VIRTUAL
        appended when persisted=False; omitting persisted (the "ignore"
        case) or passing None adds no suffix.
        """
        m = MetaData()
        kwargs = {"persisted": persisted} if persisted != "ignore" else {}
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", **kwargs)),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
            "ALWAYS AS (x + 2)%s)" % text,
        )
    def test_column_computed_persisted_true(self):
        """persisted=True ("stored") is rejected by the Oracle dialect with
        a CompileError at DDL compile time.
        """
        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", persisted=True)),
        )
        assert_raises_message(
            exc.CompileError,
            r".*Oracle computed columns do not support 'stored' ",
            schema.CreateTable(t).compile,
            dialect=oracle.dialect(),
        )
class SequenceTest(fixtures.TestBase, AssertsCompiledSQL):
    """Oracle sequence name formatting: schema qualification and quoting."""

    def test_basic(self):
        """Unschema'd names pass through; schemas qualify; mixed case quotes."""
        preparer = oracle.OracleDialect().identifier_preparer
        cases = [
            (Sequence("my_seq_no_schema"), "my_seq_no_schema"),
            (Sequence("my_seq", schema="some_schema"), "some_schema.my_seq"),
            (Sequence("My_Seq", schema="Some_Schema"), '"Some_Schema"."My_Seq"'),
        ]
        for seq, expected in cases:
            assert preparer.format_sequence(seq) == expected
| true | true |
f73eb9d2dfcbcffa1873b6cfe413627568d3ab49 | 127 | py | Python | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncqk01 | d18125ea324e31b818dbca7618bcb27bbd012a1e | [
"Apache-2.0"
] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncqk01 | d18125ea324e31b818dbca7618bcb27bbd012a1e | [
"Apache-2.0"
] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-ncqk01 | d18125ea324e31b818dbca7618bcb27bbd012a1e | [
"Apache-2.0"
] | null | null | null | from solutions.SUM import sum_solution
class TestSum:
    """Unit tests for solutions.SUM.sum_solution."""

    def test_sum(self):
        """compute() should return the sum of its two arguments."""
        assert sum_solution.compute(1, 2) == 3
| 18.142857 | 46 | 0.700787 | from solutions.SUM import sum_solution
class TestSum:
def test_sum(self):
assert sum_solution.compute(1, 2) == 3
| true | true |
f73eba392c5c28d7e301a181792a51fd07e32cf2 | 2,349 | py | Python | notifico/models/channel.py | Pix1234/notifico | 144b8f1584d271100249f6cd05db217ca3b22e17 | [
"MIT"
] | null | null | null | notifico/models/channel.py | Pix1234/notifico | 144b8f1584d271100249f6cd05db217ca3b22e17 | [
"MIT"
] | null | null | null | notifico/models/channel.py | Pix1234/notifico | 144b8f1584d271100249f6cd05db217ca3b22e17 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
__all__ = ('Channel',)
import datetime
from sqlalchemy import func
from notifico import db
from notifico.models.bot import BotEvent
class Channel(db.Model):
    """An IRC channel that a project's notifications are delivered to."""

    id = db.Column(db.Integer, primary_key=True)
    # Creation timestamp, defaulted to UTC now on insert.
    created = db.Column(db.TIMESTAMP(), default=datetime.datetime.utcnow)
    # IRC channel name (e.g. "#notifico").
    channel = db.Column(db.String(80), nullable=False)
    # IRC network hostname.
    host = db.Column(db.String(255), nullable=False)
    port = db.Column(db.Integer, default=6667)
    # Connect over SSL/TLS when True.
    ssl = db.Column(db.Boolean, default=False)
    # Publicly visible/listable when True.
    public = db.Column(db.Boolean, default=False)
    project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
    # Owning project; deleting a project cascades to its channels.
    project = db.relationship('Project', backref=db.backref(
        'channels', order_by=id, lazy='dynamic', cascade='all, delete-orphan'
    ))
    @classmethod
    def new(cls, channel, host, port=6667, ssl=False, public=False):
        """Build an unsaved Channel with the given connection settings."""
        c = cls()
        c.channel = channel
        c.host = host
        c.port = port
        c.ssl = ssl
        c.public = public
        return c
    @classmethod
    def channel_count_by_network(cls):
        """Yield (network host, public channel count) pairs, busiest first."""
        q = (
            db.session.query(
                Channel.host, func.count(Channel.channel).label('count')
            )
            .filter_by(public=True)
            .group_by(Channel.host)
            # NOTE(review): textual "-count" orders by the negated count
            # expression (i.e. descending by count) -- confirm this holds
            # on the target database/SQLAlchemy version.
            .order_by('-count')
        )
        for network, channel_count in q:
            yield network, channel_count
    def last_event(self):
        """
        Returns the latest BotEvent to occur for this channel.
        """
        return BotEvent.query.filter_by(
            host=self.host,
            port=self.port,
            ssl=self.ssl,
            channel=self.channel
        ).order_by(BotEvent.created.desc()).first()
    @classmethod
    def visible(cls, q, user=None):
        """
        Modifies the sqlalchemy query `q` to only show channels accessible
        to `user`. If `user` is ``None``, only shows public channels in
        public projects.
        """
        # Imported here to avoid a circular import at module load time.
        from notifico.models import Project
        if user and user.in_group('admin'):
            # We don't do any filtering for admins,
            # who should have full visibility.
            pass
        else:
            q = q.join(Channel.project).filter(
                Project.public == True,
                Channel.public == True
            )
        return q
| 29.3625 | 77 | 0.584078 |
__all__ = ('Channel',)
import datetime
from sqlalchemy import func
from notifico import db
from notifico.models.bot import BotEvent
class Channel(db.Model):
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.TIMESTAMP(), default=datetime.datetime.utcnow)
channel = db.Column(db.String(80), nullable=False)
host = db.Column(db.String(255), nullable=False)
port = db.Column(db.Integer, default=6667)
ssl = db.Column(db.Boolean, default=False)
public = db.Column(db.Boolean, default=False)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
project = db.relationship('Project', backref=db.backref(
'channels', order_by=id, lazy='dynamic', cascade='all, delete-orphan'
))
@classmethod
def new(cls, channel, host, port=6667, ssl=False, public=False):
c = cls()
c.channel = channel
c.host = host
c.port = port
c.ssl = ssl
c.public = public
return c
@classmethod
def channel_count_by_network(cls):
q = (
db.session.query(
Channel.host, func.count(Channel.channel).label('count')
)
.filter_by(public=True)
.group_by(Channel.host)
.order_by('-count')
)
for network, channel_count in q:
yield network, channel_count
def last_event(self):
return BotEvent.query.filter_by(
host=self.host,
port=self.port,
ssl=self.ssl,
channel=self.channel
).order_by(BotEvent.created.desc()).first()
@classmethod
def visible(cls, q, user=None):
from notifico.models import Project
if user and user.in_group('admin'):
# who should have full visibility.
pass
else:
q = q.join(Channel.project).filter(
Project.public == True,
Channel.public == True
)
return q
| true | true |
f73ebad78fbe9fd650b35e3ecb718a0939131729 | 302 | py | Python | docs/conf.py | rahulunair/haxo | 6a4d333ecfa8a347d6a2db63a833c2bfab1278c8 | [
"Unlicense"
] | 1 | 2021-07-06T16:26:34.000Z | 2021-07-06T16:26:34.000Z | docs/conf.py | Meadosc/haxo | 7a9b6ce20861ad342865b496d1c2ba75970d4093 | [
"Unlicense"
] | 7 | 2020-06-15T18:57:20.000Z | 2020-06-23T22:32:06.000Z | docs/conf.py | Meadosc/haxo | 7a9b6ce20861ad342865b496d1c2ba75970d4093 | [
"Unlicense"
] | 2 | 2020-06-15T20:58:06.000Z | 2020-06-18T04:23:34.000Z | """sphinx config."""
from datetime import datetime
project = "haxo"
author = "rahul"
master_doc = 'index'
copyright = f"2020, {author}"
copyright = f"{datetime.now().year}, {author}"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_autodoc_typehints"]
html_static_path = ["_static"]
| 27.454545 | 86 | 0.715232 | from datetime import datetime
project = "haxo"
author = "rahul"
master_doc = 'index'
copyright = f"2020, {author}"
copyright = f"{datetime.now().year}, {author}"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_autodoc_typehints"]
html_static_path = ["_static"]
| true | true |
f73ebb50b84874c31308aadefde84f08553481f4 | 1,632 | py | Python | src/colour_displayer.py | Artemis21/image-analysis | 92d8edc627817bae6df2656f0f2baa4f2388a195 | [
"MIT"
] | null | null | null | src/colour_displayer.py | Artemis21/image-analysis | 92d8edc627817bae6df2656f0f2baa4f2388a195 | [
"MIT"
] | null | null | null | src/colour_displayer.py | Artemis21/image-analysis | 92d8edc627817bae6df2656f0f2baa4f2388a195 | [
"MIT"
] | null | null | null | """Tool for displaying a selection of colours."""
import math
import pathlib
from PIL import Image, ImageDraw, ImageFont
# Bundled TrueType font, loaded once at import time for swatch labels.
_font_path = str(pathlib.Path(__file__).parent.absolute() / 'res' / 'font.ttf')
FONT = ImageFont.truetype(_font_path, size=20)
class ColourDisplayer:
    """Render a list of RGB colours as a grid of labelled swatches."""

    def __init__(self, colours: list[tuple[int, int, int]]):
        """Store the colours; image/draw state is created in display()."""
        self.colours = colours
        self.im = None
        self.draw = None

    def display(self) -> Image.Image:
        """Draw all colours into a grid image and return it.

        The column count grows with the square root of the number of
        colours, keeping the grid roughly balanced for 100x30 cells.
        """
        # max(1, ...) fixes a crash: for 0 (or very few) colours,
        # round(...) yielded 0 columns and math.ceil then divided by zero.
        columns = max(1, round((0.3 * len(self.colours)) ** 0.5))
        rows = math.ceil(len(self.colours) / columns)
        self.im = Image.new('RGB', (columns * 100, rows * 30))
        self.draw = ImageDraw.Draw(self.im)
        row = column = 0
        for colour in self.colours:
            self.draw_colour(colour, row, column)
            column += 1
            if column >= columns:
                column = 0
                row += 1
        return self.im

    def draw_colour(self, colour: tuple[int, int, int], row: int, column: int):
        """Draw one 100x30 swatch with its hex code at (row, column)."""
        text = '#{0:0>2x}{1:0>2x}{2:0>2x}'.format(*colour).upper()
        # Black text on light swatches, white on dark ones, using the
        # mean channel value as a cheap luminance proxy.
        if sum(colour) / 3 > 128:
            text_colour = (0, 0, 0)
        else:
            text_colour = (255, 255, 255)
        x_start = column * 100
        y_start = row * 30
        self.draw.rectangle(
            (x_start, y_start, x_start + 100, y_start + 30), fill=colour
        )
        self.draw.text(
            (x_start + 8, y_start + 3), text, fill=text_colour, font=FONT
        )
| 32 | 79 | 0.557598 | import math
import pathlib
from PIL import Image, ImageDraw, ImageFont
_font_path = str(pathlib.Path(__file__).parent.absolute() / 'res' / 'font.ttf')
FONT = ImageFont.truetype(_font_path, size=20)
class ColourDisplayer:
def __init__(self, colours: list[tuple[int, int, int]]):
self.colours = colours
self.im = None
self.draw = None
def display(self) -> Image.Image:
columns = round((0.3 * len(self.colours)) ** 0.5)
rows = math.ceil(len(self.colours) / columns)
self.im = Image.new('RGB', (columns * 100, rows * 30))
self.draw = ImageDraw.Draw(self.im)
row = column = 0
for colour in self.colours:
self.draw_colour(colour, row, column)
column += 1
if column >= columns:
column = 0
row += 1
return self.im
def draw_colour(self, colour: tuple[int, int, int], row: int, column: int):
text = '#{0:0>2x}{1:0>2x}{2:0>2x}'.format(*colour).upper()
if sum(colour) / 3 > 128:
text_colour = (0, 0, 0)
else:
text_colour = (255, 255, 255)
x_start = column * 100
y_start = row * 30
self.draw.rectangle(
(x_start, y_start, x_start + 100, y_start + 30), fill=colour
)
self.draw.text(
(x_start + 8, y_start + 3), text, fill=text_colour, font=FONT
)
| true | true |
f73ebd450de99712371c00863a5bab907e0638f9 | 1,451 | py | Python | profiler/mpops/ms_gpu.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | profiler/mpops/ms_gpu.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | profiler/mpops/ms_gpu.py | BUPT-GAMMA/GammaGL | 2b9f32e1ac3533cb75a063243e8a2fa654466d18 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time : 2022/04/14 08:36
# @Author : clear
# @FileName: ms_gpu.py
import os
# Select the MindSpore backend and a single visible GPU before
# tensorlayerx is imported (both variables are read at import time).
os.environ['TL_BACKEND'] = 'mindspore'
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import sys
sys.path.insert(0, os.path.abspath('../../'))
import time
import numpy as np
import tensorlayerx as tlx
from gammagl.mpops import *

# Benchmark fixture: a saved (2, E) COO edge index plus a random
# E x 500 float32 message matrix.
edge_index = np.load('edge_index.npy')
num_nodes = int(np.max(edge_index))+1
src = edge_index[0,:]
dst = edge_index[1,:]
src = tlx.convert_to_tensor(src, tlx.int32)
dst = tlx.convert_to_tensor(dst, tlx.int32)
msg = tlx.convert_to_tensor(np.random.randn(edge_index.shape[1], 500), dtype=tlx.float32)

# Time 200 iterations of the *unsorted* segment reduction.
start_t = time.time()
for j in range(200):
    # msg = tlx.gather(x, src)
    # unsorted_segment_sum(msg, dst, num_nodes)
    # unsorted_segment_mean(msg, dst, num_nodes)
    unsorted_segment_max(msg, dst, num_nodes)
print("{:.3f}".format(time.time()-start_t))
# pf.stop()
# print(pf.output_text(unicode=True, color=True))

# Sort the segment ids, as the sorted segment_* ops require.
# NOTE(review): msg is NOT reordered by the same permutation, so the
# reduced values are not meaningful -- acceptable for pure timing, but
# confirm before comparing results between the two passes.
dst = tlx.convert_to_numpy(dst)
idx = np.argsort(dst)
dst = tlx.gather(tlx.convert_to_tensor(dst, dtype=tlx.int32), tlx.convert_to_tensor(idx,dtype=tlx.int32))
# pf.start()

# Time 200 iterations of the *sorted* segment reduction.
start_t = time.time()
for j in range(200):
    # msg = tlx.gather(x, src)
    # segment_sum(msg, dst, num_nodes)
    # segment_mean(msg, dst, num_nodes)
    segment_max(msg, dst, num_nodes)
print("{:.3f}".format(time.time()-start_t))
# pf.stop()
# print(pf.output_text(unicode=True, color=True))
import os
os.environ['TL_BACKEND'] = 'mindspore'
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import sys
sys.path.insert(0, os.path.abspath('../../'))
import time
import numpy as np
import tensorlayerx as tlx
from gammagl.mpops import *
edge_index = np.load('edge_index.npy')
num_nodes = int(np.max(edge_index))+1
src = edge_index[0,:]
dst = edge_index[1,:]
src = tlx.convert_to_tensor(src, tlx.int32)
dst = tlx.convert_to_tensor(dst, tlx.int32)
msg = tlx.convert_to_tensor(np.random.randn(edge_index.shape[1], 500), dtype=tlx.float32)
start_t = time.time()
for j in range(200):
unsorted_segment_max(msg, dst, num_nodes)
print("{:.3f}".format(time.time()-start_t))
dst = tlx.convert_to_numpy(dst)
idx = np.argsort(dst)
dst = tlx.gather(tlx.convert_to_tensor(dst, dtype=tlx.int32), tlx.convert_to_tensor(idx,dtype=tlx.int32))
start_t = time.time()
for j in range(200):
segment_max(msg, dst, num_nodes)
print("{:.3f}".format(time.time()-start_t))
| true | true |
f73ebd7a90e6737bcdd743d1e7b0b75e660f5890 | 2,772 | py | Python | docs/conf.py | Miksus/red-base | 4c272e8cb2325b51f6293f608a773e011b1d05da | [
"MIT"
] | null | null | null | docs/conf.py | Miksus/red-base | 4c272e8cb2325b51f6293f608a773e011b1d05da | [
"MIT"
] | null | null | null | docs/conf.py | Miksus/red-base | 4c272e8cb2325b51f6293f608a773e011b1d05da | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import sphinx_book_theme
import sphinx_book_theme
import os
import sys
# Make the package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))
print(f"Root dir: {sys.path[0]}")
# -- Project information -----------------------------------------------------
project = 'redbird'
copyright = '2022, Mikael Koli'
author = 'Mikael Koli'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.doctest',
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.napoleon',
    'sphinx_book_theme',
]
# Prepended to every reST source file; pulls in S5 text-role definitions.
rst_prolog = """
.. include:: <s5defs.txt>
"""
# Extension settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
# Type hints are not rendered by autodoc.
autodoc_typehints = 'none'
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_logo = "logo.png"
html_theme_options = {
    "repository_url": "https://github.com/Miksus/red-bird",
    "use_repository_button": True,
    "repository_branch": "master",
    "use_issues_button": True,
    "use_download_button": True,
    "use_fullscreen_button": True,
    #"use_edit_page_button": True,
}
#html_sidebars = {}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_title = "Repository patterns for Python"
html_theme = 'sphinx_book_theme'
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
    'css/types.css',
    'css/colors.css',
    "css/custom.css",
]
import sphinx_book_theme
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
print(f"Root dir: {sys.path[0]}")
project = 'redbird'
copyright = '2022, Mikael Koli'
author = 'Mikael Koli'
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx_book_theme',
]
rst_prolog = """
.. include:: <s5defs.txt>
"""
napoleon_google_docstring = True
napoleon_numpy_docstring = True
autodoc_typehints = 'none'
autodoc_member_order = 'bysource'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_logo = "logo.png"
html_theme_options = {
"repository_url": "https://github.com/Miksus/red-bird",
"use_repository_button": True,
"repository_branch": "master",
"use_issues_button": True,
"use_download_button": True,
"use_fullscreen_button": True,
}
html_title = "Repository patterns for Python"
html_theme = 'sphinx_book_theme'
html_favicon = 'favicon.ico'
html_static_path = ['_static']
html_css_files = [
'css/types.css',
'css/colors.css',
"css/custom.css",
] | true | true |
f73ebef40d6836b43a2d977f82de4f291d8af889 | 3,318 | py | Python | emeraldbgc/download_data.py | Finn-Lab/emeraldBGC | 8cd625d5d293725a7cb666c0fa733591534aab08 | [
"Apache-2.0"
] | null | null | null | emeraldbgc/download_data.py | Finn-Lab/emeraldBGC | 8cd625d5d293725a7cb666c0fa733591534aab08 | [
"Apache-2.0"
] | null | null | null | emeraldbgc/download_data.py | Finn-Lab/emeraldBGC | 8cd625d5d293725a7cb666c0fa733591534aab08 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2021 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import hashlib
import os
import sys
import tarfile
import requests
PKG_DIRECTORY = os.path.abspath( os.path.dirname(__file__) )
BIN_DIRECTORY = os.path.abspath( os.path.dirname(sys.argv[0]) )
INTERPROSCAN_URL = "http://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/5.52-86.0/interproscan-5.52-86.0-64-bit.tar.gz"
INTERPROSCAN_MD5_URL = "http://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/5.52-86.0/interproscan-5.52-86.0-64-bit.tar.gz.md5"
INTERPROSCAN_TAR_DEST = os.path.join(PKG_DIRECTORY, os.path.basename(INTERPROSCAN_URL))
INTERPROSCAN_DEST = os.path.join(PKG_DIRECTORY, "interproscan-5.52-86.0")
def url_file(url, dest=None, chunk_size=254):
    """Download *url*.

    If *dest* is given, stream the response body to that file path and
    return None; otherwise return the whole body decoded as UTF-8.

    Raises Exception("Download error"), chained to the underlying
    requests error, on network failure or HTTP error status.
    """
    try:
        r = requests.get(url, stream=True)
        # Previously an HTTP 404/500 error page was silently saved as the
        # payload; fail loudly on error statuses instead.
        r.raise_for_status()
        if dest:
            with open(dest, "wb") as h:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    h.write(chunk)
        else:
            return r.content.decode("utf8")
    except requests.RequestException as err:
        # Narrowed from a bare "except:" and chained so the root cause
        # (DNS failure, timeout, bad status) survives in the traceback.
        raise Exception("Download error") from err
def download_tar(chunk_size=254):
    """Fetch the InterProScan tarball unless it is already present.

    Returns the tarball path when it already exists; exits the process
    when the decompressed install directory exists; otherwise downloads
    the tarball and verifies its MD5 against the published .md5 file.
    (chunk_size is currently unused.)
    """
    if os.path.isfile(INTERPROSCAN_TAR_DEST):
        print("{} exists".format(INTERPROSCAN_TAR_DEST))
        return INTERPROSCAN_TAR_DEST
    elif os.path.isdir(INTERPROSCAN_DEST):
        print("InterProScan already decompressed and in place")
        sys.exit()
    else:
        url_file(INTERPROSCAN_URL, INTERPROSCAN_TAR_DEST)
    print("Check MD5")
    # Fetch the published checksum (".md5" files are "<hash>  <filename>").
    # Bug fix: this previously referenced the undefined name
    # INTERPROSCAN_MD5, raising NameError after every fresh download.
    ori_md5 = url_file(INTERPROSCAN_MD5_URL).split()[0]
    dw_md5 = checkMD5(INTERPROSCAN_TAR_DEST)
    assert dw_md5 == ori_md5, "{} is corrupt (wrong md5 checksum)".format(
        INTERPROSCAN_TAR_DEST
    )
    print("Downloaded and correct MD5sum")
def checkMD5(file):
    """Return the hex MD5 digest of *file*.

    Reads in 1 MiB chunks so multi-gigabyte tarballs (InterProScan is
    several GB) are hashed without loading them fully into memory.
    """
    md5_hash = hashlib.md5()
    with open(file, "rb") as h:
        for chunk in iter(lambda: h.read(1 << 20), b""):
            md5_hash.update(chunk)
    return md5_hash.hexdigest()
def decompress_file(ori, dest):
    """Extract the tar archive *ori* into directory *dest*.

    NOTE(review): extractall() trusts member paths; the archive here comes
    from the official EBI FTP, but do not reuse this on untrusted input.
    """
    try:
        with tarfile.open(ori) as tar:
            tar.extractall(path=dest)
    except (tarfile.TarError, OSError) as err:
        # Narrowed from a bare "except:" and chained so the original
        # extraction error is preserved.
        raise Exception("Error decompressing") from err
def clean():
    """Delete the downloaded tarball after successful extraction."""
    os.remove(INTERPROSCAN_TAR_DEST)
def main():
    """Download, verify, and unpack InterProScan next to this package."""
    print("Downloading InterProScan")
    download_tar()
    print("Decompressing files")
    decompress_file(INTERPROSCAN_TAR_DEST, PKG_DIRECTORY)
    print("Cleaning...")
    clean()
    print("create symlink")
    # os.symlink( os.path.join( INTERPROSCAN_DEST, 'interproscan.sh' ), BIN_DIRECTORY )
    print("DONE!")


if __name__ == "__main__":
    main()
| 31.301887 | 124 | 0.676914 |
import glob
import hashlib
import os
import sys
import tarfile
import requests
PKG_DIRECTORY = os.path.abspath( os.path.dirname(__file__) )
BIN_DIRECTORY = os.path.abspath( os.path.dirname(sys.argv[0]) )
INTERPROSCAN_URL = "http://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/5.52-86.0/interproscan-5.52-86.0-64-bit.tar.gz"
INTERPROSCAN_MD5_URL = "http://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/5.52-86.0/interproscan-5.52-86.0-64-bit.tar.gz.md5"
INTERPROSCAN_TAR_DEST = os.path.join(PKG_DIRECTORY, os.path.basename(INTERPROSCAN_URL))
INTERPROSCAN_DEST = os.path.join(PKG_DIRECTORY, "interproscan-5.52-86.0")
def url_file(url, dest=None, chunk_size=254):
try:
r = requests.get(url, stream=True)
if dest:
with open(dest, "wb") as h:
for chunk in r.iter_content(chunk_size=chunk_size):
h.write(chunk)
else:
return r.content.decode("utf8")
except:
raise Exception("Download error")
def download_tar(chunk_size=254):
if os.path.isfile(INTERPROSCAN_TAR_DEST):
print("{} exists".format(INTERPROSCAN_TAR_DEST))
return INTERPROSCAN_TAR_DEST
elif os.path.isdir(INTERPROSCAN_DEST):
print("InterProScan already decompressed and in place")
sys.exit()
else:
url_file(INTERPROSCAN_URL, INTERPROSCAN_TAR_DEST)
print("Check MD5")
ori_md5 = INTERPROSCAN_MD5
dw_md5 = checkMD5(INTERPROSCAN_TAR_DEST)
assert dw_md5 == ori_md5, "{} is corrupt (wrong md5 checksum)".format(
INTERPROSCAN_TAR_DEST
)
print("Downloaded and correct MD5sum")
def checkMD5(file):
md5_hash = hashlib.md5()
with open(file, "rb") as h:
models_tar = h.read()
md5_hash.update(models_tar)
digest = md5_hash.hexdigest()
return digest
def decompress_file(ori, dest):
try:
with tarfile.open(ori) as tar:
tar.extractall(path=dest)
except:
raise Exception("Error decompressing")
def clean():
os.remove(INTERPROSCAN_TAR_DEST)
def main():
print("Downloading InterProScan")
download_tar()
print("Decompressing files")
decompress_file(INTERPROSCAN_TAR_DEST, PKG_DIRECTORY)
print("Cleaning...")
clean()
print("create symlink")
print("DONE!")
if __name__ == "__main__":
main()
| true | true |
f73ebf3b488d1a77666e6d9a141e2593cff335fe | 754 | py | Python | queue/crud.py | maransowthri/data-structures-algorithms | 7558d85573df7f5eeea2b4fd8d6570eada539cdf | [
"Apache-2.0"
] | null | null | null | queue/crud.py | maransowthri/data-structures-algorithms | 7558d85573df7f5eeea2b4fd8d6570eada539cdf | [
"Apache-2.0"
] | null | null | null | queue/crud.py | maransowthri/data-structures-algorithms | 7558d85573df7f5eeea2b4fd8d6570eada539cdf | [
"Apache-2.0"
class Queue:
    """A simple FIFO queue backed by a Python list."""

    def __init__(self):
        self.data = []

    def __str__(self):
        """Render items front-to-back, e.g. "1 <- 2 <- 3"."""
        values = map(str, self.data)
        return ' <- '.join(values)

    def enque(self, val):
        """Append *val* to the back of the queue."""
        self.data.append(val)

    def deque(self):
        """Remove and return the front item (O(n): the list shifts left)."""
        return self.data.pop(0)

    def peek(self):
        """Return the front item without removing it."""
        return self.data[0]

    def is_empty(self):
        """Return True when the queue holds no items."""
        return self.data == []

    def clear(self):
        """Remove all items.

        Bug fix: this previously set ``self.data = None``, which made
        ``is_empty()`` report False and broke any subsequent ``enque``;
        resetting to an empty list keeps the queue usable.
        """
        self.data = []
# Demo script: exercise the Queue API, printing state after each step.
queue = Queue()
queue.enque(0)
queue.enque(1)
queue.enque(2)
queue.enque(3)
print('queue: ')
print(queue)
print('dequeing', queue.deque())
print('queue: ')
print(queue)
print('Peeked data', queue.peek())
print('Clearing out')
queue.clear()
print('queue is empty' if queue.is_empty() else 'queue is not empty')
def __init__(self):
self.data = []
def __str__(self):
values = map(str, self.data)
return ' <- '.join(values)
def enque(self, val):
self.data.append(val)
def deque(self):
return self.data.pop(0)
def peek(self):
return self.data[0]
def is_empty(self):
return self.data == []
def clear(self):
self.data = None
queue = Queue()
queue.enque(0)
queue.enque(1)
queue.enque(2)
queue.enque(3)
print('queue: ')
print(queue)
print('dequeing', queue.deque())
print('queue: ')
print(queue)
print('Peeked data', queue.peek())
print('Clearing out')
queue.clear()
print('queue is empty' if queue.is_empty() else 'queue is not empty') | true | true |
f73ebf7f3af9a9e38c42247c25c19c6954997d26 | 3,377 | py | Python | sampling/resgld.py | karimul/ebm-sampling | c8c8565a21fde52ac71598f20625857c4ccb8b67 | [
"MIT"
] | null | null | null | sampling/resgld.py | karimul/ebm-sampling | c8c8565a21fde52ac71598f20625857c4ccb8b67 | [
"MIT"
] | null | null | null | sampling/resgld.py | karimul/ebm-sampling | c8c8565a21fde52ac71598f20625857c4ccb8b67 | [
"MIT"
] | null | null | null | import torch
import numpy as np
from autograd.numpy import sqrt
def gen_image_resgld(label, FLAGS, model, im_neg, num_steps, sample=False):
im_noise = torch.randn_like(im_neg).detach()
T_multiply=0.9
T = 0.9
var=0.1
resgld_beta_high = im_neg
resgld_beta_low = im_neg
swaps = 0
noise_scale = sqrt(2e-6 * FLAGS.step_lr * T)
print("noise_scale : ", noise_scale)
print("noise_scale * T_multiply: ", noise_scale* T_multiply)
im_negs_samples = []
for i in range(num_steps):
im_noise.normal_()
resgld_beta_low = resgld_beta_low + noise_scale * im_noise
resgld_beta_high = resgld_beta_high + noise_scale * T_multiply * im_noise
resgld_beta_high.requires_grad_(requires_grad=True)
energy_high = model.forward(resgld_beta_high, label)
resgld_beta_low.requires_grad_(requires_grad=True)
energy_low = model.forward(resgld_beta_low, label)
im_grad_low = torch.autograd.grad([energy_low.sum()], [resgld_beta_low])[0]
im_grad_high = torch.autograd.grad([energy_high.sum()], [resgld_beta_high])[0]
if i == num_steps - 1:
im_neg_orig = resgld_beta_low
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high * T_multiply
dT = 1 / T - 1 / (T * T_multiply)
swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var))
intensity_r = 0.1
# print("swap_rate", swap_rate)
swap_rate = swap_rate.mean().item()
print("swap_rate", swap_rate)
random = np.random.uniform(0, 1)
print("random", random)
if random < intensity_r * swap_rate:
resgld_beta_high, resgld_beta_low = resgld_beta_low, resgld_beta_high
swaps += 1
print("swaps : ", swaps)
im_neg = resgld_beta_low.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad_low.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad_low.detach().cpu().numpy()).mean() | 34.814433 | 100 | 0.586023 | import torch
import numpy as np
from autograd.numpy import sqrt
def gen_image_resgld(label, FLAGS, model, im_neg, num_steps, sample=False):
im_noise = torch.randn_like(im_neg).detach()
T_multiply=0.9
T = 0.9
var=0.1
resgld_beta_high = im_neg
resgld_beta_low = im_neg
swaps = 0
noise_scale = sqrt(2e-6 * FLAGS.step_lr * T)
print("noise_scale : ", noise_scale)
print("noise_scale * T_multiply: ", noise_scale* T_multiply)
im_negs_samples = []
for i in range(num_steps):
im_noise.normal_()
resgld_beta_low = resgld_beta_low + noise_scale * im_noise
resgld_beta_high = resgld_beta_high + noise_scale * T_multiply * im_noise
resgld_beta_high.requires_grad_(requires_grad=True)
energy_high = model.forward(resgld_beta_high, label)
resgld_beta_low.requires_grad_(requires_grad=True)
energy_low = model.forward(resgld_beta_low, label)
im_grad_low = torch.autograd.grad([energy_low.sum()], [resgld_beta_low])[0]
im_grad_high = torch.autograd.grad([energy_high.sum()], [resgld_beta_high])[0]
if i == num_steps - 1:
im_neg_orig = resgld_beta_low
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
n = 128
elif FLAGS.dataset == "lsun":
n = 32
elif FLAGS.dataset == "object":
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high * T_multiply
dT = 1 / T - 1 / (T * T_multiply)
swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var))
intensity_r = 0.1
swap_rate = swap_rate.mean().item()
print("swap_rate", swap_rate)
random = np.random.uniform(0, 1)
print("random", random)
if random < intensity_r * swap_rate:
resgld_beta_high, resgld_beta_low = resgld_beta_low, resgld_beta_high
swaps += 1
print("swaps : ", swaps)
im_neg = resgld_beta_low.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad_low.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad_low.detach().cpu().numpy()).mean() | true | true |
f73ec07ec988999962c11354160dfde04715d682 | 547 | py | Python | tools/parsebuildpack.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | tools/parsebuildpack.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | tools/parsebuildpack.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | from app import userfiles as user_files
import workers.dockerfilebuild
import requests
# Ad-hoc debugging script: download a stored build pack and run it through
# the worker's MIME-type handler to obtain a build directory.
worker = workers.dockerfilebuild.DockerfileBuildWorker(100, None)

resource_key = "5c0a985c-405d-4161-b0ac-603c3757b5f9"
resource_url = user_files.get_file_url(resource_key, "127.0.0.1", requires_cors=False)
print(resource_url)

docker_resource = requests.get(resource_url, stream=True)
# Strip any parameters (e.g. "; charset=utf-8") from the Content-Type
# header; partition() returns the full string unchanged when no ";" exists.
c_type = docker_resource.headers["content-type"].partition(";")[0]

build_dir = worker._mime_processors[c_type](docker_resource)
print(build_dir)
| 27.35 | 86 | 0.793419 | from app import userfiles as user_files
import workers.dockerfilebuild
import requests
w = workers.dockerfilebuild.DockerfileBuildWorker(100, None)
resource_key = "5c0a985c-405d-4161-b0ac-603c3757b5f9"
resource_url = user_files.get_file_url(resource_key, "127.0.0.1", requires_cors=False)
print(resource_url)
docker_resource = requests.get(resource_url, stream=True)
c_type = docker_resource.headers["content-type"]
if ";" in c_type:
c_type = c_type.split(";")[0]
build_dir = w._mime_processors[c_type](docker_resource)
print(build_dir)
| true | true |
f73ec20f0e50d8a12eb1b8ea024256716d1e90e5 | 775 | py | Python | demo.py | MrLiPP6274/mitmproxy-interface | ea9db2ae06d5860196cee17e80903ea454de7ac2 | [
"MIT"
] | null | null | null | demo.py | MrLiPP6274/mitmproxy-interface | ea9db2ae06d5860196cee17e80903ea454de7ac2 | [
"MIT"
] | null | null | null | demo.py | MrLiPP6274/mitmproxy-interface | ea9db2ae06d5860196cee17e80903ea454de7ac2 | [
"MIT"
] | null | null | null | """
@Time :1:08 下午
@Author :li
@decs :demo
"""
import time
import interface
from interface.interface_enum import InterfacePath
# 1. Clear the locally cached interface data.
interface.clean_local()
# 2. Start mitmproxy. By default this runs the InterfaceMonitor class on
#    port 8080; both the script and the port can be set explicitly, e.g.:
interface.run()
# interface.run(scipt='ResponseMock, InterfaceMonitor', port='8888')
# 3. Call your APIs and record the data: while the script sleeps, make the
#    requests you want captured so their data is stored locally.
time.sleep(20)
# 4. Shut mitmproxy down.
interface.stop()
# 5. Fetch the captured data. What you get back is equivalent to a
#    mitmproxy `flow`, preserving flow.request and flow.response verbatim.
# request
requestdata = interface.get(InterfacePath.TEST1).request
print(requestdata.url)
print(requestdata.query)
print(requestdata.json())
print(requestdata.headers)
# response
responsedata = interface.get(InterfacePath.TEST1).response
print(responsedata.url)
print(responsedata.json())
print(responsedata.headers)
| 20.945946 | 68 | 0.792258 | import time
import interface
from interface.interface_enum import InterfacePath
interface.clean_local()
interface.run()
time.sleep(20)
interface.stop()
requestdata = interface.get(InterfacePath.TEST1).request
print(requestdata.url)
print(requestdata.query)
print(requestdata.json())
print(requestdata.headers)
responsedata = interface.get(InterfacePath.TEST1).response
print(responsedata.url)
print(responsedata.json())
print(responsedata.headers)
| true | true |
f73ec3ddcf5e97c9e0ee65f1565be31492abcd2d | 3,084 | py | Python | worden/src/app.py | jvlsg/Worden | 383a8549b09f82e2516887bd621829047ecffbec | [
"MIT"
] | 4 | 2020-07-19T06:38:53.000Z | 2021-07-30T21:36:37.000Z | worden/src/app.py | jvlsg/Huston | 383a8549b09f82e2516887bd621829047ecffbec | [
"MIT"
] | 2 | 2021-02-26T19:43:16.000Z | 2021-02-26T19:43:39.000Z | worden/src/app.py | jvlsg/Huston | 383a8549b09f82e2516887bd621829047ecffbec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import curses
import logging
import npyscreen
from worden.src.api import api_man
from worden.src.api.trackable_object import TrackableObject
from worden.src.ui.list_and_details_form import ListAndDetailsForm
from worden.src.ui.map_form import MapForm
import worden.const as const
class WordenApp(npyscreen.NPSAppManaged):
    """Top-level npyscreen application for the worden UI.

    Holds the application's core logic: form registration, the periodic
    idle-refresh hook, and the currently "tracked" object shared across
    forms.  Form classes should only know what is required to display
    themselves.
    """
    def while_waiting(self):
        # Periodic idle hook (driven by keypress_timeout): refresh whichever
        # form is currently on screen.
        self._Forms[self._active_form].update_form()
        # NOTE(review): `!= None` should be `is not None`; kept as-is here.
        if self.tracked_object != None and self._active_form != self.tracked_object_type:
            # Tick the tracked object's own form so its API page refreshes
            # even while it is off-screen...
            self._Forms[self.tracked_object_type].while_waiting()
            # ...then re-fetch the object from the refreshed results so our
            # reference does not go stale.
            self.tracked_object = self.api_man.pages.get(self.tracked_object_type).results_dict.get(self.tracked_object_key)
    def onStart(self):
        # Must be set before any form is registered: npyscreen reads it when
        # the forms are constructed.
        self.keypress_timeout_default = const.KEYPRESS_TIMEOUT
        self.api_man = api_man.Api_Manager(self)
        # The map view is the application's main form.
        self.f_map = MapForm(parentApp=self, name="MAPS")
        self.registerForm("MAIN",self.f_map)
        # One list/detail form per API category, keyed by API type.
        self.f_api_forms = {}
        for api_type in const.API_TYPES:
            f_api_type = ListAndDetailsForm(parentApp=self,name=api_type.value)
            self.f_api_forms[api_type] = f_api_type
            f_api_type.set_api_type(api_type)
            self.registerForm(api_type,self.f_api_forms[api_type])
        # Periodic API refresh disabled for solar-system bodies —
        # presumably static data; confirm against the API manager.
        self.f_api_forms[const.API_TYPES.SOLAR_SYSTEM_BODIES].set_refresh_api_data(False)
        # Key-ordered listings for these two categories.
        self.f_api_forms[const.API_TYPES.ASTRONAUTS].set_order_keys(True)
        self.f_api_forms[const.API_TYPES.SOLAR_SYSTEM_BODIES].set_order_keys(True)
        self._active_form = "MAIN"
        # No object is tracked until the user selects one.
        self.tracked_object = None
        self.tracked_object_key = None
        self.tracked_object_type = None
    def set_tracked_object(self,trackable_object=None,trackable_object_key=None,trackable_object_type=None):
        """Set the tracked object together with its lookup key and API type.

        Raises:
            TypeError: if the object is not a TrackableObject, or either the
                key or the type is missing.
        """
        if not issubclass(type(trackable_object),TrackableObject) or trackable_object_key == None or trackable_object_type == None:
            raise TypeError()
        logging.debug("Set Tracked Object to a {}: {}".format(type(trackable_object),trackable_object_key))
        self.tracked_object = trackable_object
        self.tracked_object_key = trackable_object_key
        self.tracked_object_type = trackable_object_type
    def onCleanExit(self):
        # Shown by npyscreen when the app exits normally.
        npyscreen.notify_wait("Goodbye!")
    def change_form_to(self, form_name):
        """Switch the active form, refreshing it on the way in.

        Unknown form names are ignored silently.
        """
        if form_name not in self._Forms.keys():
            return
        self._active_form = form_name
        self.switchForm(form_name)
        self._Forms[self._active_form].update_form()
        self.resetHistory()
| 38.55 | 131 | 0.703307 |
import curses
import logging
import npyscreen
from worden.src.api import api_man
from worden.src.api.trackable_object import TrackableObject
from worden.src.ui.list_and_details_form import ListAndDetailsForm
from worden.src.ui.map_form import MapForm
import worden.const as const
class WordenApp(npyscreen.NPSAppManaged):
def while_waiting(self):
self._Forms[self._active_form].update_form()
if self.tracked_object != None and self._active_form != self.tracked_object_type:
self._Forms[self.tracked_object_type].while_waiting()
self.tracked_object = self.api_man.pages.get(self.tracked_object_type).results_dict.get(self.tracked_object_key)
def onStart(self):
self.keypress_timeout_default = const.KEYPRESS_TIMEOUT
self.api_man = api_man.Api_Manager(self)
self.f_map = MapForm(parentApp=self, name="MAPS")
self.registerForm("MAIN",self.f_map)
self.f_api_forms = {}
for api_type in const.API_TYPES:
f_api_type = ListAndDetailsForm(parentApp=self,name=api_type.value)
self.f_api_forms[api_type] = f_api_type
f_api_type.set_api_type(api_type)
self.registerForm(api_type,self.f_api_forms[api_type])
self.f_api_forms[const.API_TYPES.SOLAR_SYSTEM_BODIES].set_refresh_api_data(False)
self.f_api_forms[const.API_TYPES.ASTRONAUTS].set_order_keys(True)
self.f_api_forms[const.API_TYPES.SOLAR_SYSTEM_BODIES].set_order_keys(True)
self._active_form = "MAIN"
self.tracked_object = None
self.tracked_object_key = None
self.tracked_object_type = None
def set_tracked_object(self,trackable_object=None,trackable_object_key=None,trackable_object_type=None):
if not issubclass(type(trackable_object),TrackableObject) or trackable_object_key == None or trackable_object_type == None:
raise TypeError()
logging.debug("Set Tracked Object to a {}: {}".format(type(trackable_object),trackable_object_key))
self.tracked_object = trackable_object
self.tracked_object_key = trackable_object_key
self.tracked_object_type = trackable_object_type
def onCleanExit(self):
npyscreen.notify_wait("Goodbye!")
def change_form_to(self, form_name):
if form_name not in self._Forms.keys():
return
self._active_form = form_name
self.switchForm(form_name)
self._Forms[self._active_form].update_form()
self.resetHistory()
| true | true |
f73ec5dad3cb94a49d724ad483fcd5ed5714f395 | 5,483 | py | Python | MICRO_CPU_profiling/experiment_2_algorithm_settings.py | WenqiJiang/faiss-cpu-profiling | f2c7b3051f8860e8918c713ef4baddd563cc515c | [
"MIT"
] | null | null | null | MICRO_CPU_profiling/experiment_2_algorithm_settings.py | WenqiJiang/faiss-cpu-profiling | f2c7b3051f8860e8918c713ef4baddd563cc515c | [
"MIT"
] | null | null | null | MICRO_CPU_profiling/experiment_2_algorithm_settings.py | WenqiJiang/faiss-cpu-profiling | f2c7b3051f8860e8918c713ef4baddd563cc515c | [
"MIT"
] | null | null | null | """
Evaluating the index's influence on the performance required to achieve a certain recall.
Note: if using perf to profile the program, use sudo to run the commands (already
hardcoded in this script), make sure the user have sudo access
Example Usage:
python experiment_2_algorithm_settings.py --dbname SIFT1000M --topK 100 --recall_goal 0.95 --qbs 10000 --repeat_time 1 \
--cpp_bin_dir /data/faiss-cpu-profiling/build/demos/bigann_search \
--index_parent_dir /data/Faiss_experiments/trained_CPU_indexes/ \
--gt_parent_dir /data/Faiss_experiments/bigann/gnd/ \
--nprobe_dict_dir '../recall_info/cpu_recall_index_nprobe_pairs_SIFT1000M.pkl' --perf_enable 1
"""
from __future__ import print_function
import os
import sys
import time
import re
import pickle
import getpass
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dbname', type=str, default='SIFT1000M', help="dataset name, e.g., SIFT100M")
parser.add_argument('--topK', type=int, default=10, help="return topK most similar vector, related to recall, e.g., R@10=50perc or R@100=80perc")
parser.add_argument('--recall_goal', type=float, default=0.8, help="recall goal 0~1")
parser.add_argument('--qbs', type=int, default=10000, help="batch size")
parser.add_argument('--repeat_time', type=int, default=1, help="repeat_time of the 10000 queries, higher repeat time typically has a better stability")
parser.add_argument('--cpp_bin_dir', type=str, default='/data/faiss-cpu-profiling/build/demos/bigann_search', help="c++ search binary")
parser.add_argument('--index_parent_dir', type=str, default='/data/Faiss_experiments/trained_CPU_indexes/', help="parent directory of index storage")
parser.add_argument('--gt_parent_dir', type=str, default='/data/Faiss_experiments/bigann/gnd/', help="parent directory of ground truth")
parser.add_argument('--nprobe_dict_dir', type=str, default='../recall_info/cpu_recall_index_nprobe_pairs_SIFT1000M.pkl', help="recall dictionary, stores the min nprobe to achieve certain recall")
parser.add_argument('--perf_enable', type=int, default=1, help="whether to profile by perf")
args = parser.parse_args()
dbname = args.dbname
topK = args.topK
recall_goal = args.recall_goal
qbs = args.qbs
repeat_time = args.repeat_time
cpp_bin_dir = args.cpp_bin_dir
index_parent_dir = args.index_parent_dir
gt_parent_dir = args.gt_parent_dir
nprobe_dict_dir = args.nprobe_dict_dir
perf_enable = args.perf_enable
assert qbs == 10000, "Currently the c++ search program only support batch size = 10000"
# dictionary format: d_nprobes[dbname][index_key][topK][recall_goal] = min_nprobe
d_nprobes = None
if os.path.exists(nprobe_dict_dir):
    # NOTE: pickle.load executes arbitrary code from the file — only load
    # dictionaries produced by this project's own scripts.
    with open(nprobe_dict_dir, 'rb') as f:
        d_nprobes = pickle.load(f)
else:
    print("ERROR! input dictionary does not exists")
    raise ValueError
out_dir = "result_experiment_2_algorithm_settings"
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
# One log file per (dataset, recall target, batch size); remove any stale
# copy so the appends below start from an empty file.
logname = "./{out_dir}/out_{dbname}_R@{topK}={recall_goal}_qbs_{qbs}".format(
    out_dir=out_dir, dbname=dbname, topK=topK, recall_goal=recall_goal, qbs=qbs)
if os.path.exists(logname):
    os.remove(logname)
# Select the ground-truth nearest-neighbor file matching the dataset size.
gt_dir = None
if dbname == 'SIFT1M':
    gt_dir = os.path.join(gt_parent_dir, 'idx_1M.ivecs')
elif dbname == 'SIFT10M':
    gt_dir = os.path.join(gt_parent_dir, 'idx_10M.ivecs')
elif dbname == 'SIFT100M':
    gt_dir = os.path.join(gt_parent_dir, 'idx_100M.ivecs')
elif dbname == 'SIFT1000M':
    gt_dir = os.path.join(gt_parent_dir, 'idx_1000M.ivecs')
else:
    print("ERROR: unknown dataset")
    raise ValueError
# All IVF/PQ index variants to benchmark, with and without the OPQ rotation.
index_keys = ['IVF1024,PQ16', 'IVF2048,PQ16', 'IVF4096,PQ16', 'IVF8192,PQ16', 'IVF16384,PQ16', 'IVF32768,PQ16', 'IVF65536,PQ16', 'IVF131072,PQ16', 'IVF262144,PQ16', \
    'OPQ16,IVF1024,PQ16', 'OPQ16,IVF2048,PQ16', 'OPQ16,IVF4096,PQ16', 'OPQ16,IVF8192,PQ16', 'OPQ16,IVF16384,PQ16', 'OPQ16,IVF32768,PQ16', 'OPQ16,IVF65536,PQ16', 'OPQ16,IVF131072,PQ16', 'OPQ16,IVF262144,PQ16']
# NOTE(security/review): every command below is built by string formatting
# and executed via os.system (some with sudo). Arguments come from CLI
# flags, so this script must only ever be run with trusted inputs;
# subprocess.run with an argument list would be the safer construction.
for index_key in index_keys:
    # Minimum nprobe that reaches the recall goal; None means this index
    # cannot reach it, so skip it.
    nprobe = d_nprobes[dbname][index_key][topK][recall_goal]
    if nprobe is None:
        continue
    # Section header in the shared log file.
    os.system('echo ==== {index_key} ==== >> {logname}'.format(index_key=index_key, logname=logname))
    index_sub_dir = 'bench_cpu_{dbname}_{index_key}/{dbname}_{index_key}_populated.index'.format(dbname=dbname, index_key=index_key)
    index_dir = os.path.join(index_parent_dir, index_sub_dir)
    # Usage: ./binary index_dir gt_dir topK nprobe
    cmd = "{cpp_bin_dir} {index_dir} {gt_dir} {topK} {nprobe} {repeat_time} >> {logname}".format(
        cpp_bin_dir=cpp_bin_dir, index_dir=index_dir, gt_dir=gt_dir, topK=topK, nprobe=nprobe, repeat_time=repeat_time, logname=logname)
    if not perf_enable:
        print(cmd)
        os.system(cmd)
    else:
        # Sample call stacks at 99 Hz while the search binary runs.
        cmd_prefix = "perf record -v -g -F 99 "
        cmd_prof = "sudo " + cmd_prefix + cmd
        print(cmd_prof)
        os.system(cmd_prof)
        # generate the perf.out, i.e., the trace of each sample
        reportname = "./{out_dir}/perf.out_{dbname}_{index_key}_R@{topK}={recall_goal}_nprobe_{nprobe}_qbs_{qbs}".format(
            out_dir=out_dir, dbname=dbname, index_key=index_key, topK=topK, recall_goal=recall_goal, nprobe=nprobe,qbs=qbs)
        cmd_stats = "sudo perf script > {reportname}".format(reportname=reportname)
        os.system(cmd_stats)
        # perf ran as root, so hand the report back to the invoking user and
        # clean up the intermediate perf.data files.
        username = getpass.getuser()
        os.system("sudo chown {username} {reportname}".format(username=username, reportname=reportname))
        os.system("sudo rm perf.data perf.data.old")
| 47.267241 | 209 | 0.732628 |
from __future__ import print_function
import os
import sys
import time
import re
import pickle
import getpass
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dbname', type=str, default='SIFT1000M', help="dataset name, e.g., SIFT100M")
parser.add_argument('--topK', type=int, default=10, help="return topK most similar vector, related to recall, e.g., R@10=50perc or R@100=80perc")
parser.add_argument('--recall_goal', type=float, default=0.8, help="recall goal 0~1")
parser.add_argument('--qbs', type=int, default=10000, help="batch size")
parser.add_argument('--repeat_time', type=int, default=1, help="repeat_time of the 10000 queries, higher repeat time typically has a better stability")
parser.add_argument('--cpp_bin_dir', type=str, default='/data/faiss-cpu-profiling/build/demos/bigann_search', help="c++ search binary")
parser.add_argument('--index_parent_dir', type=str, default='/data/Faiss_experiments/trained_CPU_indexes/', help="parent directory of index storage")
parser.add_argument('--gt_parent_dir', type=str, default='/data/Faiss_experiments/bigann/gnd/', help="parent directory of ground truth")
parser.add_argument('--nprobe_dict_dir', type=str, default='../recall_info/cpu_recall_index_nprobe_pairs_SIFT1000M.pkl', help="recall dictionary, stores the min nprobe to achieve certain recall")
parser.add_argument('--perf_enable', type=int, default=1, help="whether to profile by perf")
args = parser.parse_args()
dbname = args.dbname
topK = args.topK
recall_goal = args.recall_goal
qbs = args.qbs
repeat_time = args.repeat_time
cpp_bin_dir = args.cpp_bin_dir
index_parent_dir = args.index_parent_dir
gt_parent_dir = args.gt_parent_dir
nprobe_dict_dir = args.nprobe_dict_dir
perf_enable = args.perf_enable
assert qbs == 10000, "Currently the c++ search program only support batch size = 10000"
d_nprobes = None
if os.path.exists(nprobe_dict_dir):
with open(nprobe_dict_dir, 'rb') as f:
d_nprobes = pickle.load(f)
else:
print("ERROR! input dictionary does not exists")
raise ValueError
out_dir = "result_experiment_2_algorithm_settings"
if not os.path.exists(out_dir):
os.mkdir(out_dir)
logname = "./{out_dir}/out_{dbname}_R@{topK}={recall_goal}_qbs_{qbs}".format(
out_dir=out_dir, dbname=dbname, topK=topK, recall_goal=recall_goal, qbs=qbs)
if os.path.exists(logname):
os.remove(logname)
gt_dir = None
if dbname == 'SIFT1M':
gt_dir = os.path.join(gt_parent_dir, 'idx_1M.ivecs')
elif dbname == 'SIFT10M':
gt_dir = os.path.join(gt_parent_dir, 'idx_10M.ivecs')
elif dbname == 'SIFT100M':
gt_dir = os.path.join(gt_parent_dir, 'idx_100M.ivecs')
elif dbname == 'SIFT1000M':
gt_dir = os.path.join(gt_parent_dir, 'idx_1000M.ivecs')
else:
print("ERROR: unknown dataset")
raise ValueError
index_keys = ['IVF1024,PQ16', 'IVF2048,PQ16', 'IVF4096,PQ16', 'IVF8192,PQ16', 'IVF16384,PQ16', 'IVF32768,PQ16', 'IVF65536,PQ16', 'IVF131072,PQ16', 'IVF262144,PQ16', \
'OPQ16,IVF1024,PQ16', 'OPQ16,IVF2048,PQ16', 'OPQ16,IVF4096,PQ16', 'OPQ16,IVF8192,PQ16', 'OPQ16,IVF16384,PQ16', 'OPQ16,IVF32768,PQ16', 'OPQ16,IVF65536,PQ16', 'OPQ16,IVF131072,PQ16', 'OPQ16,IVF262144,PQ16']
for index_key in index_keys:
nprobe = d_nprobes[dbname][index_key][topK][recall_goal]
if nprobe is None:
continue
os.system('echo ==== {index_key} ==== >> {logname}'.format(index_key=index_key, logname=logname))
index_sub_dir = 'bench_cpu_{dbname}_{index_key}/{dbname}_{index_key}_populated.index'.format(dbname=dbname, index_key=index_key)
index_dir = os.path.join(index_parent_dir, index_sub_dir)
cmd = "{cpp_bin_dir} {index_dir} {gt_dir} {topK} {nprobe} {repeat_time} >> {logname}".format(
cpp_bin_dir=cpp_bin_dir, index_dir=index_dir, gt_dir=gt_dir, topK=topK, nprobe=nprobe, repeat_time=repeat_time, logname=logname)
if not perf_enable:
print(cmd)
os.system(cmd)
else:
cmd_prefix = "perf record -v -g -F 99 "
cmd_prof = "sudo " + cmd_prefix + cmd
print(cmd_prof)
os.system(cmd_prof)
reportname = "./{out_dir}/perf.out_{dbname}_{index_key}_R@{topK}={recall_goal}_nprobe_{nprobe}_qbs_{qbs}".format(
out_dir=out_dir, dbname=dbname, index_key=index_key, topK=topK, recall_goal=recall_goal, nprobe=nprobe,qbs=qbs)
cmd_stats = "sudo perf script > {reportname}".format(reportname=reportname)
os.system(cmd_stats)
username = getpass.getuser()
os.system("sudo chown {username} {reportname}".format(username=username, reportname=reportname))
os.system("sudo rm perf.data perf.data.old")
| true | true |
f73ec6362b361afc3a9d2421f74a8cf5c39b38fe | 10,130 | py | Python | shadowlands/credstick/trezor_ethdriver.py | carver/shadowlands-core | 8931254da4af7e4cde3594fe1bcbf92a34ac02a4 | [
"MIT"
] | null | null | null | shadowlands/credstick/trezor_ethdriver.py | carver/shadowlands-core | 8931254da4af7e4cde3594fe1bcbf92a34ac02a4 | [
"MIT"
] | null | null | null | shadowlands/credstick/trezor_ethdriver.py | carver/shadowlands-core | 8931254da4af7e4cde3594fe1bcbf92a34ac02a4 | [
"MIT"
] | null | null | null |
import sys
from time import sleep
import getpass
from trezorlib.client import ProtocolMixin, BaseClient
from trezorlib.transport import enumerate_devices, get_transport, TransportException
from trezorlib import tools
from trezorlib import messages as proto
import binascii
from shadowlands.credstick import Credstick, DeriveCredstickAddressError, OpenCredstickError, CloseCredstickError, SignTxError
from shadowlands.tui.effects.widgets import TextRequestDialog, MessageDialog
from shadowlands.tui.debug import debug
from web3 import Web3
import pdb
# The Trezor protocol is a dumpster fire of shitty design
# caused by the inability to authenticate without client
# side intervention.
#
# I hope you enjoy using this because I bled for its
# implementation.
#
class TrezorEthDriver(Credstick):
    """Credstick driver for the Trezor hardware wallet.

    Talks to the device through trezorlib's raw transport.  Because the
    Trezor protocol requires client-side PIN entry, several calls may come
    back with a ``PinMatrixRequest`` instead of a result; in that case a
    PIN-matrix dialog is pushed onto the UI and the caller retries after
    the user unlocks the device.

    Class attributes ``eth_node``, ``interface`` and helpers such as
    ``prepare_tx``/``signed_tx``/``hdpath`` are presumably provided by the
    ``Credstick`` base or set by the application — TODO confirm.
    """

    transport = None    # trezorlib transport handle, set by open()
    state = None        # cached device session state, if any
    hdpath_base = "44'/60'/0'/0"   # HD derivation path base
    hdpath_index = '0'             # HD derivation path index

    @classmethod
    def call_raw(cls, msg):
        """Send one protobuf message to the device and return the raw reply.

        Wraps the exchange in a transport session so reads and writes are
        not interleaved with other users of the transport.
        """
        cls.transport.session_begin()
        cls.transport.write(msg)
        response = cls.transport.read()
        cls.transport.session_end()
        return response

    @classmethod
    def open(cls):
        """Locate a Trezor, initialize a session and cache its features.

        Raises:
            OpenCredstickError: if no device is attached or the transport
                fails during initialization.
        """
        try:
            cls.transport = get_transport(None, prefix_search=False)
        except StopIteration as e:
            # get_transport() exhausts its device iterator when nothing is
            # plugged in; surface that as the driver's own error type
            # (previously this dropped into pdb — a debugging leftover).
            raise OpenCredstickError("No Trezor device found")
        init_msg = proto.Initialize()
        if cls.state is not None:
            init_msg.state = cls.state
        try:
            cls.features = cls.call_raw(init_msg)
        except TransportException:
            raise OpenCredstickError("Error opening Trezor")

    @classmethod
    def matrix_process(cls, text, calling_window):
        """Submit the PIN-matrix digits the user typed.

        If the device answers the pending address request, store the
        checksummed address; otherwise tell the user the device is unlocked
        so they can retry the original action.
        """
        response = cls.call_raw(proto.PinMatrixAck(pin=text))
        # == rather than `is`: identity comparison on string literals only
        # worked by accident of CPython interning.
        if response.__class__.__name__ == 'EthereumAddress':
            address = '0x' + binascii.hexlify(response.address).decode('ascii')
            address = cls.eth_node.w3.toChecksumAddress(address)
            cls.address = address
        else:
            calling_window._scene.add_effect(MessageDialog(calling_window._screen, "Trezor is unlocked now.", destroy_window=calling_window))

    @classmethod
    def matrix_request_window(cls):
        """Push the PIN-matrix entry dialog onto the current scene."""
        legend = '''Use the numeric keypad to describe number positions.
The layout is:
7 8 9
4 5 6
1 2 3'''
        scr = cls.interface._screen
        dialog = TextRequestDialog(
            scr,
            height=14,
            width=60,
            label_prompt_text=legend,
            label_height=5,
            continue_button_text="Unlock",
            continue_function=cls.matrix_process,
            text_label="Your code:",
            hide_char="*",
            label_align="<",
            title="Trezor Auth",
            reset_scene=False
        )
        scr.current_scene.add_effect( dialog )

    @classmethod
    def derive(cls, hdpath_base="44'/60'/0'/0", hdpath_index='0', set_address=False):
        """Derive the Ethereum address at ``hdpath_base/hdpath_index``.

        Returns the checksummed address, or None when the device first asks
        for a PIN (the matrix dialog is shown and the caller should retry).
        When ``set_address`` is True the derived address and path are also
        stored on the class.

        Raises:
            DeriveCredstickAddressError: on transport failure or a device
                Failure response.
        """
        hdpath = hdpath_base + '/' + hdpath_index
        address_n = tools.parse_path(hdpath)
        call_obj = proto.EthereumGetAddress(address_n=address_n, show_display=False)
        try:
            response = cls.call_raw(call_obj)
        except TransportException:
            raise DeriveCredstickAddressError
        if response.__class__.__name__ == 'PinMatrixRequest':
            cls.matrix_request_window()
            return None
        elif response.__class__.__name__ == 'Failure':
            raise DeriveCredstickAddressError
        else:
            address = '0x' + binascii.hexlify(response.address).decode('ascii')
            derived_address = Web3.toChecksumAddress(address)
            if set_address is True:
                cls.address = derived_address
                cls.hdpath_base = hdpath_base
                cls.hdpath_index = hdpath_index
            return derived_address

    @classmethod
    def signTx(cls, tx):
        """Sign an Ethereum transaction dict on the device.

        The calldata is streamed to the device in 1024-byte chunks, then in
        whatever chunk sizes the device requests, until it returns the
        (v, r, s) signature.

        Raises:
            SignTxError: on transport failure, a device Failure response, or
                when the device is locked (the PIN dialog is shown first).
        """
        def int_to_big_endian(value):
            # Minimal big-endian byte encoding, as the protocol expects.
            return value.to_bytes((value.bit_length() + 7) // 8, 'big')
        tx = cls.prepare_tx(tx)
        address_n = tools.parse_path(cls.hdpath())
        msg = proto.EthereumSignTx(
            address_n=address_n,
            nonce=int_to_big_endian(tx['nonce']),
            gas_price=int_to_big_endian(tx['gasPrice']),
            gas_limit=int_to_big_endian(tx['gas']),
            chain_id=int(cls.eth_node._network),
            value=int_to_big_endian(tx['value']))
        if tx['to']:
            msg.to = tx['to']
        data = tx['data']
        if data:
            msg.data_length = len(data)
            data, chunk = data[1024:], data[:1024]
            msg.data_initial_chunk = chunk
        try:
            response = cls.call_raw(msg)
            # The device interposes ButtonRequest messages while the user
            # confirms on-device; acknowledge each until a real reply comes.
            while response.__class__.__name__ == 'ButtonRequest':
                response = cls.call_raw(proto.ButtonAck())
            if response.__class__.__name__ == 'PinMatrixRequest':
                cls.matrix_request_window()
                raise SignTxError("Credstick needs to be unlocked")
            if response.__class__.__name__ == 'Failure':
                raise SignTxError
        except TransportException:
            raise SignTxError
        # Stream the remaining calldata in the chunk sizes the device asks
        # for; data_length becomes None when it has everything.
        while response.data_length is not None:
            data_length = response.data_length
            data, chunk = data[data_length:], data[:data_length]
            response = cls.call_raw(proto.EthereumTxAck(data_chunk=chunk))
        v = response.signature_v
        r = response.signature_r
        s = response.signature_s
        stx = cls.signed_tx(tx, v,
            int(r.hex(), 16),
            int(s.hex(), 16)
        )
        return stx

    @classmethod
    def close(cls):
        """Close the transport if one was opened."""
        if cls.transport:
            cls.transport.close()
class SomeException(Exception):
    """Unused placeholder exception.

    NOTE(review): nothing in this module raises or catches it — presumably
    a leftover; confirm before removing.
    """
    pass
'''
@session
def call_raw(self, msg):
__tracebackhide__ = True # pytest traceback hiding - this function won't appear in tracebacks
self.transport.write(msg)
return self.transport.read()
@session
def call(self, msg):
resp = self.call_raw(msg)
handler_name = "callback_%s" % resp.__class__.__name__
handler = getattr(self, handler_name, None)
if handler is not None:
msg = handler(resp)
if msg is None:
raise ValueError("Callback %s must return protobuf message, not None" % handler)
resp = self.call(msg)
return resp
@field('address')
@expect(proto.EthereumAddress)
def ethereum_get_address(self, n, show_display=False, multisig=None):
n = self._convert_prime(n)
return self.call(proto.EthereumGetAddress(address_n=n, show_display=show_display))
@session
def ethereum_sign_tx(self, n, nonce, gas_price, gas_limit, to, value, data=None, chain_id=None, tx_type=None):
def int_to_big_endian(value):
return value.to_bytes((value.bit_length() + 7) // 8, 'big')
n = self._convert_prime(n)
msg = proto.EthereumSignTx(
address_n=n,
nonce=int_to_big_endian(nonce),
gas_price=int_to_big_endian(gas_price),
gas_limit=int_to_big_endian(gas_limit),
value=int_to_big_endian(value))
if to:
msg.to = to
if data:
msg.data_length = len(data)
data, chunk = data[1024:], data[:1024]
msg.data_initial_chunk = chunk
if chain_id:
msg.chain_id = chain_id
if tx_type is not None:
msg.tx_type = tx_type
response = self.call(msg)
while response.data_length is not None:
data_length = response.data_length
data, chunk = data[data_length:], data[:data_length]
response = self.call(proto.EthereumTxAck(data_chunk=chunk))
return response.signature_v, response.signature_r, response.signature_s
@expect(proto.EthereumMessageSignature)
def ethereum_sign_message(self, n, message):
n = self._convert_prime(n)
message = normalize_nfc(message)
return self.call(proto.EthereumSignMessage(address_n=n, message=message))
def ethereum_verify_message(self, address, signature, message):
message = normalize_nfc(message)
try:
resp = self.call(proto.EthereumVerifyMessage(address=address, signature=signature, message=message))
except CallException as e:
resp = e
if isinstance(resp, proto.Success):
return True
return False
def init_device(self):
    """Start a device session and cache the reported features.

    Raises:
        RuntimeError: if the device vendor is not in ``self.VENDORS``.
    """
    request = proto.Initialize()
    if self.state is not None:
        request.state = self.state
    self.features = expect(proto.Features)(self.call)(request)
    vendor = str(self.features.vendor)
    if vendor not in self.VENDORS:
        raise RuntimeError("Unsupported device")
'''
| 32.783172 | 141 | 0.601382 |
import sys
from time import sleep
import getpass
from trezorlib.client import ProtocolMixin, BaseClient
from trezorlib.transport import enumerate_devices, get_transport, TransportException
from trezorlib import tools
from trezorlib import messages as proto
import binascii
from shadowlands.credstick import Credstick, DeriveCredstickAddressError, OpenCredstickError, CloseCredstickError, SignTxError
from shadowlands.tui.effects.widgets import TextRequestDialog, MessageDialog
from shadowlands.tui.debug import debug
from web3 import Web3
import pdb
class TrezorEthDriver(Credstick):
    """Credstick driver backed by a Trezor hardware wallet.

    All state (transport, device features, derived address, derivation
    path) is kept on the class itself; the application uses this class
    as a singleton credstick.  Helpers such as ``prepare_tx``,
    ``signed_tx``, ``hdpath`` and the ``eth_node`` / ``interface``
    attributes are provided by the ``Credstick`` base class.
    """

    transport = None    # trezorlib transport, set by open()
    state = None        # opaque device session state blob, if any
    hdpath_base = "44'/60'/0'/0"   # BIP-44 Ethereum derivation prefix
    hdpath_index = '0'             # account index appended to hdpath_base

    @classmethod
    def call_raw(cls, msg):
        """Exchange one protobuf message with the device and return the reply."""
        cls.transport.session_begin()
        cls.transport.write(msg)
        response = cls.transport.read()
        cls.transport.session_end()
        return response

    @classmethod
    def open(cls):
        """Acquire the device transport and read the device features.

        Raises:
            OpenCredstickError: if no Trezor is attached or the initial
                handshake fails.
        """
        try:
            cls.transport = get_transport(None, prefix_search=False)
        except StopIteration:
            # get_transport() raises StopIteration when no device is
            # present.  (Previously this dropped into pdb and then
            # continued with cls.transport unset.)
            raise OpenCredstickError("No Trezor device found")
        init_msg = proto.Initialize()
        if cls.state is not None:
            init_msg.state = cls.state
        try:
            cls.features = cls.call_raw(init_msg)
        except TransportException:
            raise OpenCredstickError("Error opening Trezor")

    @classmethod
    def matrix_process(cls, text, calling_window):
        """Submit the PIN-matrix code typed by the user.

        If the device answers directly with an address (we were in the
        middle of a derivation), record it; otherwise just tell the user
        the device is unlocked.
        """
        response = cls.call_raw(proto.PinMatrixAck(pin=text))
        # Compare with '==' -- the original used 'is', whose result for
        # string literals is implementation-dependent.
        if response.__class__.__name__ == 'EthereumAddress':
            address = '0x' + binascii.hexlify(response.address).decode('ascii')
            address = cls.eth_node.w3.toChecksumAddress(address)
            cls.address = address
        else:
            calling_window._scene.add_effect(
                MessageDialog(calling_window._screen,
                              "Trezor is unlocked now.",
                              destroy_window=calling_window))

    @classmethod
    def matrix_request_window(cls):
        """Open the PIN-matrix entry dialog on the current screen."""
        legend = '''Use the numeric keypad to describe number positions.
The layout is:
7 8 9
4 5 6
1 2 3'''
        scr = cls.interface._screen
        dialog = TextRequestDialog(
            scr,
            height=14,
            width=60,
            label_prompt_text=legend,
            label_height=5,
            continue_button_text="Unlock",
            continue_function=cls.matrix_process,
            text_label="Your code:",
            hide_char="*",
            label_align="<",
            title="Trezor Auth",
            reset_scene=False
        )
        scr.current_scene.add_effect(dialog)

    @classmethod
    def derive(cls, hdpath_base="44'/60'/0'/0", hdpath_index='0', set_address=False):
        """Derive the checksummed Ethereum address at hdpath_base/hdpath_index.

        Returns None (after opening the PIN-matrix dialog) when the
        device first demands PIN entry.  With ``set_address=True`` the
        derived address and path are recorded on the class.

        Raises:
            DeriveCredstickAddressError: on transport failure or device
                rejection.
        """
        hdpath = hdpath_base + '/' + hdpath_index
        address_n = tools.parse_path(hdpath)
        call_obj = proto.EthereumGetAddress(address_n=address_n, show_display=False)
        try:
            response = cls.call_raw(call_obj)
        except TransportException:
            raise DeriveCredstickAddressError
        if response.__class__.__name__ == 'PinMatrixRequest':
            cls.matrix_request_window()
            return None
        elif response.__class__.__name__ == 'Failure':
            raise DeriveCredstickAddressError
        address = '0x' + binascii.hexlify(response.address).decode('ascii')
        derived_address = Web3.toChecksumAddress(address)
        if set_address is True:
            cls.address = derived_address
            cls.hdpath_base = hdpath_base
            cls.hdpath_index = hdpath_index
        return derived_address

    @classmethod
    def signTx(cls, tx):
        """Sign a web3-style transaction dict on the device.

        The first 1024 bytes of ``data`` accompany the sign request;
        further chunks are streamed in device-requested sizes.  Returns
        the signed transaction built by ``cls.signed_tx``.

        Raises:
            SignTxError: if the device is locked, reports failure, or
                the transport drops.
        """
        def int_to_big_endian(value):
            # Minimal big-endian encoding; 0 encodes as b''.
            return value.to_bytes((value.bit_length() + 7) // 8, 'big')

        tx = cls.prepare_tx(tx)
        address_n = tools.parse_path(cls.hdpath())
        msg = proto.EthereumSignTx(
            address_n=address_n,
            nonce=int_to_big_endian(tx['nonce']),
            gas_price=int_to_big_endian(tx['gasPrice']),
            gas_limit=int_to_big_endian(tx['gas']),
            chain_id=int(cls.eth_node._network),
            value=int_to_big_endian(tx['value']))
        if tx['to']:
            msg.to = tx['to']
        data = tx['data']
        if data:
            msg.data_length = len(data)
            data, chunk = data[1024:], data[:1024]
            msg.data_initial_chunk = chunk
        try:
            response = cls.call_raw(msg)
            # Acknowledge on-device confirmation prompts until a final
            # response arrives.
            while response.__class__.__name__ == 'ButtonRequest':
                response = cls.call_raw(proto.ButtonAck())
            if response.__class__.__name__ == 'PinMatrixRequest':
                cls.matrix_request_window()
                raise SignTxError("Credstick needs to be unlocked")
            if response.__class__.__name__ == 'Failure':
                raise SignTxError
        except TransportException:
            raise SignTxError
        while response.data_length is not None:
            data_length = response.data_length
            data, chunk = data[data_length:], data[:data_length]
            response = cls.call_raw(proto.EthereumTxAck(data_chunk=chunk))
        v = response.signature_v
        r = response.signature_r
        s = response.signature_s
        return cls.signed_tx(tx, v,
                             int(r.hex(), 16),
                             int(s.hex(), 16))

    @classmethod
    def close(cls):
        """Release the device transport, if one was opened."""
        if cls.transport:
            cls.transport.close()
class SomeException(Exception):
    """Generic module-level exception; behaves exactly like Exception."""
| true | true |
f73ec7a3002233b93515285fcd63ee1574fcded3 | 3,213 | py | Python | palsbet/models.py | denis254/xpredict | a8df45803889c03695ee1a6652baa5cca7aa0c60 | [
"BSD-3-Clause"
] | null | null | null | palsbet/models.py | denis254/xpredict | a8df45803889c03695ee1a6652baa5cca7aa0c60 | [
"BSD-3-Clause"
] | 10 | 2020-03-24T16:21:33.000Z | 2021-09-08T00:35:51.000Z | palsbet/models.py | denis254/xpredict | a8df45803889c03695ee1a6652baa5cca7aa0c60 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils import timezone
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth.models import User
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', User)
class Visitor(models.Model):
    # Ties an authenticated user to a single session key, allowing the
    # site to enforce/track one active session per user.
    user = models.OneToOneField(AUTH_USER_MODEL, null=False, related_name='visitor', on_delete=models.CASCADE,)
    # Django session keys are 40 characters long.
    session_key = models.CharField(null=False, max_length=40)
class FreeTipsGames(models.Model):
    # A free betting tip: one fixture with its prediction, odds and outcome.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Lifecycle of the tip: still running, or settled won/lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Shown in the admin and shell; identified by the home team.
        return self.home_team
class VipTips(models.Model):
    # A premium (VIP) betting tip; same shape as the free-tip model.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Lifecycle of the tip: still running, or settled won/lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Shown in the admin and shell; identified by the home team.
        return self.home_team
class PunterPick(models.Model):
    # A punter's featured pick; same field layout as the other tip models.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Lifecycle of the pick: still running, or settled won/lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Shown in the admin and shell; identified by the home team.
        return self.home_team
class RollOver(models.Model):
    # A tip belonging to a roll-over (accumulator) plan; same field layout
    # as the other tip models.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Lifecycle of the tip: still running, or settled won/lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Shown in the admin and shell; identified by the home team.
        return self.home_team
| 28.184211 | 111 | 0.715219 | from django.db import models
from django.utils import timezone
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth.models import User
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', User)
class Visitor(models.Model):
    # One-to-one link from a user to his/her current session key.
    user = models.OneToOneField(AUTH_USER_MODEL, null=False, related_name='visitor', on_delete=models.CASCADE,)
    # Django session keys are 40 characters long.
    session_key = models.CharField(null=False, max_length=40)
class FreeTipsGames(models.Model):
    # Free betting tip: fixture, prediction, odds and settlement status.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Running until settled as Won or Lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Human-readable label: the home team of the fixture.
        return self.home_team
class VipTips(models.Model):
    # Premium (VIP) betting tip with the same shape as the free tips.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Running until settled as Won or Lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Human-readable label: the home team of the fixture.
        return self.home_team
class PunterPick(models.Model):
    # A punter's featured pick; identical field layout to the tip models.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Running until settled as Won or Lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Human-readable label: the home team of the fixture.
        return self.home_team
class RollOver(models.Model):
    # Tip in a roll-over (accumulator) plan; same layout as the tip models.
    published_date = models.DateTimeField('Date Published', auto_now_add=True)  # set once on creation
    country = models.CharField(max_length = 200)
    home_team = models.CharField(max_length = 200)
    home_score = models.PositiveIntegerField(default = 0)
    away_score = models.PositiveIntegerField(default = 0)
    away_team = models.CharField(max_length = 200)
    prediction = models.CharField(max_length = 100)
    odds = models.CharField(max_length = 100, null = True, blank = True)  # optional, stored as text
    # Running until settled as Won or Lost.
    status = models.CharField(max_length = 100, choices=[('Running','Running'),('Won','Won'),('Lost','Lost')])
    def __str__(self):
        # Human-readable label: the home team of the fixture.
        return self.home_team
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.