id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
161502 | from output.models.sun_data.elem_decl.type_def.type_def00203m.type_def00203m_xsd.type_def00203m import Root
__all__ = [
"Root",
]
| StarcoderdataPython |
188140 | <filename>mooncake_utils/file.py<gh_stars>1-10
# -*- coding:utf-8 -*-
# @author <NAME> ( <EMAIL> )
# @date 2017-06-07
import os
import glob
import shutil
def mkdirp(directory):
    """
    Create *directory* and any missing parents, like shell ``mkdir -p``.

    Pure-Python replacement for shelling out via ``os.system()``, which
    avoids the cost of forking a child process.

    :param directory: path of the directory to create
    """
    try:
        os.makedirs(directory)
    except OSError:
        # The directory may have been created concurrently between the
        # original isdir() check and makedirs() (TOCTOU race); only
        # re-raise if the path still is not a directory.
        if not os.path.isdir(directory):
            raise
def rm_folder(path, debug = False):
    """
    Remove every entry directly under *path* (clear the folder).

    Subdirectories are removed recursively with ``shutil.rmtree`` while
    plain files use ``os.remove`` -- the original implementation raised
    ``OSError`` as soon as the folder contained a subdirectory, because
    ``glob`` matches directories too.

    :param path: folder to clear; a trailing ``/*`` is appended if missing
    :param debug: if True, only log what would be removed; do not delete.
    """
    if not path.endswith("/*"):
        path += "/*"
    for entry in glob.glob(path):
        print("removing [%s]" % entry)
        if debug:
            continue
        if os.path.isdir(entry):
            shutil.rmtree(entry)
        else:
            os.remove(entry)
def rglob(p):
    """Recursively collect the paths of all files under directory *p*."""
    return [
        os.path.join(base, name)
        for base, _dirs, names in os.walk(p)
        for name in names
    ]
def safewrite(filename, content):
    """Writes the content to a temp file and then moves the temp file to
    given filename to avoid overwriting the existing file in case of errors.
    """
    tmp_name = filename + '.tmp'
    # The original used the ``file()`` builtin, which was removed in
    # Python 3.  Use open() with a context manager so the handle is
    # closed even if write() raises.
    with open(tmp_name, 'w') as f:
        f.write(content)
    os.rename(tmp_name, filename)
if __name__ == "__main__":
pass
| StarcoderdataPython |
164987 | import pytest
from unittest.mock import MagicMock
@pytest.fixture(scope="function")
def canifier(ctre):
    # Fresh CANifier on CAN device id 1 for every test function.
    return ctre.CANifier(1)
@pytest.fixture(scope="function")
def cdata(canifier, hal_data):
    # Simulated HAL data record backing the CANifier created above.
    return hal_data["CAN"][1]
def test_canifier_init(ctre, hal_data):
    # Constructing a CANifier must register device id 1 in the HAL data.
    assert 1 not in hal_data["CAN"]
    ctre.CANifier(1)
    assert 1 in hal_data["CAN"]
def test_canifier_setLEDOutput(canifier, cdata):
    canifier.setLEDOutput(0.1, canifier.LEDChannel.C)
    # TODO pytest bug?
    # assert cdata['led_c'] == pytest.approx(0.1, 0.01)
def test_canifier_setGeneralOutput(canifier, cdata):
    # QUAD_A maps to bit 2 (0x04) in the pin bitmasks.
    canifier.setGeneralOutput(canifier.GeneralPin.QUAD_A, True, True)
    assert cdata["general_pin_outputs"] == 0x04
    assert cdata["general_pin_is_output"] == 0x04
def test_canifier_setGeneralOutputs(canifier, cdata):
    canifier.setGeneralOutputs(2, 1)
    assert cdata["general_pin_outputs"] == 2
    assert cdata["general_pin_is_output"] == 1
@pytest.mark.xfail(raises=AttributeError)
def test_canifier_getGeneralInputs(ctre, canifier):
    pin_values = ctre.canifier.PinValues()
    canifier.getGeneralInputs(pin_values)
def test_canifier_getGeneralInput(canifier, cdata):
    # Pin N reads the Nth bit of the simulated input bitmask.
    assert canifier.getGeneralInput(2) == False
    assert canifier.getGeneralInput(3) == False
    cdata["general_pin_inputs"] = 0x04
    assert canifier.getGeneralInput(2) == True
    assert canifier.getGeneralInput(3) == False
    cdata["general_pin_inputs"] = 0x08
    assert canifier.getGeneralInput(2) == False
    assert canifier.getGeneralInput(3) == True
def test_canifier_lastError(ctre, canifier, cdata):
    canifier.setLastError(int(ctre.ErrorCode.GeneralWarning))
    assert canifier.getLastError() == ctre.ErrorCode.GeneralWarning
def test_canifier_setPWMOutput(canifier, cdata):
    output = 102 / 1023.0
    canifier.setPWMOutput(canifier.PWMChannel.C2, output)
    # TODO: pytest bug?
    # assert cdata['pwm_2'] == pytest.approx(output, 0.001)
def test_canifier_enablePWMOutput(canifier, cdata):
    canifier.enablePWMOutput(canifier.PWMChannel.C1, True)
    assert cdata["pwm_1_en"] == True
def test_canifier_getPWMInput(canifier, cdata):
    dutyCycle, period = canifier.getPWMInput(canifier.PWMChannel.C1)
    # assert dutyCycle == pytest.approx(0.0042, 0.01)
    # TODO: pytest bug?
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_configSetCustomParam(canifier, cdata):
    canifier.configSetCustomParam(1, 2, 3)
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_configGetCustomParam(canifier, cdata):
    canifier.configGetCustomParam(1, 2)
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_configSetParameter(canifier, cdata):
    canifier.configSetParameter(1, 2, 3, 4, 5)
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_configGetParameter(canifier, cdata):
    canifier.configGetParameter(1, 2, 3)
def test_canifier_statusFramePeriod(canifier, cdata):
    # The period written for a status frame must be read back unchanged.
    canifier.setStatusFramePeriod(canifier.StatusFrame.Status_1_General, 2, 0)
    assert canifier.getStatusFramePeriod(canifier.StatusFrame.Status_1_General, 0) == 2
def test_canifier_setControlFramePeriod(canifier, cdata):
    canifier.setControlFramePeriod(canifier.ControlFrame.CANifier_Control_1_General, 2)
    assert cdata["control_1"] == 2
def test_canifier_getFirmwareVersion(canifier, cdata):
    assert canifier.getFirmwareVersion() == 0
def test_canifier_hasResetOccurred(canifier, cdata):
    # The asserts below expect the reset flag to be cleared once read:
    # True immediately after the simulated reset, False on the next call.
    assert canifier.hasResetOccurred() == False
    cdata["reset_occurred"] = True
    assert canifier.hasResetOccurred() == True
    assert canifier.hasResetOccurred() == False
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_getFaults(canifier, cdata):
    canifier.getFaults()
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_getStickyFaults(canifier, cdata):
    canifier.getStickyFaults()
@pytest.mark.xfail(raises=NotImplementedError)
def test_canifier_clearStickyFaults(canifier, cdata):
    canifier.clearStickyFaults(1)
def test_canifier_getBusVoltage(canifier, cdata):
    canifier.getBusVoltage()
| StarcoderdataPython |
66043 | import cv2
import numpy as np
# Capture the input frame
def get_frame(cap, scaling_factor=0.5):
    """Read one frame from *cap* and return it scaled by *scaling_factor*."""
    _ret, img = cap.read()
    # Resize the frame
    return cv2.resize(
        img, None,
        fx=scaling_factor, fy=scaling_factor,
        interpolation=cv2.INTER_AREA,
    )
if __name__=='__main__':
    # Initialize the video capture object
    # NOTE(review): index 1 selects the second camera -- presumably an
    # external webcam; confirm for the target machine (0 is the default).
    cap = cv2.VideoCapture(1)
    # Create the background subtractor object
    bgSubtractor = cv2.createBackgroundSubtractorMOG2()
    # This factor controls the learning rate of the algorithm.
    # The learning rate refers to the rate at which your model
    # will learn about the background. Higher value for
    # 'history' indicates a slower learning rate. You
    # can play with this parameter to see how it affects
    # the output.
    history = 100
    # Iterate until the user presses the ESC key
    while True:
        frame = get_frame(cap, 0.5)
        # Apply the background subtraction model to the input frame
        mask = bgSubtractor.apply(frame, learningRate=1.0/history)
        # Convert from grayscale to 3-channel RGB
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        cv2.imshow('Input frame', frame)
        # Bitwise AND keeps only the pixels flagged as foreground.
        cv2.imshow('Moving Objects MOG', mask & frame)
        # Check if the user pressed the ESC key
        c = cv2.waitKey(delay=30)
        if c == 27:  # 27 == ESC keycode
            break
    cap.release()
    cv2.destroyAllWindows()
190507 | <reponame>planlodge/ChowNow-Theme-Wordpress<filename>tests/data/jacob/generate_remove_accents_tests.py
import unicodedata, codecs
# Generates testdata for the WordPress `remove_accents` function.
#
# Unicode defines character decompositions: e.g., an
# e with an umlaut (LATIN SMALL LETTER E WITH DIAERESIS) decomposes
# to 0x0065 (LATIN SMALL LETTER E) and 0x0308 (COMBINING DIAERESIS).
#
# Some characters aren't decomposable, but still have ASCII
# representations, e.g. the "ae" ligature should be written as "ae".
#
#
# This code derives from the work of <NAME> / effbot
# <http://effbot.org/zone/unicode-convert.htm>
#
# He uses different replacements ("d" for eth, "oe" for o-with-stroke, etc.)
# but the below follows Wordpress's behaviour.
#
# NOTE(review): this module is Python 2 (``unichr``, list-returning ``map``,
# ``print`` statements elsewhere in the file).
CHAR_REPLACEMENT = {
    # latin-1 characters that don't have a unicode decomposition
    0xc6: u"AE",  # LATIN CAPITAL LETTER AE
    0xd0: u"DH",  # LATIN CAPITAL LETTER ETH
    0xd8: u"O",   # LATIN CAPITAL LETTER O WITH STROKE
    0xde: u"TH",  # LATIN CAPITAL LETTER THORN
    0xdf: u"ss",  # LATIN SMALL LETTER SHARP S
    0xe6: u"ae",  # LATIN SMALL LETTER AE
    0xf0: u"dh",  # LATIN SMALL LETTER ETH
    0xf8: u"o",   # LATIN SMALL LETTER O WITH STROKE
    0xfe: u"th",  # LATIN SMALL LETTER THORN
    0x13f: u"L",  # LATIN CAPITAL LETTER L WITH MIDDLE DOT
    0x140: u"l",  # LATIN SMALL LETTER L WITH MIDDLE DOT
    0x149: u"N"   # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
}
# Latin-1 Supplement (0080-00FF): identical to ISO-8859-1.
# 0080009F are control characters, 00A000BF are currency symbols,
# punctuation and numerals.
latin1_supplement = map(unichr, range(0x00C0, 0x0100))
# Latin Extended-A 0100017F
latin_extended_a = map(unichr, range(0x0100, 0x0180))
def remove_accents(chars):
    """Divides a given string into decomposable and undecomposable characters.

    Returns two lists of ``(character, ascii_replacement)`` pairs: one for
    characters with a usable Unicode decomposition, one for characters
    handled via CHAR_REPLACEMENT or a fallback decomposition.
    """
    decomposable = []
    undecomposable = []
    for c in chars:
        # e.g. u'\xeb' (e-with-diaeresis) -> "0065 0308"
        de = unicodedata.decomposition(c)
        if de:
            dechars = de.split(None)
            try:
                # Only keep characters with a decimal value < 300
                dechars = map(lambda i: int(i, 16), dechars)
                dechars = filter(lambda i: i < 300, dechars)
                dechars = map(unichr, dechars)
                de = "".join(dechars)
            except (IndexError, ValueError):
                # Decomposition contained a non-hex token (e.g. a "<tag>")
                # -- fall back to the explicit replacement table, or strip
                # the tag and decode the remaining codepoints.
                if ord(c) in CHAR_REPLACEMENT:
                    de = CHAR_REPLACEMENT[ord(c)]
                else:
                    dechars = filter(lambda s: s[0] != "<", dechars)
                    dechars = map(lambda i: int(i, 16), dechars)
                    dechars = map(unichr, dechars)
                    de = "".join(dechars)
                undecomposable.append((c, de))
            else:
                decomposable.append((c, de))
        else:
            # No decomposition at all; include only if we have an explicit
            # ASCII replacement (other characters are silently dropped).
            if ord(c) in CHAR_REPLACEMENT:
                de = CHAR_REPLACEMENT[ord(c)]
                undecomposable.append((c, de))
    return decomposable, undecomposable
def write_cases(case, data, in_encoding="utf-8", out_encoding="utf-8"):
    """Write parallel <case>.input.txt / <case>.output.txt test fixtures.

    *data* is a list of (input_char, expected_output) pairs, or a list of
    such lists (one line written per inner list).
    """
    if not isinstance(data[0], list):
        data = [data]
    print "generating %s data" % case
    infile = codecs.open(case + ".input.txt", "w", in_encoding)
    outfile = codecs.open(case + ".output.txt", "w", out_encoding)
    for data_ in data:
        # Split the pairs into an input column and an output column.
        inline, outline = zip(*data_)
        infile.write("".join(inline) + "\n")
        outfile.write("".join(outline) + "\n")
    infile.close()
    outfile.close()
if __name__ == "__main__":
    # Generate fixtures for each character range, split into decomposable /
    # undecomposable subsets plus their union.
    l1s_decomposable, l1s_undecomposable = remove_accents(latin1_supplement)
    l1s_both = l1s_decomposable + l1s_undecomposable
    lea_decomposable, lea_undecomposable = remove_accents(latin_extended_a)
    lea_both = lea_decomposable + lea_undecomposable
    write_cases("removes_accents_from_decomposable_latin1_supplement",
                l1s_decomposable, in_encoding="iso-8859-1", out_encoding="ascii")
    write_cases("removes_accents_from_undecomposable_latin1_supplement",
                l1s_undecomposable, in_encoding="iso-8859-1", out_encoding="ascii")
    write_cases("removes_accents_from_latin1_supplement",
                l1s_both, in_encoding="iso-8859-1", out_encoding="ascii")
    write_cases("removes_accents_from_decomposable_latin_extended_a",
                lea_decomposable, in_encoding="utf-8", out_encoding="ascii")
    write_cases("removes_accents_from_undecomposable_latin_extended_a",
                lea_undecomposable, in_encoding="utf-8", out_encoding="ascii")
    write_cases("removes_accents_from_latin_extended_a",
                lea_both, in_encoding="utf-8", out_encoding="ascii")
    write_cases("removes_accents_from_latin1_supplement_and_latin_extended_a",
                l1s_both + lea_both, in_encoding="utf-8", out_encoding="ascii")
| StarcoderdataPython |
172683 | <reponame>vfdev-5/ignite-examples
from argparse import ArgumentParser
from pathlib import Path
from train import run
if __name__ == "__main__":
    # Run a training job for every config file in a cross-validation folder.
    parser = ArgumentParser()
    parser.add_argument("config_cv_folder", type=str,
                        help="Folder with configuration files")
    args = parser.parse_args()
    print("Run CV")
    path = Path(args.config_cv_folder)
    assert path.exists(), "Path '{}' is not found".format(path.as_posix())
    config_files = sorted(path.glob("*.py"))
    for config_file in config_files:
        if config_file.name == "__init__.py":
            continue
        try:
            print("\n\n----- run {} -----\n".format(config_file.as_posix()))
            run(config_file.as_posix())
        except Exception as e:
            # A failing fold must not stop the remaining folds.
            print("\n\n !!! Run {} failed !!!\n".format(config_file.as_posix()))
            print("\n{}".format(e))
| StarcoderdataPython |
3327552 | from django.contrib import admin
# Register your models here.
from detection.models import File
admin.site.register(File) | StarcoderdataPython |
1605286 | <gh_stars>0
import shelve
import re
import json
import threading
import traceback
import os
from collections import defaultdict
import subprocess
try:
from subprocess import DEVNULL #pylint: disable=no-name-in-module
except:
DEVNULL = open(os.devnull, "r+b")
from .common import * #pylint: disable=wildcard-import
REF_DISPLAY_RANGE = 100
MAX_SEQ_DISPLAY_SIZE = 6000
# Test with the following function call
#
# generate_alignment_viz_json('../../nt','nt.db','NT',
# 'taxids.gsnapl.unmapped.bowtie2.lzw.cdhitdup.priceseqfilter.unmapped.star.m8
# ', 'taxid_annot_sorted_nt.fasta', 'align_viz')
def parse_reads(annotated_fasta, db_type):
    """Map read ids to their sequence and lineage from an annotated FASTA.

    Headers are expected to embed annotations of the form
    ``family_<db>:<id>:...genus_<db>:<id>:...species_<db>:<id>...NT:...:<read_id>``.

    :param annotated_fasta: path to the annotated FASTA file
    :param db_type: "NT" or "NR"; selects which annotation set is read
    :return: dict mapping read_id -> [sequence, family_id, genus_id, species_id]
    """
    read2seq = {}
    search_string = "species_%s" % (db_type.lower())
    # Raw string literals so the regex escapes (\d) survive string-literal
    # processing -- non-raw "\d" is an invalid escape sequence that newer
    # Python versions warn about (and will eventually reject).
    adv_search_string = r"family_%s:([-\d]+):.*genus_%s:([-\d]+):.*species_%s:(" \
                        r"[-\d]+).*NT:[^:]*:(.*)" % (
        db_type.lower(), db_type.lower(), db_type.lower())
    with open(annotated_fasta, 'r') as af:
        read_id = ''
        for line in af:
            if line[0] == '>':
                read_id = line
            else:
                sequence = line
                m = re.search(r"%s:([\d-]*)" % search_string, read_id)
                if m:
                    species_id = int(m.group(1))
                    if species_id > 0 or species_id < INVALID_CALL_BASE_ID:
                        # Valid species call; extract all three taxid levels
                        # plus the read id from the header.
                        ma = re.search(adv_search_string, read_id)
                        if ma:
                            read2seq[ma.group(4).rstrip()] = [
                                sequence.rstrip(),
                                ma.group(1),
                                ma.group(2),
                                ma.group(3)
                            ]
    return read2seq
def compress_coverage(coverage):
    """Collapse a per-position coverage dict into "start-end" range keys.

    Runs of adjacent positions sharing the same depth become a single
    ``"start-end": depth`` entry.  Dicts with fewer than two entries are
    returned unchanged (keys stay numeric in that case).
    """
    positions = sorted(coverage)
    if len(positions) < 2:
        return coverage
    compressed = {}
    run_start = run_end = positions[0]
    depth = coverage[run_start]
    for pos in positions[1:]:
        if pos - run_end == 1 and coverage[pos] == depth:
            # Extend the current run of identical, contiguous depth.
            run_end = pos
        else:
            compressed["%d-%d" % (run_start, run_end)] = depth
            run_start = run_end = pos
            depth = coverage[pos]
    compressed["%d-%d" % (run_start, run_end)] = depth
    return compressed
def calculate_alignment_coverage(alignment_data):
    """Summarize the reads aligned against one reference sequence.

    Returns totals for read/aligned/mismatched lengths and read count; when
    the reference length is non-zero, also adds the number of distinct
    covered bases and a compressed per-base coverage map.
    """
    ref_len = alignment_data['ref_seq_len']
    summary = {
        'ref_seq_len': ref_len,
        'total_read_length': 0,
        'total_aligned_length': 0,
        'total_mismatched_length': 0,
        'num_reads': 0
    }
    if ref_len == 0:
        # No reference to cover; coverage fields are omitted entirely.
        return summary
    depth = {}
    for read in alignment_data['reads']:
        sequence = read[1]
        metrics = read[2]
        start, end = int(metrics[-4]), int(metrics[-3])
        if start > end:
            start, end = end, start
        # Reference coordinates are 1-based inclusive; shift to 0-based.
        start -= 1
        summary['total_read_length'] += len(sequence)
        summary['total_aligned_length'] += end - start
        summary['total_mismatched_length'] += int(metrics[2])
        summary['num_reads'] += 1
        for base in range(start, end):
            depth[base] = depth.get(base, 0) + 1
    summary['distinct_covered_length'] = len(depth)
    summary['coverage'] = compress_coverage(depth)
    return summary
def generate_alignment_viz_json(nt_file, nt_loc_db, db_type, annotated_m8,
                                annotated_fasta, output_json_dir):
    """Generate alignment details from the reference sequence, m8, and
    annotated fasta.

    Writes one JSON file per family, genus and species under
    *output_json_dir* plus a "<output_json_dir>.summary" text file, and
    returns the summary file's name.

    NOTE(review): uses dict.iteritems -- this function is Python 2 only.
    """
    # Go through annotated_fasta with a db_type (NT/NR match). Infer the
    # family/genus/species info
    if db_type != 'NT' and db_type != 'NR':
        return
    read2seq = parse_reads(annotated_fasta, db_type)
    print("Read to Seq dictionary size: %d" % len(read2seq))
    # Go through m8 file and infer the alignment info. Grab the fasta
    # sequence, lineage info.
    groups = {}
    line_count = 0
    nt_loc_dict = shelve.open(nt_loc_db)
    with open(annotated_m8, 'r') as m8f:
        for line in m8f:
            line_count += 1
            if line_count % 100000 == 0:
                print("%d lines in the m8 file processed." % line_count)
            line_columns = line.rstrip().split("\t")
            read_id = line_columns[0]
            seq_info = read2seq.get(read_id)
            if seq_info:
                accession_id = line_columns[1]
                metrics = line_columns[2:]
                # "ad" is short for "accession_dict" aka "accession_info"
                ad = groups.get(accession_id, {'reads': []})
                sequence, ad['family_id'], ad['genus_id'], ad[
                    'species_id'] = seq_info
                ref_start = int(metrics[-4])
                ref_end = int(metrics[-3])
                if ref_start > ref_end:  # SWAP
                    (ref_start, ref_end) = (ref_end, ref_start)
                ref_start -= 1
                # Show REF_DISPLAY_RANGE bases of context on each side of
                # the aligned region (clamped at the sequence start).
                prev_start = ref_start - REF_DISPLAY_RANGE
                if prev_start < 0:
                    prev_start = 0
                post_end = ref_end + REF_DISPLAY_RANGE
                markers = (prev_start, ref_start, ref_end, post_end)
                ad['reads'].append([read_id, sequence, metrics, markers])
                ad['ref_link'] = "https://www.ncbi.nlm.nih.gov/nuccore/%s?report=fasta" % accession_id
                groups[accession_id] = ad
    print("%d lines in the m8 file" % line_count)
    print("%d unique accession ids" % len(groups))
    # Fetch the reference sequences (writes per-accession temp files and/or
    # sets 'ref_seq'/'ref_seq_len'/'name' on each accession_info).
    if nt_file.startswith("s3://"):
        get_sequences_by_accession_list_from_s3(groups, nt_loc_dict, nt_file)
    else:
        get_sequences_by_accession_list_from_file(groups, nt_loc_dict, nt_file)
    result_dict = {}
    to_be_deleted = []
    error_count = 0  # Cap max errors
    for accession_id, ad in groups.iteritems():
        ad['coverage_summary'] = calculate_alignment_coverage(ad)
    # "ad" is short for "accession_dict" aka "accession_info"
    for accession_id, ad in groups.iteritems():
        try:
            tmp_file = 'accession-%s' % accession_id
            if ad['ref_seq_len'] <= MAX_SEQ_DISPLAY_SIZE and 'ref_seq' not in ad:
                if ad['ref_seq_len'] == 0:
                    ad['ref_seq'] = "REFERENCE SEQUENCE NOT FOUND"
                else:
                    with open(tmp_file, "rb") as tf:
                        ad['ref_seq'] = tf.read()
                    to_be_deleted.append(tmp_file)
            if 'ref_seq' in ad:
                # Whole reference is in memory; slice the display context
                # segments (before / aligned / after) per read.
                ref_seq = ad['ref_seq']
                for read in ad['reads']:
                    prev_start, ref_start, ref_end, post_end = read[3]
                    read[3] = [
                        ref_seq[prev_start:ref_start],
                        ref_seq[ref_start:ref_end], ref_seq[ref_end:post_end]
                    ]
            else:
                # The reference sequence is too long to read entirely in RAM,
                # so we only read the mapped segments.
                with open(tmp_file, "rb") as tf:
                    for read in ad['reads']:
                        prev_start, ref_start, ref_end, post_end = read[3]
                        tf.seek(prev_start, 0)
                        segment = tf.read(post_end - prev_start)
                        read[3] = [
                            segment[0:(ref_start - prev_start)],
                            segment[(ref_start - prev_start):(
                                ref_end - prev_start)],
                            segment[(ref_end - prev_start):(
                                post_end - prev_start)]
                        ]
                to_be_deleted.append(tmp_file)
                if ad['ref_seq_len'] > MAX_SEQ_DISPLAY_SIZE:
                    ad['ref_seq'] = '...Reference Seq Too Long ...'
        except:
            ad['ref_seq'] = "ERROR ACCESSING REFERENCE SEQUENCE FOR ACCESSION " \
                            "ID {}".format(accession_id)
            if error_count == 0:
                # Print stack trace for first error
                traceback.print_exc()
            error_count += 1
        finally:
            # Always file this accession under family -> genus -> species,
            # even when its reference sequence could not be read.
            family_id = ad.pop('family_id')
            genus_id = ad.pop('genus_id')
            species_id = ad.pop('species_id')
            family_dict = result_dict.get(family_id, {})
            genus_dict = family_dict.get(genus_id, {})
            species_dict = genus_dict.get(species_id, {})
            species_dict[accession_id] = ad
            genus_dict[species_id] = species_dict
            family_dict[genus_id] = genus_dict
            result_dict[family_id] = family_dict
    if error_count > 10:
        # Fail this many and the job is toast
        msg = "Sorry, could not access reference sequences for over " \
              "{error_count} accession IDs.".format(error_count=error_count)
        raise RuntimeError(msg)

    def safe_multi_delete(files):
        # Best-effort cleanup of the per-accession temp files.
        for f in files:
            try:
                os.remove(f)
            except:
                pass

    # Delete in the background while the JSON output is being written.
    deleter_thread = threading.Thread(
        target=safe_multi_delete, args=[to_be_deleted])
    deleter_thread.start()

    def align_viz_name(tag, lin_id):
        # Output path for one taxonomic level, e.g. "nt.species.123.align_viz.json".
        return "%s/%s.%s.%d.align_viz.json" % (output_json_dir,
                                               db_type.lower(), tag,
                                               int(lin_id))

    # Output JSON by species, genus, family
    execute_command("mkdir -p %s" % output_json_dir)
    for (family_id, family_dict) in result_dict.iteritems():
        with open(align_viz_name("family", family_id), 'wb') as outjf:
            json.dump(family_dict, outjf)
        for (genus_id, genus_dict) in family_dict.iteritems():
            with open(align_viz_name("genus", genus_id), 'wb') as outjf:
                json.dump(genus_dict, outjf)
            for (species_id, species_dict) in genus_dict.iteritems():
                with open(align_viz_name("species", species_id),
                          'wb') as outjf:
                    json.dump(species_dict, outjf)
    deleter_thread.join()
    summary = "Read2Seq Size: %d, M8 lines %d, %d unique accession ids" % (
        len(read2seq), line_count, len(groups))
    summary_file_name = "%s.summary" % output_json_dir
    with open(summary_file_name, 'w') as summary_f:
        summary_f.write(summary)
    return summary_file_name
def delete_many(files, semaphore=None): #pylint: disable=dangerous-default-value
    """Best-effort removal of *files*; releases *semaphore* when done.

    The first failed removal aborts the loop with a log line instead of
    propagating, so callers are never interrupted by cleanup errors.
    """
    try:
        for f in files:
            os.remove(f)
    except:
        with print_lock:
            print("Couldn't delete some temp files. Moving on.")
    finally:
        if semaphore:
            semaphore.release()
def get_sequences_by_accession_list_from_file(accession2seq, nt_loc_dict,
                                              nt_file):
    """Populate each accession_info dict with its reference sequence,
    length and name, read from a local NT database file.

    NOTE(review): uses dict.iteritems -- Python 2 only.
    """
    with open(nt_file) as ntf:
        for accession_id, accession_info in accession2seq.iteritems():
            (ref_seq, seq_name) = get_sequence_by_accession_id_ntf(
                accession_id, nt_loc_dict, ntf)
            accession_info['ref_seq'] = ref_seq
            accession_info['ref_seq_len'] = len(ref_seq)
            accession_info['name'] = seq_name
def get_sequences_by_accession_list_from_s3(accession_id_groups, nt_loc_dict,
                                            nt_s3_path):
    """Fetch reference sequences from S3, one thread per accession id.

    A semaphore caps concurrency at 64 in-flight fetches; any thread
    failure is recorded in *error_flags* and turned into a RuntimeError
    after all threads have joined.
    """
    threads = []
    error_flags = {}
    semaphore = threading.Semaphore(64)
    mutex = threading.RLock()
    # Strip the "s3://" prefix and split into bucket and key.
    nt_bucket, nt_key = nt_s3_path[5:].split("/", 1)
    for accession_id, accession_info in accession_id_groups.iteritems():
        # Acquired here, released by the worker thread when it finishes.
        semaphore.acquire()
        t = threading.Thread(
            target=get_sequence_for_thread,
            args=[
                error_flags, accession_info, accession_id, nt_loc_dict,
                nt_bucket, nt_key, semaphore, mutex
            ])
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    if error_flags:
        raise RuntimeError("Error in getting sequences by accession list.")
def get_sequence_for_thread(error_flags,
                            accession_info,
                            accession_id,
                            nt_loc_dict,
                            nt_bucket,
                            nt_key,
                            semaphore,
                            mutex,
                            seq_count=[0]): #pylint: disable=dangerous-default-value
    """Worker: fetch one accession's sequence from S3 and record metadata.

    ``seq_count`` is a deliberately mutable default -- a process-wide
    counter shared by all threads for progress logging (hence the pylint
    disable).  All shared-state updates happen under *mutex*; *semaphore*
    is always released so the dispatcher can start the next worker.
    """
    try:
        (ref_seq_len, seq_name) = get_sequence_by_accession_id_s3(
            accession_id, nt_loc_dict, nt_bucket, nt_key)
        with mutex:
            accession_info['ref_seq_len'] = ref_seq_len
            accession_info['name'] = seq_name
            seq_count[0] += 1
            if seq_count[0] % 100 == 0:
                msg = "%d sequences fetched, most recently %s" % (seq_count[0],
                                                                  accession_id)
                print(msg)
    except:
        with mutex:
            # Only the first failure prints its traceback.
            if not error_flags:
                traceback.print_exc()
            error_flags["error"] = 1
    finally:
        semaphore.release()
def get_sequence_by_accession_id_ntf(accession_id, nt_loc_dict, ntf):
    """Read one reference sequence from an open NT database file.

    ``nt_loc_dict`` maps accession id -> (offset, header_len, seq_len).
    Returns ``(sequence, name)``; both are empty strings when the
    accession id is not present in the location index.
    """
    location = nt_loc_dict.get(accession_id)
    if not location:
        return '', ''
    offset = location[0]
    total_len = location[1] + location[2]
    ntf.seek(offset, 0)
    header, body = ntf.read(total_len).split("\n", 1)
    # Strip the FASTA body's embedded line breaks, and drop the leading
    # ">accession " token from the header to keep just the display name.
    return body.replace("\n", ""), header.split(" ", 1)[1]
def get_sequence_by_accession_id_s3(accession_id, nt_loc_dict, nt_bucket,
                                    nt_key):
    """Stream one accession's byte range from S3 via a named pipe.

    Writes the sequence body (newlines stripped) to a local file named
    ``accession-<id>`` and returns ``(seq_len, seq_name)``; ``(0, '')``
    when the accession id is not in the location index.  Retries up to
    three times with exponential backoff.

    NOTE(review): ``time.sleep`` is used below but ``time`` is not
    imported in this file -- presumably re-exported by the
    ``from .common import *`` at the top; confirm.
    """
    seq_len = 0
    seq_name = ''
    entry = nt_loc_dict.get(accession_id)
    if not entry:
        return seq_len, seq_name
    (range_start, name_length, seq_len) = entry
    accession_file = 'accession-%s' % accession_id
    NUM_RETRIES = 3
    for attempt in range(NUM_RETRIES):
        try:
            pipe_file = 'pipe-{attempt}-accession-{accession_id}'.format(
                attempt=attempt, accession_id=accession_id)
            os.mkfifo(pipe_file)
            # Ranged GET of header + sequence bytes, streamed into the fifo.
            get_range = "aws s3api get-object --range bytes=%d-%d --bucket %s --key %s %s" % (
                range_start, range_start + name_length + seq_len - 1,
                nt_bucket, nt_key, pipe_file)
            get_range_proc = subprocess.Popen(
                get_range, shell=True, stdout=DEVNULL)
            # tee splits the stream: body (all lines after the first, with
            # newlines removed) goes to accession_file; head -1 returns the
            # FASTA header line, whose text after the first space is the name.
            cmd = "cat {pipe_file} | tee >(tail -n+2 | tr -d '\n' > {accession_file}) | head -1".format(
                pipe_file=pipe_file, accession_file=accession_file)
            seq_name = subprocess.check_output(
                cmd, executable='/bin/bash', shell=True).split(" ", 1)[1]
            exitcode = get_range_proc.wait()
            msg = "Error in get_sequence_by_accession_id_s3."
            assert exitcode == 0, msg
            # Actual on-disk body size (newlines removed by tr above).
            seq_len = os.stat(accession_file).st_size
            break
        except:
            if attempt + 1 < NUM_RETRIES:
                # Exponential backoff: 1s, 4s, ...
                time.sleep(1.0 * (4**attempt))
            else:
                print(
                    "All retries failed for get_sequence_by_accession_id_s3.")
                raise
        finally:
            try:
                os.remove(pipe_file)
            except:
                pass
    return seq_len, seq_name
def accessionid2seq_main(arguments):
    """Entry point: download inputs from S3, build the alignment-viz JSON
    tree, upload the results back to S3, and clean up the work directory.

    :param arguments: dict-like of CLI options (``--s3_db_path``,
        ``--input_fasta_s3_path``, etc.)
    """
    # Make work directory
    dest_dir = os.path.join(DEST_DIR, "accession2seq/tmp-%d" % os.getpid())
    execute_command("mkdir -p %s" % dest_dir)
    s3_db_path = arguments.get('--s3_db_path')
    s3_db_loc_path = arguments.get('--s3_db_loc_path')
    db_type = arguments.get('--db_type')
    input_fasta_s3_path = arguments.get('--input_fasta_s3_path')
    input_m8_s3_path = arguments.get('--input_m8_s3_path')
    output_json_s3_path = arguments.get('--output_json_s3_path').rstrip('/')
    db_path = arguments.get('--local_db_path') # Try to get local file first
    local_db_loc_path = os.path.join(dest_dir,
                                     os.path.basename(s3_db_loc_path))
    local_fasta_path = os.path.join(dest_dir,
                                    os.path.basename(input_fasta_s3_path))
    local_m8_path = os.path.join(dest_dir, os.path.basename(input_m8_s3_path))
    local_json_path = os.path.join(dest_dir, "align_viz")
    if not db_path:
        # No local database available; stream sequences from S3 instead.
        db_path = s3_db_path
    execute_command("aws s3 cp --quiet %s %s" % (s3_db_loc_path,
                                                 local_db_loc_path))
    execute_command("aws s3 cp --quiet %s %s" % (input_fasta_s3_path,
                                                 local_fasta_path))
    execute_command("aws s3 cp --quiet %s %s" % (input_m8_s3_path,
                                                 local_m8_path))
    summary_file_name = generate_alignment_viz_json(
        db_path, local_db_loc_path, db_type, local_m8_path, local_fasta_path,
        local_json_path)
    # Copy the data over
    execute_command("aws s3 cp --quiet %s %s --recursive" %
                    (local_json_path, output_json_s3_path))
    execute_command("aws s3 cp --quiet %s %s/" %
                    (summary_file_name, os.path.dirname(output_json_s3_path)))
    # Clean up
    execute_command("rm -rf %s" % dest_dir)
| StarcoderdataPython |
3229411 | from argparse import ArgumentParser
from config_parser import get_config
from utils.loss import LabelSmoothingLoss
from utils.opt import get_optimizer
from utils.scheduler import WarmUpLR, get_scheduler
from utils.trainer import train, evaluate
from utils.dataset import get_loader
from utils.misc import seed_everything, count_params, get_model, calc_step, log
import torch
from torch import nn
import numpy as np
import wandb
import os
import yaml
import random
import time
from torchsummary import summary
def training_pipeline(config):
    """Initiates and executes all the steps involved with model training.

    Saves the settings, builds data loaders / model / loss / optimizer /
    schedulers, optionally restores a checkpoint, trains, then evaluates
    both the final and best-validation checkpoints on the test set.

    Args:
        config (dict) - Dict containing various settings for the training run.
    """
    config["exp"]["save_dir"] = os.path.join(config["exp"]["exp_dir"], config["exp"]["exp_name"])
    os.makedirs(config["exp"]["save_dir"], exist_ok=True)
    ######################################
    # save hyperparameters for current run
    ######################################
    config_str = yaml.dump(config)
    print("Using settings:\n", config_str)
    with open(os.path.join(config["exp"]["save_dir"], "settings.txt"), "w+") as f:
        f.write(config_str)
    #####################################
    # initialize training items
    #####################################
    # data
    # Each list file holds one sample path per line.
    with open(config["train_list_file"], "r") as f:
        train_list = f.read().rstrip().split("\n")
    with open(config["val_list_file"], "r") as f:
        val_list = f.read().rstrip().split("\n")
    with open(config["test_list_file"], "r") as f:
        test_list = f.read().rstrip().split("\n")
    trainloader = get_loader(train_list, config, train=True)
    valloader = get_loader(val_list, config, train=False)
    testloader = get_loader(test_list, config, train=False)
    # model
    model = get_model(config["hparams"]["model"])
    model = model.to(config["hparams"]["device"])
    print(f"Created model with {count_params(model)} parameters.")
    # NOTE(review): input shape (1, n_mels, 98) and device='cuda' are
    # hard-coded here regardless of config["hparams"]["device"] -- confirm.
    summary(model, (1,config["hparams"]["audio"]["n_mels"],98),device='cuda')
    # loss
    if config["hparams"]["l_smooth"]:
        criterion = LabelSmoothingLoss(num_classes=config["hparams"]["model"]["num_classes"], smoothing=config["hparams"]["l_smooth"])
    else:
        criterion = nn.CrossEntropyLoss()
    # optimizer
    optimizer = get_optimizer(model, config["hparams"]["optimizer"])
    # scheduler
    schedulers = {
        "warmup": None,
        "scheduler": None
    }
    if config["hparams"]["scheduler"]["n_warmup"]:
        schedulers["warmup"] = WarmUpLR(optimizer, total_iters=len(trainloader) * config["hparams"]["scheduler"]["n_warmup"])
    if config["hparams"]["scheduler"]["scheduler_type"] is not None:
        # Main scheduler runs for the epochs remaining after warmup.
        total_iters = len(trainloader) * max(1, (config["hparams"]["scheduler"]["max_epochs"] - config["hparams"]["scheduler"]["n_warmup"]))
        schedulers["scheduler"] = get_scheduler(optimizer, config["hparams"]["scheduler"]["scheduler_type"], total_iters)
    #####################################
    # Resume run
    #####################################
    if config["hparams"]["restore_ckpt"]:
        ckpt = torch.load(config["hparams"]["restore_ckpt"])
        config["hparams"]["start_epoch"] = ckpt["epoch"] + 1
        model.load_state_dict(ckpt["model_state_dict"])
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        if schedulers["scheduler"]:
            schedulers["scheduler"].load_state_dict(ckpt["scheduler_state_dict"])
        print(f'Restored state from {config["hparams"]["restore_ckpt"]} successfully.')
    #####################################
    # Training
    #####################################
    print("Initiating training.")
    train(model, optimizer, criterion, trainloader, valloader, schedulers, config)
    #####################################
    # Final Test
    #####################################
    final_step = calc_step(config["hparams"]["n_epochs"] + 1, len(trainloader), len(trainloader) - 1)
    # evaluating the final state (last.pth)
    test_acc, test_loss = evaluate(model, criterion, testloader, config["hparams"]["device"])
    log_dict = {
        "test_loss_last": test_loss,
        "test_acc_last": test_acc
    }
    log(log_dict, final_step, config)
    # evaluating the best validation state (best.pth)
    ckpt = torch.load(os.path.join(config["exp"]["save_dir"], "best.pth"))
    model.load_state_dict(ckpt["model_state_dict"])
    print("Best ckpt loaded.")
    test_acc, test_loss = evaluate(model, criterion, testloader, config["hparams"]["device"])
    log_dict = {
        "test_loss_best": test_loss,
        "test_acc_best": test_acc
    }
    log(log_dict, final_step, config)
def main(args):
    """Load the config, seed all RNGs, and run the training pipeline,
    optionally wrapped in a Weights & Biases run.

    Args:
        args - parsed CLI namespace with a ``conf`` attribute (path to
            the config.yaml file).
    """
    config = get_config(args.conf)
    seed_everything(config["hparams"]["seed"])
    if config["exp"]["wandb"]:
        # Resolve the W&B API key: key file > env variable > interactive login.
        if config["exp"]["wandb_api_key"] is not None:
            with open(config["exp"]["wandb_api_key"], "r") as f:
                os.environ["WANDB_API_KEY"] = f.read()
        elif os.environ.get("WANDB_API_KEY", False):
            print(f"Found API key from env variable.")
        else:
            wandb.login()
        with wandb.init(project=config["exp"]["proj_name"], name=config["exp"]["exp_name"], config=config["hparams"]):
            training_pipeline(config)
    else:
        training_pipeline(config)
if __name__ == "__main__":
parser = ArgumentParser("Driver code.")
parser.add_argument("--conf", type=str, required=True, help="Path to config.yaml file.")
args = parser.parse_args()
main(args)
| StarcoderdataPython |
4826492 | <filename>postgresqleu/confsponsor/migrations/0009_vat_allow_null.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-06-27 12:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: allow NULL in the sponsor
    # model's vatnumber and vatstatus fields (per the migration's filename,
    # "0009_vat_allow_null").
    dependencies = [
        ('confsponsor', '0008_class_params_json'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sponsor',
            name='vatnumber',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='VAT number'),
        ),
        migrations.AlterField(
            model_name='sponsor',
            name='vatstatus',
            field=models.IntegerField(choices=[(0, 'Company is from inside EU and has VAT number'), (1, 'Company is from inside EU, but does not have VAT number'), (2, 'Company is from outside EU')], null=True, verbose_name='VAT status'),
        ),
    ]
| StarcoderdataPython |
140705 | from __future__ import annotations
import io
import tempfile
import typing
import contextlib
import apicall.config as config
import apicall.arguments as arg
class StartCondition(typing.NamedTuple):
    """Conditions at the start of command execution."""
    # Full argument vector; args[0] is treated as the command name below.
    args: typing.List[str]
    # Optional pre-built configuration; a default Config() is used if None.
    config: typing.Optional[config.Config]
    def parse(self) -> ParsedCondition:
        """Parse the arguments and bundle the result into a ParsedCondition."""
        result = arg.parse(self.args[0], self.args[1:])
        ca = arg.CommandArgs(
            ns=result.ns,
            args=self.args,
            # NOTE(review): conf_file is a placeholder ('fake') -- presumably
            # this type is only used in tests; confirm.
            conf=self.config or config.Config(),
            conf_file='fake',
        )
        return ParsedCondition(
            start_cond=self,
            ca=ca,
            result=result,
        )
class ParsedCondition(typing.NamedTuple):
    """Conditions once argument parsing has completed."""
    start_cond: StartCondition
    ca: arg.CommandArgs
    result: arg.ParseResult
    def exec(self) -> StopCondition:
        """Run the parsed command, capturing stdout/stderr and exit code."""
        out = tempfile.TemporaryFile('w+t')
        err = tempfile.TemporaryFile('w+t')
        with out, err:
            with contextlib.redirect_stdout(out), \
                    contextlib.redirect_stderr(err):
                exit_code = self.fn(self.ca)
            # Rewind so the captured output can be read back in full.
            out.seek(0)
            err.seek(0)
            return StopCondition(
                out=out.read(),
                err=err.read(),
                exit_code=exit_code,
            )
    def __getattr__(self, item):
        # tuple has no __getattr__, so the super() call always raises
        # AttributeError; unknown attributes are delegated to start_cond.
        try:
            return super().__getattr__(item)
        except AttributeError:
            return getattr(self.start_cond, item)
    @property
    def success(self):
        # Whether argument parsing succeeded.
        return self.result.success
    @property
    def error_message(self):
        # Parser output text (error message on failure).
        return self.result.output
    @property
    def fn(self):
        # The handler callable selected by the parser.
        return self.ns.fn
    @property
    def ns(self):
        return self.ca.ns
class StopCondition(typing.NamedTuple):
    """Conditions after the command has finished running."""
    # Captured stdout text.
    out: str
    # Captured stderr text.
    err: str
    exit_code: arg.ExitCode
| StarcoderdataPython |
3237463 | <filename>sa/profiles/Juniper/JUNOS/get_arp.py
# ---------------------------------------------------------------------
# Juniper.JUNOS.get_arp
# ---------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetarp import IGetARP
class Script(BaseScript):
    """Collect the ARP table from a Juniper JUNOS device.

    Implements the IGetARP interface: yields {mac, ip, interface}
    mappings parsed from "show arp" output.
    """
    name = "Juniper.JUNOS.get_arp"
    interface = IGetARP

    # One line of "show arp no-resolve" output:
    # "<mac> <ip> <interface>".
    rx_line = re.compile(
        r"^(?P<mac>[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}:"
        r"[0-9a-f]{2}:[0-9a-f]{2})\s+"
        r"(?P<ip>\d+\.\d+\.\d+\.\d+)\s+"
        r"(?P<interface>\S+)"
    )

    def execute(self, vrf=None):
        """Return the ARP table, defaulting to the "default" VRF.

        # NOTE(review): list_re presumably makes the framework parse each
        # output line with rx_line into a dict of the named groups —
        # confirm against BaseScript.cli documentation.
        """
        if not vrf:
            vrf = "default"
        return self.cli("show arp no-resolve vpn %s | except demux" % vrf, list_re=self.rx_line)
| StarcoderdataPython |
57661 | <gh_stars>1-10
#!/usr/bin/env python
# Print the guardian (MFA) enrollment status for every supported factor
# type using the Auth0 management API.

import json

from auth0_client.Auth0Client import Auth0Client
from auth0_client.menu.menu_helper.common import *
from auth0_client.menu.menu_helper.pretty import *

try:
    # NOTE(review): `enrollments` is never used below.
    enrollments = {}
    client = Auth0Client(auth_config())
    types = ['totp','sms','push','email','recovery-code']
    for my_type in types:
        print(my_type)
        results = client.get_a_guardian_enrollment(id=my_type)
        print(pretty(results))
    else:
        # NOTE(review): this `else` is attached to the `for` loop, so it runs
        # whenever the loop finishes without `break`; since the loop never
        # breaks, 'No users' is printed unconditionally. This was probably
        # meant to be an `if not results:` check — confirm intent.
        print('No users')
except (KeyboardInterrupt, SystemExit):
    sys.exit()
| StarcoderdataPython |
3221086 | <reponame>jonohart/voltha
#!/usr/bin/env python
"""
A simple process to read time-series samples from a kafka topic and shove
the data into graphite/carbon as pickled input.
The code is based on a github/gist by phobos182
(https://gist.github.com/phobos182/3931936).
As all GitHib gists, it is covered by the MIT license.
"""
from optparse import OptionParser
import simplejson
import structlog
from kafka import KafkaConsumer
import pickle
import struct
import socket
import sys
import time
from kafka.consumer.fetcher import ConsumerRecord
from kafka.errors import KafkaError
from common.utils.consulhelpers import get_endpoint_from_consul
log = structlog.get_logger()
class Graphite:
    """Minimal client that ships data to a carbon/graphite pickle receiver,
    retrying failed connects/sends with exponential backoff.
    """

    def __init__(self, host='localhost', port=2004, retry=5, delay=3,
                 backoff=2, timeout=10):
        """
        :param host: graphite/carbon host name
        :param port: carbon pickle-receiver port
        :param retry: maximum number of attempts before giving up
        :param delay: initial delay between attempts (seconds)
        :param backoff: multiplier applied to the delay after each attempt
        :param timeout: socket timeout (seconds)
        """
        self.host = host
        self.port = port
        self.retry = retry
        self.delay = delay
        self.backoff = backoff
        self.timeout = timeout

        # Create the initial socket and connect eagerly so configuration
        # errors surface at construction time.
        self.conn = self._new_socket()
        self.connect()

    def _new_socket(self):
        """Return a fresh TCP socket configured with our timeout."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        return sock

    def _backoff(self, retry, delay, backoff):
        """Sleep for `delay` and return the updated (retry, delay, backoff).

        Raises Exception('Timeout') once the retry budget is exhausted.
        """
        retry -= 1
        if retry == 0:
            raise Exception('Timeout')
        time.sleep(delay)
        delay *= backoff
        return retry, delay, backoff

    def _retry(self, exception, func, *args):
        """Call func(*args), retrying with backoff on the given exception(s)."""
        retry = self.retry
        delay = self.delay
        backoff = self.backoff
        while retry > 0:
            try:
                return func(*args)
            except exception:
                retry, delay, backoff = self._backoff(retry, delay, backoff)

    def connect(self):
        """Connect to graphite, retrying with exponential backoff."""
        retry = self.retry
        backoff = self.backoff
        delay = self.delay
        while retry > 0:
            try:
                # Attempt to connect to Graphite, break if success
                self.conn.connect((self.host, self.port))
                break
            except socket.error:
                # BUG FIX: a socket that failed to connect (or was closed)
                # cannot be reused; the original code called
                # self.conn.connect() on the closed socket, which can never
                # succeed. Ditch it and build a fresh one.
                self.conn.close()
                self.conn = self._new_socket()
                retry, delay, backoff = self._backoff(retry, delay, backoff)

    def close(self):
        """Close connection to Graphite."""
        self.conn.close()

    def send(self, data, retry=3):
        """Send raw bytes to graphite, reconnecting on failure.

        The `retry` parameter is kept for interface compatibility, but — as
        in the original implementation — the instance-level retry budget is
        what is actually used.
        """
        retry = self.retry
        backoff = self.backoff
        delay = self.delay
        # Attempt to send any data in the queue
        while retry > 0:
            # Check socket
            if not self.conn:
                # Re-establish the connection on a fresh socket.
                self.conn = self._new_socket()
                self.connect()
                retry, delay, backoff = self._backoff(retry, delay, backoff)
                continue
            try:
                # Send data to socket
                self.conn.sendall(data)
                break
            except socket.error:
                # BUG FIX: tear down the broken connection and reconnect on
                # a new socket (closed sockets cannot be reconnected).
                self.close()
                self.conn = self._new_socket()
                self.connect()
                retry, delay, backoff = self._backoff(retry, delay, backoff)
                continue
continue
def _pickle(batch):
"""Pickle metrics into graphite format."""
payload = pickle.dumps(batch)
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _convert(msg):
"""Convert a graphite key value string to pickle."""
def extract_slice(ts, prefixes):
for object_path, metrics in prefixes.iteritems():
for metric_name, value in metrics['metrics'].iteritems():
path = '.'.join((object_path, metric_name))
yield (path, ts, value)
assert isinstance(msg, dict)
type = msg.get('type')
if type == 'slice':
extractor, kw = extract_slice, dict(ts=msg['ts'],
prefixes=msg['prefixes'])
else:
raise Exception('Unknown format')
batch = []
for path, timestamp, value in extractor(**kw):
batch.append((path, (timestamp, value)))
return batch
if __name__ == "__main__":
    # NOTE(review): this script uses Python 2 only syntax ("except X, e" and
    # print statements); it will not run under Python 3 without porting.

    # Command line options: where kafka/consul live, which topic to
    # consume, and where the graphite pickle receiver listens.
    parser = OptionParser()
    parser.add_option("-K", "--kafka", dest="kafka",
                      default="localhost:9092", help="Kafka bootstrap server")
    parser.add_option("-c", "--consul", dest="consul",
                      default="localhost:8500",
                      help="Consul server (needed if kafak server is specifed"
                      "with '@kafka' value)")
    parser.add_option("-t", "--topic", dest="topic", help="Kafka topic")
    parser.add_option("-H", "--host", dest="graphite_host",
                      default="localhost", help="Graphite host")
    parser.add_option("-p", "--port", dest="graphite_port", type=int,
                      default=2004, help="Graphite port")

    (options, args) = parser.parse_args()

    # Assign OptParse variables
    kafka = options.kafka
    consul = options.consul
    topic = options.topic
    host = options.graphite_host
    port = options.graphite_port

    # Connect to Graphite
    try:
        graphite = Graphite(host, port)
    except socket.error, e:
        print "Could not connect to graphite host %s:%s" % (host, port)
        sys.exit(1)
    # NOTE(review): socket.gaierror is a subclass of socket.error in
    # Python 2, so this handler is presumably unreachable — confirm.
    except socket.gaierror, e:
        print "Invalid hostname for graphite host %s" % (host)
        sys.exit(1)
    log.info('Connected to graphite at {}:{}'.format(host, port))

    # Resolve Kafka value if it is based on consul lookup
    if kafka.startswith('@'):
        kafka = get_endpoint_from_consul(consul, kafka[1:])

    # Connect to Kafka
    try:
        log.info('connect-to-kafka', kafka=kafka)
        consumer = KafkaConsumer(topic, bootstrap_servers=kafka)
    except KafkaError, e:
        log.error('failed-to-connect-to-kafka', kafka=kafka, e=e)
        sys.exit(1)

    # Consume Kafka topic: each record is a JSON metrics message that gets
    # converted and forwarded to graphite as a pickled batch.
    log.info('start-loop', topic=topic)
    for record in consumer:
        assert isinstance(record, ConsumerRecord)
        msg = record.value
        try:
            batch = _convert(simplejson.loads(msg))
        except Exception, e:
            # Malformed or unknown message: log and keep consuming.
            log.warn('unknown-format', msg=msg)
            continue
        pickled = _pickle(batch)
        graphite.send(pickled)
        log.debug('sent', batch_len=len(batch))

    log.info('exited')
| StarcoderdataPython |
195703 | from symplyphysics import (
symbols, Eq, pretty, solve, Quantity, units,
validate_input, validate_output, expr_to_quantity
)
from symplyphysics.laws.thermodynamics import pressure_from_temperature_and_volume as thermodynamics_law
# Description
## Boyle's law (Isothermal process): T = const, P1 * V1 = P2 * V2
## Where:
## P is pressure,
## V is volume,
## T is temperature
## V1 is initial volume, V2 is resulting volume
## P1 is initial pressure, P1 is resulting pressure
# Symbols for the two states: initial (start) and resulting (end).
pressure_start, pressure_end = symbols('pressure_start pressure_end')
volume_start, volume_end = symbols('volume_start volume_end')

# Boyle's law: at constant temperature, P1 * V1 = P2 * V2.
law = Eq(pressure_start * volume_start, pressure_end * volume_end)

## Derive the same law from the general ideal gas law
temperature_start, temperature_end = symbols('temperature_start temperature_end')

# Isothermal process: temperature is the same in both states.
isothermal_condition = Eq(temperature_start, temperature_end)

# Instantiate the general gas law for the initial state...
eq_start = thermodynamics_law.law.subs({
    thermodynamics_law.temperature: temperature_start,
    thermodynamics_law.volume: volume_start,
    thermodynamics_law.pressure: pressure_start})

# ...and for the resulting state.
eq_end = thermodynamics_law.law.subs({
    thermodynamics_law.temperature: temperature_end,
    thermodynamics_law.volume: volume_end,
    thermodynamics_law.pressure: pressure_end})

derived_law = [eq_start, eq_end, isothermal_condition]

## Check the equivalence of 'law' and 'derived_law'
# Solving the derived system for the final pressure must give the same
# expression as solving Boyle's law directly.
derived_pressure_end = solve(derived_law,
    (temperature_start, temperature_end, pressure_end), dict=True)[0][pressure_end]
assert solve(law, pressure_end, dict=True)[0][pressure_end] == derived_pressure_end
# NOTE(review): this function shadows the builtin `print` inside this module.
# It is part of the package's per-law public API (callers use
# `<law_module>.print()`), so the name is kept as-is.
def print():
    """Return a pretty-printed (ASCII) representation of the law."""
    return pretty(law, use_unicode=False)
@validate_input(pressure_start_=units.pressure, pressure_end_=units.pressure, volume_start_=units.volume)
@validate_output(units.volume)
def calculate_volume(pressure_start_: Quantity, volume_start_: Quantity, pressure_end_: Quantity) -> Quantity:
    """Solve Boyle's law for the resulting volume, given both pressures
    and the initial volume.
    """
    # Symbolic solution for V2, then substitute the measured quantities.
    volume_end_expr = solve(law, volume_end, dict=True)[0][volume_end]
    substitutions = {
        pressure_start: pressure_start_,
        volume_start: volume_start_,
        pressure_end: pressure_end_,
    }
    return expr_to_quantity(volume_end_expr.subs(substitutions), 'volume_end')
| StarcoderdataPython |
1725630 | #!/usr/bin/python
# coding=utf-8
import urllib2
import urllib
import json
import HTMLParser
import re
import alfred
################################################################################
def strip_html(html):
    """Return *html* with every tag (anything between '<' and '>') removed."""
    # Non-greedy match so adjacent tags are stripped individually.
    return re.sub(r"<.*?>", "", html)
def unescape_html( html ):
    """Decode HTML entities (e.g. &amp;) in *html* back to plain text.

    NOTE(review): relies on the Python 2 `HTMLParser` module; under
    Python 3 the equivalent is `html.unescape`.
    """
    html_parser = HTMLParser.HTMLParser()
    return html_parser.unescape( html )
def parse_href(html):
    """Extract the first absolute http(s) URL from an '<a href="...">' tag.

    Returns "" when no such link is present or the tag is malformed.
    """
    start = html.find("<a href=\"http")
    if start == -1:
        return ""
    # The URL begins 9 characters in (right after '<a href="'); the closing
    # double quote is searched for from inside the "http" prefix onward.
    end = html.find("\"", start + 12)
    if end == -1:
        return ""
    return html[start + 9:end]
################################################################################
def search_rfc(query):
    """Search m.zhihu.com for *query* and return a list of alfred.Item results.

    NOTE(review): `link` and `answers` are never assigned inside the parsing
    loop (the code that extracted them appears to have been removed), so no
    items are ever appended; only the title parsing survives. Kept as-is
    apart from the line-splitting fix below — confirm against the upstream
    page markup before relying on this.
    """
    result = []
    # BUG FIX: read() returns the whole page as one str; iterating a str
    # yields single characters, so the substring test below could never
    # match. Split into lines, as the original (previously commented-out)
    # implementation did. A leftover debug print of the raw response was
    # also dropped.
    response = urllib2.urlopen('https://m.zhihu.com/search?q=%s' % query).read().split("\n")
    title = ""
    link = ""
    answers = ""
    for line in response:
        # <a class="question_link" target="_blank" href="/question/{id}">{title}</a>
        # <a href="/question/{id}" class="answer zg-link-gray" target="_blank"><i></i>{answers}</a><a
        if "js-title-link" in line:
            title_begin = line.find("\">")
            title_end = line.rfind("</a>")
            if title_begin == -1 or title_end == -1:
                continue
            title = strip_html(line[title_begin + 2:title_end])
            if title != "" and link != "" and answers != "":
                result.append(alfred.Item(
                    {"uid": alfred.uid(link), "arg": "http://www.zhihu.com/question/" + link},
                    unescape_html(unicode(title, "utf-8")),
                    unicode(answers, "utf-8"),
                    ("zhihu.png")))
            else:
                continue
    return result
################################################################################
def main():
    """Alfred entry point: run the zhihu search and emit the result XML."""
    (param, query) = alfred.args2()
    items = search_rfc(query)
    alfred.write(alfred.xml(items))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1626592 | <filename>testslide/cli.py<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import sys
import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from time import time
from typing import Any, Callable, Iterator, List, Optional, Pattern, Type
import testslide.dsl
from . import Context, TestCase, _TestSlideTestResult
from .runner import DocumentFormatter, LongFormatter, ProgressFormatter, Runner
from .strict_mock import StrictMock
_unittest_testcase_loaded: bool = False
def _filename_to_module_name(name: str) -> str:
if not (
os.path.isfile(name)
and (name.lower().endswith(".py") or name.lower().endswith(".pyc"))
):
raise ValueError("Expected a .py file, got {}".format(name))
if os.path.isabs(name):
name = os.path.relpath(name, os.getcwd())
if name.lower().endswith(".pyc"):
end = -4
else:
end = -3
return name[:end].replace(os.path.sep, ".")
def _get_all_test_case_subclasses() -> List[TestCase]:
def get_all_subclasses(base: Type[unittest.TestCase]) -> List[TestCase]:
return list(
{ # type: ignore
"{}.{}".format(c.__module__, c.__name__): c
for c in (
base.__subclasses__() # type: ignore
+ [g for s in base.__subclasses__() for g in get_all_subclasses(s)] # type: ignore
)
}.values()
)
return get_all_subclasses(unittest.TestCase)
def _get_all_test_cases(import_module_names: List[str]) -> List[TestCase]:
    """Return discovered TestCase subclasses, optionally restricted to the
    given module names; an empty list means "all"."""
    all_cases = _get_all_test_case_subclasses()
    if not import_module_names:
        return all_cases
    return [
        test_case
        for test_case in all_cases
        if test_case.__module__ in import_module_names
    ]
def _load_unittest_test_cases(import_module_names: List[str]) -> None:
    """
    Beta!
    Search for all unittest.TestCase classes that have tests defined, and import them
    as TestSlide contexts and examples. This is useful if you mix unittest.TestCase
    tests and TestSlide at the same file, or if you want to just use TestSlide's test
    runner for existing unittest.TestCase tests.
    """
    # Module-level guard: converting the same TestCases twice would register
    # duplicate contexts, so this function is effectively run-once.
    global _unittest_testcase_loaded
    if _unittest_testcase_loaded:
        return
    _unittest_testcase_loaded = True

    for test_case in _get_all_test_cases(import_module_names):
        # Collect regular (test*), focused (ftest*) and skipped (xtest*)
        # test method names.
        test_method_names = [
            test_method_name
            for test_method_name in dir(test_case)
            if test_method_name.startswith("test")
            or test_method_name.startswith("ftest")
            or test_method_name.startswith("xtest")
            # FIXME: debug why ismethod is not properly filtering methods. Using
            # callabdle as a workaround.
            # if inspect.ismethod(getattr(test_case, test_method_name))
            if callable(getattr(test_case, test_method_name))
        ]
        if not test_method_names:
            continue

        # This extra method is needed so context_code is evaluated with different
        # values of test_case.
        def get_context_code(
            test_case: unittest.TestCase,
        ) -> Callable[[testslide.dsl._DSLContext], None]:
            def context_code(context: testslide.dsl._DSLContext) -> None:
                for test_method_name in test_method_names:

                    @contextmanager
                    def test_result() -> Iterator[_TestSlideTestResult]:
                        # Collect unittest results and re-raise aggregated
                        # failures as TestSlide exceptions on exit.
                        result = _TestSlideTestResult()
                        yield result
                        result.aggregated_exceptions.raise_correct_exception()

                    @contextmanager
                    def setup_and_teardown() -> Iterator[None]:
                        test_case.setUpClass()
                        yield
                        test_case.tearDownClass()

                    # Same trick as above.
                    def gen_example_code(test_method_name: str) -> Callable:
                        def example_code(self: Any) -> None:
                            with test_result() as result:
                                with setup_and_teardown():
                                    test_case(methodName=test_method_name)(  # type: ignore
                                        result=result
                                    )

                        return example_code

                    # Regular example
                    if test_method_name.startswith("test"):
                        context.example(test_method_name)(
                            gen_example_code(test_method_name)
                        )
                    # Focused example
                    if test_method_name.startswith("ftest"):
                        context.fexample(test_method_name)(
                            gen_example_code(test_method_name)
                        )
                    # Skipped example
                    if test_method_name.startswith("xtest"):
                        context.xexample(test_method_name)(
                            gen_example_code(test_method_name)
                        )

            return context_code

        # Register one TestSlide context per TestCase class, named after its
        # fully qualified name.
        testslide.dsl.context("{}.{}".format(test_case.__module__, test_case.__name__))(  # type: ignore
            get_context_code(test_case)
        )
@dataclass(frozen=True)
class _Config(object):
    """Immutable snapshot of all CLI options after argument parsing."""

    # Modules whose tests should be imported and run.
    import_module_names: List[str]
    # Randomize example execution order.
    shuffle: bool
    # List discovered tests instead of running them.
    list: bool
    # Suppress stdout/stderr of tested code.
    quiet: bool
    # Error out when any example is focused (for CI).
    fail_if_focused: bool
    # Stop at the first failing example.
    fail_fast: bool
    # Run only focused examples (or all if none are focused).
    focus: bool
    # Prefix removed from paths in output.
    trim_path_prefix: str
    # Output formatter name (see Cli.FORMAT_NAME_TO_FORMATTER_CLASS).
    format: str
    # Seed for shuffled execution; None means unseeded.
    seed: Optional[int] = None
    # Force colored output even without a TTY.
    force_color: Optional[bool] = False
    # Keep TestSlide frames in stack traces.
    show_testslide_stack_trace: Optional[bool] = False
    # Run only examples whose name contains this text.
    names_text_filter: Optional[str] = None
    # Run only examples matching this regex.
    names_regex_filter: Optional[Pattern[Any]] = None
    # Skip examples matching this regex.
    names_regex_exclude: Optional[Pattern[Any]] = None
    # Print DSL debugging information.
    dsl_debug: Optional[bool] = False
    # Import-profiler threshold in ms; None disables profiling.
    profile_threshold_ms: Optional[int] = None
profile_threshold_ms: Optional[int] = None
class Cli(object):
    """Command-line front end: parses arguments, imports test modules and
    runs (or lists) the discovered examples."""

    # Short and long spellings accepted by --format, mapped to formatters.
    FORMAT_NAME_TO_FORMATTER_CLASS = {
        "p": ProgressFormatter,
        "progress": ProgressFormatter,
        "d": DocumentFormatter,
        "documentation": DocumentFormatter,
        "l": LongFormatter,
        "long": LongFormatter,
    }

    @staticmethod
    def _regex_type(string: str) -> Pattern:
        """argparse `type=` helper: compile the given string as a regex."""
        return re.compile(string)

    def _build_parser(self, disable_test_files: bool) -> argparse.ArgumentParser:
        """Build the argparse parser; the positional test_files argument is
        omitted when modules were given programmatically."""
        parser = argparse.ArgumentParser(description="TestSlide")
        parser.add_argument(
            "-f",
            "--format",
            choices=self.FORMAT_NAME_TO_FORMATTER_CLASS.keys(),
            default="documentation",
            help="Configure output format. Default: %(default)s",
        )
        parser.add_argument(
            "--force-color",
            action="store_true",
            help="Force color output even without a terminal",
        )
        parser.add_argument(
            "--shuffle", action="store_true", help="Randomize example execution order"
        )
        parser.add_argument(
            "-l", "--list", action="store_true", help="List all tests one per line"
        )
        parser.add_argument(
            "--seed",
            nargs=1,
            type=int,
            help="Positive number to seed shuffled examples",
        )
        parser.add_argument(
            "--focus",
            action="store_true",
            help="Only executed focused examples, or all if none focused",
        )
        parser.add_argument(
            "--fail-if-focused",
            action="store_true",
            help="Raise an error if an example is focused. Useful when running tests in a continuous integration environment.",
        )
        parser.add_argument(
            "--fail-fast",
            action="store_true",
            help="Stop execution when an example fails",
        )
        parser.add_argument(
            "--filter-text",
            nargs=1,
            type=str,
            help="Only execute examples that include given text in their names",
        )
        parser.add_argument(
            "--filter-regex",
            nargs=1,
            type=self._regex_type,
            help="Only execute examples which match given regex",
        )
        parser.add_argument(
            "--exclude-regex",
            nargs=1,
            type=self._regex_type,
            help="Exclude examples which match given regex from being executed",
        )
        parser.add_argument(
            "--quiet",
            action="store_true",
            help="Suppress output (stdout and stderr) of tested code",
        )
        parser.add_argument(
            "--dsl-debug",
            action="store_true",
            help=(
                "Print debugging information during execution of TestSlide's "
                "DSL tests."
            ),
        )
        parser.add_argument(
            "--trim-path-prefix",
            nargs=1,
            type=str,
            default=[self._default_trim_path_prefix],
            help=(
                "Remove the specified prefix from paths in some of the output. "
                "Default: {}".format(repr(self._default_trim_path_prefix))
            ),
        )
        parser.add_argument(
            "--show-testslide-stack-trace",
            default=False,
            action="store_true",
            help=(
                "TestSlide's own code is trimmed from stack traces by default. "
                "This flags disables that, useful for TestSlide's own development."
            ),
        )
        parser.add_argument(
            "--import-profiler",
            nargs=1,
            type=int,
            default=None,
            help=(
                "Print profiling information slow import time for modules that took "
                "more than the given number of ms to import. Experimental."
            ),
        )
        if not disable_test_files:
            parser.add_argument(
                "test_files",
                nargs="+",
                type=str,
                default=[],
                help=(
                    "List of file paths that contain either unittes.TestCase "
                    "tests and/or TestSlide's DSL tests."
                ),
            )
        return parser

    def __init__(
        self,
        args: Any,
        default_trim_path_prefix: Optional[str] = None,
        modules: Optional[List[str]] = None,
    ) -> None:
        """
        :param args: raw CLI arguments (e.g. sys.argv[1:]).
        :param default_trim_path_prefix: path prefix stripped from output;
            defaults to the current working directory.
        :param modules: module names to run; when given, the positional
            test_files argument is disabled.
        """
        self.args = args
        self._default_trim_path_prefix = (
            default_trim_path_prefix
            if default_trim_path_prefix
            else os.getcwd() + os.sep
        )
        self.parser = self._build_parser(disable_test_files=bool(modules))
        self._modules = modules

    @staticmethod
    def _do_imports(
        import_module_names: List[str], profile_threshold_ms: Optional[int] = None
    ) -> float:
        """Import all given modules, optionally under the import profiler.

        Returns the elapsed import time in seconds.
        """
        def import_all() -> None:
            for module_name in import_module_names:
                __import__(module_name, level=0)

        if profile_threshold_ms is not None:
            # Imported lazily: the profiler hooks the import machinery.
            from testslide.import_profiler import ImportProfiler

            with ImportProfiler() as import_profiler:
                start_time = time()
                import_all()
                end_time = time()

            import_profiler.print_stats(profile_threshold_ms)
        else:
            start_time = time()
            import_all()
            end_time = time()
        return end_time - start_time

    def _load_all_examples(self, import_module_names: List[str]) -> float:
        """
        Import all required modules.
        """
        import_secs = self._do_imports(import_module_names)
        # Also convert any plain unittest.TestCase tests into contexts.
        _load_unittest_test_cases(import_module_names)
        return import_secs

    def _get_config_from_parsed_args(self, parsed_args: Any) -> _Config:
        """Translate the argparse namespace into an immutable _Config."""
        config = _Config(
            format=parsed_args.format,
            force_color=parsed_args.force_color,
            trim_path_prefix=parsed_args.trim_path_prefix[0],
            show_testslide_stack_trace=parsed_args.show_testslide_stack_trace,
            profile_threshold_ms=parsed_args.import_profiler[0]
            if parsed_args.import_profiler
            else None,
            shuffle=parsed_args.shuffle,
            list=parsed_args.list,
            seed=parsed_args.seed[0] if parsed_args.seed else None,
            focus=parsed_args.focus,
            fail_if_focused=parsed_args.fail_if_focused,
            fail_fast=parsed_args.fail_fast,
            names_text_filter=parsed_args.filter_text[0]
            if parsed_args.filter_text
            else None,
            names_regex_filter=parsed_args.filter_regex[0]
            if parsed_args.filter_regex
            else None,
            names_regex_exclude=parsed_args.exclude_regex[0]
            if parsed_args.exclude_regex
            else None,
            quiet=parsed_args.quiet,
            dsl_debug=parsed_args.dsl_debug,
            import_module_names=self._modules
            if self._modules
            else [
                _filename_to_module_name(test_file)
                for test_file in parsed_args.test_files
            ],
        )
        return config

    def run(self) -> int:
        """Parse arguments and run, list, or profile the test suite.

        Returns the process exit code.
        """
        try:
            parsed_args = self.parser.parse_args(self.args)
        except SystemExit as e:
            # argparse exits on --help / bad arguments; surface its code.
            return e.code
        config = self._get_config_from_parsed_args(parsed_args)

        if config.profile_threshold_ms is not None:
            # Profiling mode: only import (with the profiler), don't run.
            import_secs = self._do_imports(
                config.import_module_names, config.profile_threshold_ms
            )
            return 0
        else:
            import_secs = self._load_all_examples(config.import_module_names)
            formatter = self.FORMAT_NAME_TO_FORMATTER_CLASS[config.format](
                import_module_names=config.import_module_names,
                force_color=config.force_color,
                import_secs=import_secs,
                trim_path_prefix=config.trim_path_prefix,
                show_testslide_stack_trace=config.show_testslide_stack_trace,
                dsl_debug=config.dsl_debug,
            )
            StrictMock.TRIM_PATH_PREFIX = config.trim_path_prefix
            if config.list:
                # Listing mode: report every discovered example, run nothing.
                formatter.discovery_start()
                for context in Context.all_top_level_contexts:
                    for example in context.all_examples:
                        formatter.example_discovered(example)
                formatter.discovery_finish()
                return 0
            else:
                return Runner(
                    contexts=Context.all_top_level_contexts,
                    formatter=formatter,
                    shuffle=config.shuffle,
                    seed=config.seed,
                    focus=config.focus,
                    fail_fast=config.fail_fast,
                    fail_if_focused=config.fail_if_focused,
                    names_text_filter=config.names_text_filter,
                    names_regex_filter=config.names_regex_filter,
                    names_regex_exclude=config.names_regex_exclude,
                    quiet=config.quiet,
                ).run()
def main() -> None:
    """Console entry point: make the cwd importable, run the CLI, exit."""
    if "" not in sys.path:
        sys.path.insert(0, "")
    try:
        exit_code = Cli(sys.argv[1:]).run()
    except KeyboardInterrupt:
        print("SIGINT received, exiting.", file=sys.stderr)
        sys.exit(1)
    sys.exit(exit_code)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
152943 | <filename>codes/utils/evaluate.py
# Evaluate a saved model with respect to testing data
import torch
from torch.autograd import Variable
import numpy as np
import json
import pandas as pd
import argparse
from codes.models import decoders
from codes.utils import data as data_utils
from codes.utils import constants
import pdb
from tqdm import tqdm
import pickle as pkl
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from os.path import dirname, abspath
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
def get_args():
    """Parse and return the command line arguments for an evaluation run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--exp", type=str, help="experiment to load",
                        default="wiki_normalize_experiment2018-04-21_18:43:17")
    parser.add_argument("-m", "--model", type=str, help="model to load",
                        default="model_epoch_0_step_0.mod")
    parser.add_argument("-f", "--file", type=str, help="testing file to load",
                        default="full_docs_2_test.csv")
    parser.add_argument("-o", "--output", type=str, help="file to write the output",
                        default="output.csv")
    parser.add_argument("-c", "--confidence", type=float,
                        help="confidence to measure pruned accuracy", default=0.0)
    parser.add_argument("-n", "--num", type=int,
                        help="number of evals (-1 for all)", default=-1)
    return parser.parse_args()
def predict(test_df, row_id, trainer, src_text, src_len, labels, data, mode='overall'):
    """Run inference for a single row and record per-level predictions.

    Writes the decoded class name for each hierarchy level into the
    ``pred_<mode>_<level>`` columns of *test_df* at *row_id*, and returns
    the updated dataframe together with the attention weights and class
    probabilities produced by the trainer.
    """
    loss, accs, attns, preds, correct, correct_confs, incorrect_confs, probs = (
        trainer.batchNLLLoss(src_text, src_len, labels,
                             mode='infer', overall=(mode == 'overall')))
    # Map each level's predicted label id back to its original class name.
    decoded = [
        str(data.y_id2class['l' + str(level + 1)][int(data.id2label[pred[0]].split('_')[1])])
        for level, pred in enumerate(preds)
    ]
    for level, class_name in enumerate(decoded):
        test_df.at[row_id, 'pred_{}_{}'.format(mode, level)] = class_name
    return test_df, attns, probs
def calculate_metrics(layers, test_file, mode='overall'):
    """Print accuracy / recall / F1 / precision for every hierarchy level."""
    print("Calculating metrics for mode : {}".format(mode))
    print("------------------------------------------------")
    for layer in range(layers):
        truth_col = 'l{}'.format(layer + 1)
        pred_col = 'pred_{}_{}'.format(mode, layer)
        # Compare as strings so label dtypes can't skew the match.
        test_file[truth_col] = test_file[truth_col].astype(str)
        test_file[pred_col] = test_file[pred_col].astype(str)
        truth = test_file[truth_col]
        pred = test_file[pred_col]
        acc = np.mean(truth == pred)
        sk_acc = accuracy_score(truth, pred)
        sk_rec = recall_score(truth, pred, average='macro')
        sk_f1 = f1_score(truth, pred, average='macro')
        sk_precision = precision_score(truth, pred, average='macro')
        print("Layer {} Metrics".format(layer + 1))
        print("Acc {}, Sk_accuracy {}, Recall {}, F1 Score {}, Precision {}".format(
            acc, sk_acc, sk_rec, sk_f1, sk_precision))
        print('================================================')
def evaluate_test(trainer, data, test_file_loc, output_file_loc, model_params, total=-1):
    """
    Evaluate and print metrics
    :param trainer: Trainer (use trainer.model.eval() to disable dropout / batchnorm)
    :param data: Data_Utility holding vocabulary and label mappings
    :param test_file_loc: testing file
    :param output_file_loc: output file
    :param model_params: params
    :param total: number of rows to evaluate; -1 means the whole file
    :return: None
    """
    layers = model_params['levels']
    test_file = pd.read_csv('../../data/' + test_file_loc)
    # Prepare one prediction and one attention column per hierarchy level.
    for i in range(model_params['levels']):
        test_file['pred_{}'.format(i)] = ''
        test_file['attn_{}'.format(i)] = ''
    test_file['recon_text'] = ''
    # NOTE(review): test_docs is never used.
    test_docs = []
    logging.info("Starting prediction ...")
    if total == -1:
        total = len(test_file)
    pb = tqdm(total=total)
    # pdb.set_trace()
    ct = 0
    attentions = []
    probabilities = []
    for i,row in test_file.iterrows():
        # Tokenize the document and map words to vocabulary ids,
        # substituting UNK for out-of-vocabulary words.
        text = row['text']
        text = text.lower()
        text = data.tokenize(text)
        text = [data.word2id[w] if w in data.word2id else data.word2id[constants.UNK_WORD] for w in text]
        # Reconstructed text (post-UNK) is stored alongside predictions.
        recon_text = [data.id2word[str(w)] for w in text]
        #print(text)
        #print(recon_text)
        text_len = len(text)
        # NOTE(review): Variable(..., volatile=True) is the pre-0.4 PyTorch
        # way to disable autograd; this code targets an old torch version.
        src_text = Variable(torch.LongTensor([text]), volatile=True)
        src_len = [text_len]
        # Label sequence starts with a dummy 0 followed by the per-level
        # gold label ids.
        labels = [0]
        #pdb.set_trace()
        labels.extend([data.label2id['l{}_{}'.format(l,data.y_class2id['l'+str(l+1)][str(row['l{}'.format(l+1)])])]
                       for l in range(model_params['levels'])])
        labels = Variable(torch.LongTensor([labels]), volatile=True)
        if model_params['use_gpu']:
            src_text = src_text.cuda()
            labels = labels.cuda()
        # NOTE(review): `renormalize` is computed but never used below.
        renormalize = 'level'
        if 'renormalize' in model_params:
            renormalize = model_params['renormalize']
        # Run inference twice: once scoring the full hierarchy jointly
        # ('overall') and once per level ('exact').
        test_file, attns_overall, probs_overall = predict(test_file, i, trainer, src_text, src_len, labels, data, mode='overall')
        test_file, attns_exact, probs_exact = predict(test_file, i, trainer, src_text, src_len, labels, data, mode='exact')
        test_file.at[i, 'recon_text'] = ' '.join(recon_text)
        ## Store attentions
        #row_attentions = []
        #pdb.set_trace()
        #for idx, attn in enumerate(attns):
        #    if type(attn) == list:
        #        attn = np.array([a.data.cpu().numpy() for a in attn])
        #        attn = np.squeeze(attn, axis=0)
        #    elif type(attn) == Variable:
        #        attn = attn.data.cpu().numpy()
        #        attn = np.squeeze(attn, axis=1)
        #    row_attentions.append(attn)
        pb.update(1)
        attentions.append([attns_overall, attns_exact])
        probabilities.append(probs_overall)
        ct +=1
        if ct == total:
            break
    pb.close()
    # Calculate Metrics
    calculate_metrics(layers, test_file, mode='overall')
    calculate_metrics(layers, test_file, mode='exact')
    ## store category embeddings
    """
    cat_emb = {}
    for cat_indx in range(sum(model_params['label_sizes']) + 1):
        cat_inp = Variable(torch.LongTensor([cat_indx]), volatile=True).cuda()
        cat_emb[cat_indx] = model.category_embedding(cat_inp).data.cpu().numpy()
        del cat_inp
    pkl.dump(cat_emb, open(output_file_loc + '_category_emb.pkl','wb'))
    pkl.dump(model.taxonomy, open(output_file_loc + '_taxonomy.pkl','wb'))
    """
    logging.info("Done predicting. Saving file.")
    # Persist predictions, attention weights and probabilities.
    test_file.to_csv(output_file_loc)
    pkl.dump(attentions, open(output_file_loc + '_attentions.pkl', 'wb'))
    pkl.dump(probabilities, open(output_file_loc + '_probs.pkl','wb'))
if __name__ == '__main__':
    ## loading the model
    args = get_args()
    logging.info("Loading the model")
    # Experiment parameters saved at training time.
    model_params = json.load(open('../../saved/'+args.exp + '/parameters.json','r'))
    ## load embeddings if any
    if model_params['use_embedding']:
        # Embeddings live under <repo_root>/data/<data_path>.
        parent_dir = dirname(dirname(dirname(abspath(__file__))))
        save_path_base = parent_dir + '/data/' + model_params['data_path']
        model_params['embedding'] = torch.load(open(save_path_base + model_params['embedding_saved'], 'rb'))
    # NOTE(review): an unknown model_type leaves `model` undefined and
    # crashes below with NameError rather than a clear error message.
    if model_params['model_type'] == 'attentive':
        model = decoders.AttentiveHierarchicalClassifier(**model_params)
    elif model_params['model_type'] == 'pooling':
        model = decoders.PooledHierarchicalClassifier(**model_params)
    #print(model_params)
    # Load model
    model.load_state_dict(torch.load('../../saved/'+args.exp + '/' + args.model))
    logging.info("Loaded model")
    if model_params['use_gpu']:
        model = model.cuda()
    ## prepare the data
    logging.info("Loading the data")
    data = data_utils.Data_Utility(
        data_path=model_params['data_path'],
        train_test_split=model_params['train_test_split'],
        max_vocab=model_params['max_vocab'],
        max_word_doc=model_params['max_word_doc'],
        level = model_params['level'],
        decoder_ready=model_params['decoder_ready'],
        tokenization=model_params['tokenization'],
        clean=model_params['clean']
    )
    data.load(model_params['data_type'], model_params['data_loc'], model_params['file_name'])
    # Share the label taxonomy between data and model/trainer.
    model.taxonomy = data.taxonomy
    model_params['taxonomy'] = data.taxonomy
    trainer = decoders.Trainer(model=model, **model_params)
    # Disable dropout / batchnorm updates for evaluation.
    trainer.model.eval()
    evaluate_test(trainer, data, args.file, args.output, model_params)
| StarcoderdataPython |
3326850 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import random
import string
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.monitor import MonitorClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
    """Azure sample: provision a VM (plus vnet/subnet/NIC), attach a
    dynamic-threshold metric alert to it, read the alert back, delete the
    alert, and finally delete the whole resource group.

    Requires SUBSCRIPTION_ID in the environment and credentials resolvable
    by DefaultAzureCredential.
    """
    SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
    GROUP_NAME = "testgroupx"
    METRIC_ALERT_NAME = "metricnamexx"
    VM_NAME = "vm_name"
    NETWORK_NAME = "networkxx"
    SUBNET_NAME = "subnetx"
    INTERFACE_NAME = "interfacexx"
    # Randomized admin password for the throwaway VM.
    your_password = '<PASSWORD>' + ''.join(random.choice(string.ascii_lowercase) for i in range(8))

    # Create clients
    # For other authentication approaches, please see: https://pypi.org/project/azure-identity/
    resource_client = ResourceManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    compute_client = ComputeManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )
    monitor_client = MonitorClient(
        credential=DefaultAzureCredential(),
        subscription_id=SUBSCRIPTION_ID
    )

    # Create resource group
    resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {"location": "eastus"}
    )

    # Create virtual network
    vnet = network_client.virtual_networks.begin_create_or_update(
        GROUP_NAME,
        NETWORK_NAME,
        {
            'location': "eastus",
            'address_space': {
                'address_prefixes': ['10.0.0.0/16']
            }
        }
    ).result()

    subnet = network_client.subnets.begin_create_or_update(
        GROUP_NAME,
        NETWORK_NAME,
        SUBNET_NAME,
        {'address_prefix': '10.0.0.0/24'}
    ).result()

    # Create network interface
    interface = network_client.network_interfaces.begin_create_or_update(
        GROUP_NAME,
        INTERFACE_NAME,
        {
            'location': "eastus",
            'ip_configurations': [{
                'name': 'MyIpConfig',
                'subnet': {
                    'id': subnet.id
                }
            }]
        }
    ).result()

    # Create vm
    vm = compute_client.virtual_machines.begin_create_or_update(
        GROUP_NAME,
        VM_NAME,
        {
            "location": "eastus",
            "hardware_profile": {
                "vm_size": "Standard_D2_v2"
            },
            "storage_profile": {
                "image_reference": {
                    "sku": "2016-Datacenter",
                    "publisher": "MicrosoftWindowsServer",
                    "version": "latest",
                    "offer": "WindowsServer"
                },
                "os_disk": {
                    "caching": "ReadWrite",
                    "managed_disk": {
                        "storage_account_type": "Standard_LRS"
                    },
                    "name": "myVMosdisk",
                    "create_option": "FromImage"
                },
                "data_disks": [
                    {
                        "disk_size_gb": "1023",
                        "create_option": "Empty",
                        "lun": "0"
                    },
                    {
                        "disk_size_gb": "1023",
                        "create_option": "Empty",
                        "lun": "1"
                    }
                ]
            },
            "os_profile": {
                "admin_username": "testuser",
                "computer_name": "myVM",
                # BUG FIX: this previously used the literal placeholder
                # `<PASSWORD>` (a SyntaxError); use the password generated above.
                "admin_password": your_password,
                "windows_configuration": {
                    "enable_automatic_updates": True  # need automatic update for reimage
                }
            },
            "network_profile": {
                "network_interfaces": [
                    {
                        "id": interface.id,
                        "properties": {
                            "primary": True
                        }
                    }
                ]
            }
        }
    ).result()

    RESOURCE_URI = vm.id

    # Create metric alert (dynamic threshold on CPU percentage)
    metric_alert = monitor_client.metric_alerts.create_or_update(
        GROUP_NAME,
        METRIC_ALERT_NAME,
        {
            "location": "global",
            "description": "This is the description of the rule1",
            "severity": "3",
            "enabled": True,
            "scopes": [
                RESOURCE_URI
            ],
            "evaluation_frequency": "PT1M",
            "window_size": "PT15M",
            "target_resource_type": "Microsoft.Compute/virtualMachines",
            "target_resource_region": "southcentralus",
            "criteria": {
                "odata.type": "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria",
                "all_of": [
                    {
                        "criterion_type": "DynamicThresholdCriterion",
                        "name": "High_CPU_80",
                        "metric_name": "Percentage CPU",
                        "metric_namespace": "microsoft.compute/virtualmachines",
                        "operator": "GreaterOrLessThan",
                        "time_aggregation": "Average",
                        "dimensions": [],
                        "alert_sensitivity": "Medium",
                        "failing_periods": {
                            "number_of_evaluation_periods": "4",
                            "min_failing_periods_to_alert": "4"
                        },
                    }
                ]
            },
            "auto_mitigate": False,
            "actions": [
            ]
        }
    )
    print("Create metric alert:\n{}".format(metric_alert))

    # Get metric alert
    metric_alert = monitor_client.metric_alerts.get(
        GROUP_NAME,
        METRIC_ALERT_NAME
    )
    print("Get metric alert:\n{}".format(metric_alert))

    # Delete metric alert
    monitor_client.metric_alerts.delete(
        GROUP_NAME,
        METRIC_ALERT_NAME
    )
    print("Delete metric alert.\n")

    # Delete Group
    resource_client.resource_groups.begin_delete(
        GROUP_NAME
    ).result()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3391306 | import copy
from misc import *
from path import *
from draw import *
from mode import Mode
from knob import DraggableKnob
from nanogui import *
class SpecularManifoldSamplingMode(Mode):
    """Viewer mode demonstrating Specular Manifold Sampling (SMS).

    A seed light path through specular (reflective/refractive) shapes is
    refined with a Newton-style solver (`newton_solver`) until the specular
    constraints vanish. Draggable knobs move the start/end/specular points;
    UI widgets switch between MNEE and SMS seeding, choose the constraint
    formulation, and visualize intermediate solver iterations, batched SMS
    sampling, and rough-surface (perturbed normal) variants.
    """

    def __init__(self, viewer):
        super().__init__(viewer)

        # Current seed path and the solver's converged result (None if the
        # solver failed or has not run yet).
        self.seed_path = None
        self.solution_path = None
        self.intermediate_paths = None

        # "SMS mode": sample many seed paths at random specular positions.
        self.sms_mode = False
        self.seed_paths = []
        self.solution_paths = []

        # "Rough mode": solve for many perturbed specular normals.
        self.rough_mode = False

        self.constraint_type = ConstraintType.HalfVector
        self.strategy_type = StrategyType.SMS

        # Drag state for the three interactive knobs.
        self.dragging_start = False
        self.dragging_end = False
        self.dragging_spec = False
        self.knob_start = DraggableKnob()
        self.knob_end = DraggableKnob()
        self.knob_spec = DraggableKnob()

        self.n_bounces_box = None

        # When animating, the specular position oscillates over time.
        self.animating = False
        self.time = 0.0
        self.scene = None

    def enter(self, last):
        """Mode-switch hook: reset the bounce count to the scene default."""
        super().enter(last)
        if self.n_bounces_box:
            self.n_bounces_box.set_value(self.scene.n_bounces_default)

    def scene_changed(self):
        """Sync the bounce-count widget with the newly selected scene."""
        scene = self.viewer.scenes[self.viewer.scene_idx]
        self.n_bounces_box.set_value(scene.n_bounces_default)

    def update(self, input, scene):
        """Per-frame update: handle knob dragging, optional animation, and
        (outside SMS/rough modes) re-seed and re-solve the current path."""
        super().update(input, scene)
        self.scene = scene

        if self.animating:
            self.time += 0.01
            time_p = 0.5 + 0.5*np.sin(self.time)
            scene.spec_u_current = lerp(time_p, 0.1, 0.9)

        # Set positions and update for all three knobs
        p_start = scene.sample_start_position(scene.start_u_current).p
        p_end = scene.sample_end_position(scene.end_u_current).p
        p_spec = scene.sample_spec_position(scene.spec_u_current).p

        self.knob_start.p = copy.copy(p_start)
        self.knob_end.p = copy.copy(p_end)
        self.knob_spec.p = copy.copy(p_spec)

        self.knob_start.update(input)
        if input.click and (self.knob_start.drag_possible or self.dragging_start):
            self.dragging_start = True
            p_start += input.mouse_dp
        else:
            self.dragging_start = False
        # Project the dragged point back onto the start shape's parameterization.
        u_proj = scene.start_shape().project(p_start)
        scene.start_u_current = u_proj

        self.knob_end.update(input)
        if input.click and (self.knob_end.drag_possible or self.dragging_end):
            self.dragging_end = True
            p_end += input.mouse_dp
        else:
            self.dragging_end = False
        u_proj = scene.end_shape().project(p_end)
        scene.end_u_current = u_proj

        # The specular knob is only meaningful for SMS-style manual seeding.
        self.knob_spec.active = self.strategy_type == StrategyType.SMS and not self.sms_mode and not self.rough_mode
        self.knob_spec.update(input)
        if input.click and (self.knob_spec.drag_possible or self.dragging_spec):
            self.dragging_spec = True
            p_spec += input.mouse_dp
        else:
            self.dragging_spec = False
        u_proj = scene.first_specular_shape().project(p_spec)
        scene.spec_u_current = u_proj

        if not self.sms_mode and not self.rough_mode:
            # Re-seed (MNEE uses a deterministic seed; SMS uses the knob) and
            # rerun the Newton solver every frame.
            if self.strategy_type == StrategyType.MNEE:
                self.seed_path = scene.sample_mnee_seed_path()
            else:
                self.seed_path = scene.sample_seed_path(self.n_bounces_box.value())
            if self.seed_path.has_specular_segment():
                self.solution_path, self.intermediate_paths = self.newton_solver(scene, self.seed_path)

    def newton_solver(self, scene, seed_path):
        """Refine *seed_path* until all specular constraints fall below the
        UI threshold.

        Uses damped Newton steps (step scale `beta`, halved when the proposal
        leaves the current submanifold) with re-projection onto the geometry.
        Returns (solution_path_or_None, list_of_intermediate_paths).
        """
        current_path = seed_path.copy()
        intermediate_paths = [current_path]

        i = 0
        beta = 1.0
        N = self.max_steps()
        threshold = self.eps_threshold()
        success = False
        while True:
            # Give up after too many iterations
            if i >= N:
                break

            # Compute tangents and constraints
            current_path.compute_tangent_derivatives(self.constraint_type)
            if current_path.singular:
                break

            # Check for success
            converged = True
            for vtx in current_path:
                if vtx.shape.type == Shape.Type.Reflection or vtx.shape.type == Shape.Type.Refraction:
                    if abs(vtx.C) > threshold:
                        converged = False
                        break
            if converged:
                success = True
                break

            # Propose a (damped) Newton step for every specular vertex.
            proposed_offsets = current_path.copy_positions()
            for k, vtx in enumerate(current_path):
                if vtx.shape.type == Shape.Type.Reflection or vtx.shape.type == Shape.Type.Refraction:
                    proposed_offsets[k] -= self.step_size_scale()*beta * vtx.dp_du * vtx.dX

            # Ray trace to re-project onto specular manifold
            proposed_path = scene.reproject_path_sms(proposed_offsets, current_path, self.n_bounces_box.value())
            if not current_path.same_submanifold(proposed_path):
                beta = 0.5 * beta
            else:
                beta = min(1.0, 2*beta)
                current_path = proposed_path
                intermediate_paths.append(current_path)

            i = i + 1

        if success:
            # Visibility check: the last segment must not be occluded.
            p_last = current_path[-1].p
            p_spec = current_path[-2].p
            d = p_spec - p_last
            d_norm = norm(d)
            d /= d_norm
            ray = Ray2f(p_last, d, 1e-4, d_norm)
            it = scene.ray_intersect(ray)
            if it.is_valid():
                success = False

        if success:
            solution_path = current_path
        else:
            solution_path = None
        return solution_path, intermediate_paths

    def draw(self, ctx, scene):
        """Render paths (seed/solution/intermediate), the scene, constraint
        visualizations, path vertices, and the draggable knobs."""
        super().draw(ctx, scene)
        s = scene.scale

        show_seed_paths = self.show_seed_paths_chb.checked()
        show_intermediate_steps = self.show_intermediate_steps_chb.checked()

        if self.sms_mode:
            if show_seed_paths:
                for seed_path in self.seed_paths:
                    draw_dotted_path_lines(ctx, seed_path, 0.6*s, spacing=0.02)
            for solution in self.solution_paths:
                draw_path_lines(ctx, solution, '', s)
        elif self.rough_mode:
            draw_dotted_path_lines(ctx, self.seed_path, 0.6*s, spacing=0.02)
            for solution in self.solution_paths:
                draw_intermediate_path_lines(ctx, solution, nvg.RGB(80, 80, 80), s)
            if self.solution_path:
                draw_intermediate_path_lines(ctx, self.solution_path, nvg.RGB(255, 0, 0), s)
        elif not show_intermediate_steps:
            draw_dotted_path_lines(ctx, self.seed_path, s, spacing=0.02)
            if self.solution_path:
                draw_path_lines(ctx, self.solution_path, '', s)
        else:
            # Fade intermediate iterations from red (first) to green (last).
            N = len(self.intermediate_paths)
            g = np.linspace(0, 255, N)
            r = np.linspace(255, 0, N)
            for k, path in enumerate(self.intermediate_paths):
                draw_intermediate_path_lines(ctx, path, nvg.RGB(int(r[k]), int(g[k]), 0), s)

        scene.draw(ctx)

        if self.sms_mode or self.rough_mode:
            pass
        elif self.show_constraint_chb.checked():
            # Visualize the specular constraint at each specular vertex.
            for k, vtx in enumerate(self.seed_path):
                if vtx.shape.type == Shape.Type.Reflection or vtx.shape.type == Shape.Type.Refraction:
                    vtx_prev = self.seed_path[k-1]
                    vtx_next = self.seed_path[k+1]

                    wo = normalize(vtx_next.p - vtx.p)
                    wi = normalize(vtx_prev.p - vtx.p)
                    eta = self.seed_path[k].shape.eta
                    if dot(wi, self.seed_path[k].n) < 0.0:
                        eta = 1.0/eta

                    if self.constraint_type == ConstraintType.HalfVector:
                        draw_arrow(ctx, vtx.p, vtx.n, nvg.RGB(255, 0, 0), scale=s, length=0.25)
                        # (Generalized) half-vector of the incident/outgoing pair.
                        h = normalize(wi + eta * wo)
                        if eta != 1.0:
                            h *= -1
                        draw_arrow(ctx, vtx.p, wi, nvg.RGB(0, 0, 0), scale=s, length=0.25)
                        draw_arrow(ctx, vtx.p, wo, nvg.RGB(0, 0, 0), scale=s, length=0.25)
                        draw_arrow(ctx, vtx.p, h, nvg.RGB(0, 128, 0), scale=s, length=0.25)

                        constraint = dot(vtx.s, h)
                        draw_line(ctx, vtx.p, vtx.p+0.25*constraint*vtx.s, nvg.RGB(120, 80, 250), scale=1.2*s, endcap_b=True)
                    elif self.constraint_type == ConstraintType.AngleDifference:
                        if self.flip_constraint_chb.checked():
                            wi, wo = wo, wi
                        # Shading normal including the (possibly perturbed) offset.
                        m = vtx.s * vtx.n_offset[0] + vtx.n * vtx.n_offset[1]
                        if eta == 1.0:
                            wio = reflect(wi, m)
                        else:
                            wio = refract(wi, m, eta)
                            if wio[0]:
                                wio = wio[1]
                        phi_wo = np.arctan2(wo[1], wo[0])
                        phi_wio = np.arctan2(wio[1], wio[0])
                        draw_arrow(ctx, vtx.p, vtx.n, nvg.RGB(255, 0, 0), scale=s, length=0.25)
                        draw_angle(ctx, vtx.p, 0.2, phi_wo, phi_wio, nvg.RGB(120, 80, 250), scale=1.2*s, flip=(phi_wio - phi_wo < 0))
                        draw_arrow(ctx, vtx.p, wi, nvg.RGB(0, 0, 0), scale=s, length=0.25)
                        draw_arrow(ctx, vtx.p, wo, nvg.RGB(0, 0, 0), scale=s, length=0.25)
                        draw_arrow(ctx, vtx.p, wio, nvg.RGB(0, 128, 0), scale=s, length=0.25)
        elif not show_intermediate_steps:
            draw_path_normals(ctx, self.seed_path, scale=s)

        if self.rough_mode:
            pass
        elif self.sms_mode:
            for solution in self.solution_paths:
                draw_path_vertices(ctx, solution, '', s)
        elif not show_intermediate_steps:
            draw_path_vertices(ctx, self.seed_path, '', s)
            if self.solution_path:
                draw_path_vertices(ctx, self.solution_path, '', s)

        self.knob_start.draw(ctx)
        self.knob_end.draw(ctx)
        self.knob_spec.draw(ctx)

    def layout(self, window):
        """Build the nanogui widget rows for this mode and return them as
        ([tool rows], [])."""
        strategy_tools = Widget(window)
        strategy_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                            Alignment.Middle, 0, 3))
        Label(strategy_tools, "MNEE vs. SMS:")
        self.strategy_mnee_btn = Button(strategy_tools, "", icons.FA_CIRCLE)
        self.strategy_mnee_btn.set_flags(Button.Flags.RadioButton)
        self.strategy_mnee_btn.set_pushed(self.strategy_type == StrategyType.MNEE)
        def strategy_mnee_cb(state):
            if state:
                self.strategy_type = StrategyType.MNEE
        self.strategy_mnee_btn.set_change_callback(strategy_mnee_cb)
        self.strategy_sms_btn = Button(strategy_tools, "", icons.FA_CERTIFICATE)
        self.strategy_sms_btn.set_flags(Button.Flags.RadioButton)
        self.strategy_sms_btn.set_pushed(self.strategy_type == StrategyType.SMS)
        def strategy_sms_cb(state):
            if state:
                self.strategy_type = StrategyType.SMS
        self.strategy_sms_btn.set_change_callback(strategy_sms_cb)

        Label(strategy_tools, " N=")
        self.n_bounces_box = IntBox(strategy_tools)
        self.n_bounces_box.set_fixed_size((50, 20))
        scene = self.viewer.scenes[self.viewer.scene_idx]
        self.n_bounces_box.set_value(scene.n_bounces_default)
        self.n_bounces_box.set_default_value("1")
        self.n_bounces_box.set_font_size(20)
        self.n_bounces_box.set_spinnable(True)
        self.n_bounces_box.set_min_value(1)
        self.n_bounces_box.set_value_increment(1)

        constraint_tools = Widget(window)
        constraint_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                              Alignment.Middle, 0, 3))
        Label(constraint_tools, "Constraint type: ")
        self.constraint_hv_btn = Button(constraint_tools, "", icons.FA_RULER_COMBINED)
        self.constraint_hv_btn.set_flags(Button.Flags.RadioButton)
        self.constraint_hv_btn.set_pushed(self.constraint_type == ConstraintType.HalfVector)
        def constraint_hv_cb(state):
            if state:
                self.constraint_type = ConstraintType.HalfVector
        self.constraint_hv_btn.set_change_callback(constraint_hv_cb)
        self.constraint_dir_btn = Button(constraint_tools, "", icons.FA_DRAFTING_COMPASS)
        self.constraint_dir_btn.set_flags(Button.Flags.RadioButton)
        self.constraint_dir_btn.set_pushed(self.constraint_type == ConstraintType.AngleDifference)
        def constraint_dir_cb(state):
            if state:
                self.constraint_type = ConstraintType.AngleDifference
        self.constraint_dir_btn.set_change_callback(constraint_dir_cb)
        self.show_constraint_chb = CheckBox(constraint_tools, "Show")
        self.show_constraint_chb.set_checked(True)
        self.flip_constraint_chb = CheckBox(constraint_tools, "Flip")
        self.flip_constraint_chb.set_checked(False)

        steps_eps_tools = Widget(window)
        steps_eps_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                             Alignment.Middle, 0, 3))
        Label(steps_eps_tools, "N")
        self.max_steps_sl = Slider(steps_eps_tools)
        max_steps_tb = TextBox(steps_eps_tools)
        max_steps_tb.set_value("20")
        max_steps_tb.set_font_size(20)
        max_steps_tb.set_alignment(TextBox.Alignment.Right)
        self.max_steps_sl.set_value(0.3878)
        def max_steps_cb(value):
            # Slider [0,1] maps to 1..50 iterations; mirror in the text box.
            max_steps_tb.set_value("%i" % (1 + int(49*value)))
        self.max_steps_sl.set_callback(max_steps_cb)
        Label(steps_eps_tools, "eps")
        self.eps_sl = Slider(steps_eps_tools)
        eps_tb = TextBox(steps_eps_tools)
        eps_tb.set_value("1.0E-03")
        eps_tb.set_font_size(20)
        eps_tb.set_alignment(TextBox.Alignment.Right)
        self.eps_sl.set_value(0.285)
        def eps_cb(value):
            # Slider [0,1] maps to thresholds 1e-1 .. 1e-8 (log scale).
            eps_tb.set_value("%.1E" % 10.0**(-(1 + value*7)))
        self.eps_sl.set_callback(eps_cb)

        intermediate_tools = Widget(window)
        intermediate_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                                Alignment.Middle, 0, 3))
        Label(intermediate_tools, "Show intermediate steps:")
        self.show_intermediate_steps_chb = CheckBox(intermediate_tools, "")
        self.show_intermediate_steps_chb.set_checked(False)
        Label(intermediate_tools, "Step size:")
        self.step_size_sl = Slider(intermediate_tools)
        self.step_size_sl.set_value(1.0)

        sms_tools = Widget(window)
        sms_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                       Alignment.Middle, 0, 2))
        Label(sms_tools, "Sample")
        self.n_sms_paths_box = IntBox(sms_tools)
        self.n_sms_paths_box.set_fixed_size((50, 20))
        self.n_sms_paths_box.set_value(10)
        self.n_sms_paths_box.set_default_value("10")
        self.n_sms_paths_box.set_font_size(20)
        self.n_sms_paths_box.set_spinnable(True)
        self.n_sms_paths_box.set_min_value(1)
        self.n_sms_paths_box.set_value_increment(1)
        Label(sms_tools, " paths")
        self.sms_btn = Button(sms_tools, "Go", icons.FA_ROCKET)
        self.sms_btn.set_background_color(Color(0, 1.0, 0, 0.1))
        def sms_cb():
            # Toggle SMS batch mode; when entering it, sample N random seed
            # paths and keep the successfully solved ones.
            self.sms_mode = not self.sms_mode
            if self.sms_mode:
                self.sms_btn.set_background_color(Color(1.0, 0, 0, 0.1))
                self.rough_btn.set_enabled(False)
            else:
                self.sms_btn.set_background_color(Color(0, 1.0, 0, 0.1))
                self.rough_btn.set_enabled(True)
            if self.sms_mode:
                # Preserve the interactive specular position across sampling.
                spec_u_current = self.scene.spec_u_current
                self.seed_paths = []
                self.solution_paths = []
                N = self.n_sms_paths_box.value()
                for k in range(N):
                    self.scene.spec_u_current = np.random.uniform()
                    seed_path = self.scene.sample_seed_path(self.n_bounces_box.value())
                    self.seed_paths.append(seed_path.copy())
                    if seed_path.has_specular_segment():
                        solution_path, _ = self.newton_solver(self.scene, seed_path)
                        if solution_path:
                            self.solution_paths.append(solution_path.copy())
                self.scene.spec_u_current = spec_u_current
        self.sms_btn.set_callback(sms_cb)
        Label(sms_tools, " Show seeds:")
        self.show_seed_paths_chb = CheckBox(sms_tools, "")
        self.show_seed_paths_chb.set_checked(True)

        rough_tools = Widget(window)
        rough_tools.set_layout(BoxLayout(Orientation.Horizontal,
                                         Alignment.Middle, 0, 2))
        Label(rough_tools, "Sample")
        self.n_normals_box = IntBox(rough_tools)
        self.n_normals_box.set_fixed_size((50, 20))
        self.n_normals_box.set_value(10)
        self.n_normals_box.set_default_value("10")
        self.n_normals_box.set_font_size(20)
        self.n_normals_box.set_spinnable(True)
        self.n_normals_box.set_min_value(1)
        self.n_normals_box.set_value_increment(1)
        Label(rough_tools, " normals ( ")
        self.roughness_box = FloatBox(rough_tools)
        self.roughness_box.set_editable(True)
        self.roughness_box.set_fixed_size((60, 20))
        self.roughness_box.set_value(0.1)
        self.roughness_box.set_default_value("0.1")
        self.roughness_box.set_font_size(20)
        self.roughness_box.set_format("[0-9]*\\.?[0-9]+")
        Label(rough_tools, " )")
        self.rough_btn = Button(rough_tools, "Go", icons.FA_ROCKET)
        self.rough_btn.set_background_color(Color(0, 1.0, 0, 0.1))
        def rough_cb():
            # Toggle rough mode; when entering it, perturb the specular
            # vertex normals with Gaussian-distributed slopes and solve for
            # each perturbation.
            # NOTE(review): the outer and inner loops both use `k` (shadowing);
            # harmless here but worth renaming.
            self.rough_mode = not self.rough_mode
            if self.rough_mode:
                self.rough_btn.set_background_color(Color(1.0, 0, 0, 0.1))
                self.sms_btn.set_enabled(False)
            else:
                self.rough_btn.set_background_color(Color(0, 1.0, 0, 0.1))
                self.sms_btn.set_enabled(True)
            if self.rough_mode:
                self.solution_paths = []
                N = self.n_normals_box.value()
                for k in range(N):
                    seed_path = self.seed_path.copy()
                    for k, vtx in enumerate(seed_path):
                        if vtx.shape.type == Shape.Type.Reflection or vtx.shape.type == Shape.Type.Refraction:
                            alpha = self.roughness_box.value()
                            sigma2 = 0.5*alpha*alpha
                            slope = np.random.normal(0, np.sqrt(sigma2))
                            vtx.n_offset = normalize(np.array([-slope, 1]))
                    if seed_path.has_specular_segment():
                        solution_path, _ = self.newton_solver(self.scene, seed_path)
                        if solution_path:
                            self.solution_paths.append(solution_path.copy())
        self.rough_btn.set_callback(rough_cb)

        return [strategy_tools, constraint_tools, steps_eps_tools, sms_tools, rough_tools, intermediate_tools], []

    def keyboard_event(self, key, scancode, action, modifiers):
        """Right-arrow toggles the specular-position animation."""
        super().keyboard_event(key, scancode, action, modifiers)
        if key == glfw.KEY_RIGHT and action == glfw.PRESS:
            self.animating = not self.animating
            return True

    def max_steps(self):
        # Maps the slider value [0,1] to 1..50 solver iterations.
        value = self.max_steps_sl.value()
        return 1 + int(49*value)

    def eps_threshold(self):
        # Maps the slider value [0,1] to a threshold in [1e-8, 1e-1].
        value = self.eps_sl.value()
        return 10.0**(-(1 + value*7))

    def step_size_scale(self):
        return self.step_size_sl.value()
1605915 | # Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
def colorGraph(G):
    """Greedily color an undirected graph.

    Assumes G[node] -> iterable(nodes) and symmetric adjacency
    (a in G[b] implies b in G[a]).

    Returns (solution, group, numColors): solution maps node -> color index,
    group[color] lists the nodes assigned that color, numColors is the total
    number of colors used.
    """
    solution = {}
    numColors = 0
    constraint = collections.defaultdict(set)
    group = []

    pending = set()
    remaining = {}
    # BUG FIX: dict.iteritems() is Python-2-only; .items() works on both.
    for node, values in G.items():
        remaining[node] = len(values)
        pending.add(node)

    while pending:
        # Heuristic: color the node with the most uncolored neighbors first.
        # (max with key= picks the first maximal node in iteration order,
        # matching the original hand-rolled search.)
        current = max(pending, key=remaining.__getitem__)
        pending.remove(current)

        # Choose the smallest color not already used by a neighbor;
        # allocate a new color if all existing ones are constrained.
        for color in range(numColors):
            if color not in constraint[current]:
                break
        else:
            color = numColors
            numColors += 1
            group.append([])

        solution[current] = color
        group[color].append(current)

        # Propagate the choice to the neighbors.
        for other in G[current]:
            remaining[other] -= 1
            constraint[other].add(color)

    return solution, group, numColors
| StarcoderdataPython |
4807642 | <reponame>tonghs/web-template<filename>utils/crypto.py
import hashlib
import hmac
from typing import Dict, List
from config import API_SECRET
def _dict_to_str(data: Dict):
keys: List[str] = list(data.keys())
params: List[str] = []
for key in sorted(keys):
if key == 'sign':
continue
value = data[key]
params.append(f"{key}={value}")
return '&'.join(params)
def hmac_sha1_encode(data: Dict):
    """Sign *data* with the shared API secret using HMAC-SHA1.

    Returns the hex digest of the canonicalized parameter string.
    """
    message = _dict_to_str(data)
    mac = hmac.new(API_SECRET.encode('utf-8'), message.encode('utf-8'), hashlib.sha1)
    return mac.hexdigest()
| StarcoderdataPython |
173920 | <reponame>cmu-catalyst/collage
import argparse
import tensorflow as tf
import numpy as np
import time
from shared_functions import make_matmul
def attention(input, heads):
    """One multi-head self-attention block (TF1 graph mode).

    Args:
        input: 2-D tensor of shape (seq_len, d_model).
        heads: number of attention heads; must divide d_model evenly.

    Returns:
        A 2-D tensor of shape (seq_len, d_model).
    """
    d_model = input.shape[1].value
    seq_len = input.shape[0].value
    # BUG FIX: `heads` was previously ignored — the head count (16) and head
    # size (64) were hard-coded for a (64, 1024) input. Derive them instead,
    # which is backward compatible for the original attention(t, 16) calls.
    head_dim = d_model // heads
    q = make_matmul(input, d_model)
    k = make_matmul(input, d_model)
    v = make_matmul(input, d_model)
    # reshape query, key, value into (seq_len, heads, head_dim)
    q = tf.reshape(q, shape=(seq_len, heads, head_dim))
    k = tf.reshape(k, shape=(seq_len, heads, head_dim))
    v = tf.reshape(v, shape=(seq_len, heads, head_dim))
    # transpose q, k, v for batched matmul: (heads, seq_len, head_dim)
    q = tf.transpose(q, perm=(1, 0, 2))
    k = tf.transpose(k, perm=(1, 0, 2))
    v = tf.transpose(v, perm=(1, 0, 2))
    logits = tf.matmul(q, k)
    output = tf.matmul(logits, v)
    # transpose the output back and merge the heads again
    output = tf.transpose(output, perm=(1, 0, 2))
    output = tf.reshape(output, shape=(seq_len, d_model))
    # a final linear layer
    # NOTE(review): the feed-forward below consumes `input` (not the attention
    # result), discarding `output` — this mirrors the original benchmark graph;
    # confirm it is intentional.
    output = make_matmul(tf.nn.relu(make_matmul(input, 4*d_model)), d_model)
    return output
# Build a 12-layer stack of attention blocks over a (64, 1024) input and
# export the TF1 graph as a SavedModel under models_pb/bert.
input = tf.placeholder(tf.float32, shape=(64,1024))
input_dictionary = {}
input_dictionary[input] = np.random.random_sample((64, 1024))

t = input
for i in range(12):
    t = attention(t, 16)

output_nodes = [t]

config = tf.ConfigProto()

# Input model name
model_name = "bert"
with tf.Session(config=config) as sess:
    # Saving
    inputs = {"input_placeholder": input}
    outputs = {"prediction": output_nodes[0]}
    tf.saved_model.simple_save(
        sess, f'models_pb/{model_name}', inputs, outputs
    )
1603257 | from robot.trajectory import quintic_trajectory_planning
from tools.visualize import plot_joint_trajectory
import numpy as np
if __name__ == "__main__":
q0 = np.array([-2, -1, 0, 1, 2, 3])
qd0 = np.array([0, 0, 0, 0, 0, 0])
qdd0 = np.array([0, 0, 0, 0, 0, 0])
qf = np.array([4, -3, -2, 0, 4, -2])
qdf = np.array([0, 0, 0, 0, 0, 0])
qddf = np.array([0, 0, 0, 0, 0, 0])
q, qd, qdd = quintic_trajectory_planning(q0, qf, qd0, qdf, qdd0, qddf)
plot_joint_trajectory(q, qd, qdd) | StarcoderdataPython |
53481 | <reponame>Luigimonbymus/Modern-Quest<filename>Item.py
class item():
    """Base class for all inventory items.

    Attributes: name (display name), desc (description), worth (sell value).
    """

    # BUG FIX: the constructor was spelled ___init___ (three underscores) and
    # __str__ was spelled _str_, so neither was ever invoked by Python.
    def __init__(self, name, desc, worth):
        self.name = name
        self.desc = desc
        self.worth = worth

    def __str__(self):
        return "{}\n=====\n{}\nWorth: {}\n".format(self.name, self.desc, self.worth)
class money(item):
    """A stack of cash; its worth equals the amount it holds."""

    def __init__(self, csh):
        self.csh = csh
        super().__init__("Money", "Dollar bills in cash.", self.csh)
class weapon(item):
    """An item that can also deal damage (dmg)."""

    def __init__(self, name, desc, worth, dmg):
        self.dmg = dmg
        # BUG FIX: this previously passed the undefined name `value`
        # instead of the `worth` parameter.
        super().__init__(name, desc, worth)

    # BUG FIX: was spelled _str_ (never called by str()) and referenced the
    # non-existent attribute self.dm instead of self.dmg.
    def __str__(self):
        return "{}\n=====\n{}\nWorth: {}\nDmg: {}".format(self.name, self.desc, self.worth, self.dmg)
class Bat(weapon):
    """Starter weapon: a plain baseball bat."""

    def __init__(self):
        super().__init__("Bat", "Just an ordinary baseball bat.", 3, 5)
class Stick(weapon):
    """Weak but sturdy tree branch."""

    def __init__(self):
        super().__init__("Stick", "A weak yet seemingly unbreakable branch from a tree.", 1, 2)
class Chainsaw(weapon):
    """High-damage late-game weapon."""

    def __init__(self):
        super().__init__("Chainsaw", "This is overkill.", 20, 15)
class GlassShrd(weapon):
    """Worthless but sharp improvised weapon."""

    def __init__(self):
        super().__init__("Glass Shard", "Broken sharp glass. It's a miracle your hand is not bleeding.", 0, 4)
class Clackers(weapon):
    """Mid-tier toy weapon."""

    def __init__(self):
        super().__init__("Clackers", "Balls of steel.", 5, 5)
class Yoyo(weapon):
    """Cheap toy weapon."""

    def __init__(self):
        super().__init__("Yoyo", "String plus plastic equals trick shots!", 3, 5)
class FirstAid(item):
    """Consumable that restores hit points.

    The defaulted parameters keep `FirstAid()` working as before while
    letting subclasses (SuperFA, HyperFA) customize name/desc/worth/heal.
    """

    def __init__(self, name="First-Aid", desc="Heals your wounds.", worth=3, heal=10):
        super().__init__(name=name, desc=desc, worth=worth)
        self.heal = heal

    def healing(self, HP):
        # BUG FIX: the original method body was empty (only a comment),
        # which is a SyntaxError in Python.
        # Heals player.
        # NOTE(review): assumed to return the raised HP total — confirm
        # whether a maximum-HP cap should apply.
        return HP + self.heal
class SuperFA(FirstAid):
    """Stronger first-aid kit (heals 20)."""

    def __init__(self):
        # BUG FIX: these keyword arguments require FirstAid.__init__ to accept
        # name/desc/worth/heal; the original FirstAid constructor took none.
        super().__init__(name="Super First-Aid",
                         desc="A clinic in a box.",
                         worth=5,
                         heal=20)
class HyperFA(FirstAid):
    """Strongest first-aid kit (heals 30)."""

    # BUG FIX: the class statement was missing its base class list and colon
    # (`class HyperFA` with no `(FirstAid):`), a SyntaxError. FirstAid is
    # assumed as the base by analogy with SuperFA.
    def __init__(self):
        super().__init__(name="Hyper First-Aid",
                         desc="Surgeon approved.",
                         worth=10,
                         heal=30)
| StarcoderdataPython |
194175 | import atexit
import collections
import datetime
import functools
import logging
import os
import re
import textwrap
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from unittest.mock import patch
# This import verifies that the dependencies are available.
import sqlalchemy_bigquery
from dateutil.relativedelta import relativedelta
from google.cloud.bigquery import Client as BigQueryClient
from google.cloud.logging_v2.client import Client as GCPLoggingClient
from ratelimiter import RateLimiter
from sqlalchemy import create_engine, inspect
from sqlalchemy.engine.reflection import Inspector
from datahub.emitter import mce_builder
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.mcp_builder import (
BigQueryDatasetKey,
PlatformKey,
ProjectIdKey,
gen_containers,
)
from datahub.ingestion.api.decorators import (
SourceCapability,
SupportStatus,
capability,
config_class,
platform_name,
support_status,
)
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.sql.sql_common import (
SQLAlchemyConfig,
SQLAlchemySource,
SqlWorkUnit,
make_sqlalchemy_type,
register_custom_type,
)
from datahub.ingestion.source.usage.bigquery_usage import (
BQ_DATE_SHARD_FORMAT,
BQ_DATETIME_FORMAT,
AuditLogEntry,
BigQueryAuditMetadata,
BigQueryTableRef,
QueryEvent,
)
from datahub.ingestion.source_config.sql.bigquery import BigQueryConfig
from datahub.ingestion.source_report.sql.bigquery import BigQueryReport
from datahub.metadata.com.linkedin.pegasus2avro.metadata.key import DatasetKey
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import DatasetSnapshot
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.com.linkedin.pegasus2avro.schema import RecordTypeClass
from datahub.metadata.schema_classes import (
ChangeTypeClass,
DatasetLineageTypeClass,
UpstreamClass,
UpstreamLineageClass,
)
from datahub.utilities.bigquery_sql_parser import BigQuerySQLParser
from datahub.utilities.mapping import Constants
logger = logging.getLogger(__name__)
BQ_FILTER_RULE_TEMPLATE = """
protoPayload.serviceName="bigquery.googleapis.com"
AND
(
(
protoPayload.methodName="jobservice.jobcompleted"
AND
protoPayload.serviceData.jobCompletedEvent.eventName="query_job_completed"
AND
protoPayload.serviceData.jobCompletedEvent.job.jobStatus.state="DONE"
AND NOT
protoPayload.serviceData.jobCompletedEvent.job.jobStatus.error.code:*
AND
(
protoPayload.serviceData.jobCompletedEvent.job.jobStatistics.referencedTables:*
OR
protoPayload.serviceData.jobCompletedEvent.job.jobStatistics.referencedViews:*
)
)
)
AND
timestamp >= "{start_time}"
AND
timestamp < "{end_time}"
""".strip()
BQ_FILTER_RULE_TEMPLATE_V2 = """
resource.type=("bigquery_project")
AND
(
protoPayload.methodName=
(
"google.cloud.bigquery.v2.JobService.Query"
OR
"google.cloud.bigquery.v2.JobService.InsertJob"
)
AND
protoPayload.metadata.jobChange.job.jobStatus.jobState="DONE"
AND NOT protoPayload.metadata.jobChange.job.jobStatus.errorResult:*
AND (
protoPayload.metadata.jobChange.job.jobStats.queryStats.referencedTables:*
OR
protoPayload.metadata.jobChange.job.jobStats.queryStats.referencedViews:*
)
)
AND
timestamp >= "{start_time}"
AND
timestamp < "{end_time}"
""".strip()
BQ_GET_LATEST_PARTITION_TEMPLATE = """
SELECT
c.table_catalog,
c.table_schema,
c.table_name,
c.column_name,
c.data_type,
max(p.partition_id) as partition_id
FROM
`{project_id}.{schema}.INFORMATION_SCHEMA.COLUMNS` as c
join `{project_id}.{schema}.INFORMATION_SCHEMA.PARTITIONS` as p
on
c.table_catalog = p.table_catalog
and c.table_schema = p.table_schema
and c.table_name = p.table_name
where
is_partitioning_column = 'YES'
-- Filter out special partitions (https://cloud.google.com/bigquery/docs/partitioned-tables#date_timestamp_partitioned_tables)
and p.partition_id not in ('__NULL__', '__UNPARTITIONED__', '__STREAMING_UNPARTITIONED__')
and STORAGE_TIER='ACTIVE'
and p.table_name= '{table}'
group by
c.table_catalog,
c.table_schema,
c.table_name,
c.column_name,
c.data_type
order by
c.table_catalog,
c.table_schema,
c.table_name,
c.column_name
""".strip()
SHARDED_TABLE_REGEX = r"^(.+)[_](\d{4}|\d{6}|\d{8}|\d{10})$"
BQ_GET_LATEST_SHARD = """
SELECT SUBSTR(MAX(table_id), LENGTH('{table}_') + 1) as max_shard
FROM `{project_id}.{schema}.__TABLES_SUMMARY__`
WHERE table_id LIKE '{table}%'
""".strip()
# The existing implementation of this method can be found here:
# https://github.com/googleapis/python-bigquery-sqlalchemy/blob/main/sqlalchemy_bigquery/base.py#L1018-L1025.
# The existing implementation does not use the schema parameter and hence
# does not properly resolve the view definitions. As such, we must monkey
# patch the implementation.
def get_view_definition(self, connection, view_name, schema=None, **kw):
    """Replacement for BigQueryDialect.get_view_definition that honors the
    `schema` argument when resolving the view's SQL text."""
    return self._get_table(connection, view_name, schema).view_query


sqlalchemy_bigquery.BigQueryDialect.get_view_definition = get_view_definition
def bigquery_audit_metadata_query_template(
    dataset: str, use_date_sharded_tables: bool
) -> str:
    """
    Receives a dataset (with project specified) and returns a query template that is used to query exported
    AuditLogs containing protoPayloads of type BigQueryAuditMetadata.
    Include only those that:
    - have been completed (jobStatus.jobState = "DONE")
    - do not contain errors (jobStatus.errorResults is none)
    :param dataset: the dataset to query against in the form of $PROJECT.$DATASET
    :param use_date_sharded_tables: whether to read from date sharded audit log tables or time partitioned audit log
           tables
    :return: a query template, when supplied start_time and end_time, can be used to query audit logs from BigQuery
    """
    query: str
    if use_date_sharded_tables:
        # Date-sharded export: one table per day, restricted via _TABLE_SUFFIX.
        # The f-string part interpolates {dataset} now; the plain-string part
        # keeps {start_date}/{end_date} for the caller to fill in later.
        query = (
            f"""
        SELECT
            timestamp,
            logName,
            insertId,
            protopayload_auditlog AS protoPayload,
            protopayload_auditlog.metadataJson AS metadata
        FROM
            `{dataset}.cloudaudit_googleapis_com_data_access_*`
        """
            + """
        WHERE
            _TABLE_SUFFIX BETWEEN "{start_date}" AND "{end_date}" AND
        """
        )
    else:
        # Time-partitioned export: a single table filtered purely by timestamp.
        query = f"""
        SELECT
            timestamp,
            logName,
            insertId,
            protopayload_auditlog AS protoPayload,
            protopayload_auditlog.metadataJson AS metadata
        FROM
            `{dataset}.cloudaudit_googleapis_com_data_access`
        WHERE
        """

    # Common predicate; {start_time}/{end_time} remain caller placeholders.
    audit_log_filter = """    timestamp >= "{start_time}"
    AND timestamp < "{end_time}"
    AND protopayload_auditlog.serviceName="bigquery.googleapis.com"
    AND JSON_EXTRACT_SCALAR(protopayload_auditlog.metadataJson, "$.jobChange.job.jobStatus.jobState") = "DONE"
    AND JSON_EXTRACT(protopayload_auditlog.metadataJson, "$.jobChange.job.jobStatus.errorResults") IS NULL
    AND JSON_EXTRACT(protopayload_auditlog.metadataJson, "$.jobChange.job.jobConfig.queryConfig") IS NOT NULL;
    """

    query = textwrap.dedent(query) + audit_log_filter

    return textwrap.dedent(query)
def get_partition_range_from_partition_id(
    partition_id: str, partition_datetime: Optional[datetime.datetime]
) -> Tuple[datetime.datetime, datetime.datetime]:
    """Map a BigQuery partition id to its [start, end) datetime window.

    The granularity is encoded in the id's length: YYYY (yearly), YYYYMM
    (monthly), YYYYMMDD (daily) or YYYYMMDDHH (hourly). If
    *partition_datetime* is not given it is parsed from the id; either way
    it is truncated to the start of the partition window.
    """
    formats = {4: "%Y", 6: "%Y%m", 8: "%Y%m%d", 10: "%Y%m%d%H"}
    durations = {
        4: relativedelta(years=1),
        6: relativedelta(months=1),
        8: relativedelta(days=1),
        10: relativedelta(hours=1),
    }

    width = len(partition_id)
    if width not in formats:
        raise ValueError(
            f"check your partition_id {partition_id}. It must be yearly/monthly/daily/hourly."
        )

    duration: relativedelta = durations[width]
    if not partition_datetime:
        partition_datetime = datetime.datetime.strptime(partition_id, formats[width])
    # Truncate to the beginning of the partition window.
    if width == 4:
        partition_datetime = partition_datetime.replace(month=1, day=1)
    elif width == 6:
        partition_datetime = partition_datetime.replace(day=1)

    upper_bound_partition_datetime = partition_datetime + duration
    return partition_datetime, upper_bound_partition_datetime
# Handle the GEOGRAPHY type. We will temporarily patch the _type_map
# in the get_workunits method of the source.
GEOGRAPHY = make_sqlalchemy_type("GEOGRAPHY")
register_custom_type(GEOGRAPHY)
# Sanity check: the private type map we patch must exist in this
# sqlalchemy_bigquery version.
assert sqlalchemy_bigquery._types._type_map
# STRUCT is a custom sqlalchemy data type defined by the sqlalchemy_bigquery library
# https://github.com/googleapis/python-bigquery-sqlalchemy/blob/934e25f705fd9f226e438d075c7e00e495cce04e/sqlalchemy_bigquery/_types.py#L47
register_custom_type(sqlalchemy_bigquery.STRUCT, output=RecordTypeClass)
@dataclass
class BigQueryPartitionColumn:
    """One row of the partition-column lookup (see get_latest_partition)."""

    table_catalog: str  # project id (used as the first part of `project.dataset.table`)
    table_schema: str  # dataset id
    table_name: str
    column_name: str  # the partitioning column
    data_type: str  # e.g. TIMESTAMP / DATETIME / DATE (see generate_partition_profiler_query)
    partition_id: str  # latest partition id; its length encodes the granularity
# We can't use close as it is not called if the ingestion is not successful
def cleanup(config: BigQueryConfig) -> None:
    """Delete the temporary credentials file, if one was materialized.

    Registered via atexit so it runs even when an ingestion fails part-way.
    """
    credentials_path = config._credentials_path
    if credentials_path is None:
        return
    logger.debug(f"Deleting temporary credential file at {credentials_path}")
    os.unlink(credentials_path)
@config_class(BigQueryConfig)
@platform_name("BigQuery")
@support_status(SupportStatus.CERTIFIED)
@capability(
    SourceCapability.PLATFORM_INSTANCE,
    "BigQuery doesn't need platform instances because project ids in BigQuery are globally unique.",
    supported=False,
)
@capability(SourceCapability.DOMAINS, "Supported via the `domain` config field")
@capability(SourceCapability.DATA_PROFILING, "Optionally enabled via configuration")
@capability(SourceCapability.DESCRIPTIONS, "Enabled by default")
@capability(SourceCapability.LINEAGE_COARSE, "Enabled by default")
@capability(
    SourceCapability.USAGE_STATS,
    "Not provided by this module, use `bigquery-usage` for that.",
    supported=False,
)
@capability(SourceCapability.DELETION_DETECTION, "Enabled via stateful ingestion")
class BigQuerySource(SQLAlchemySource):
    """
    This plugin extracts the following:
    - Metadata for databases, schemas, and tables
    - Column types associated with each table
    - Table, row, and column statistics via optional SQL profiling
    - Table level lineage.
    """

    # Implementation note: table-level lineage is computed lazily the first
    # time get_workunits() runs, from either GCP audit logs or exported
    # audit-metadata tables (see _compute_big_query_lineage).
    def __init__(self, config, ctx):
        super().__init__(config, ctx, "bigquery")
        self.config: BigQueryConfig = config
        self.ctx = ctx
        self.report: BigQueryReport = BigQueryReport()
        # Lazily populated destination-table -> upstream-tables map; None
        # means "not computed yet" (see get_workunits).
        self.lineage_metadata: Optional[Dict[str, Set[str]]] = None
        # Cache of "project.dataset.table" -> newest shard id for
        # date-sharded tables (see is_latest_shard).
        self.maximum_shard_ids: Dict[str, str] = dict()
        # Ensure any temp credential file is removed even if ingestion fails.
        atexit.register(cleanup, config)
def get_db_name(self, inspector: Inspector = None) -> str:
if self.config.project_id:
return self.config.project_id
else:
return self._get_project_id(inspector)
def _compute_big_query_lineage(self) -> None:
if not self.config.include_table_lineage:
return
lineage_client_project_id = self._get_lineage_client_project_id()
if self.config.use_exported_bigquery_audit_metadata:
self._compute_bigquery_lineage_via_exported_bigquery_audit_metadata(
lineage_client_project_id
)
else:
self._compute_bigquery_lineage_via_gcp_logging(lineage_client_project_id)
if self.lineage_metadata is None:
self.lineage_metadata = {}
self.report.lineage_metadata_entries = len(self.lineage_metadata)
logger.info(
f"Built lineage map containing {len(self.lineage_metadata)} entries."
)
logger.debug(f"lineage metadata is {self.lineage_metadata}")
def _compute_bigquery_lineage_via_gcp_logging(
self, lineage_client_project_id: Optional[str]
) -> None:
logger.info("Populating lineage info via GCP audit logs")
try:
_clients: List[GCPLoggingClient] = self._make_bigquery_client(
lineage_client_project_id
)
template: str = BQ_FILTER_RULE_TEMPLATE
if self.config.use_v2_audit_metadata:
template = BQ_FILTER_RULE_TEMPLATE_V2
log_entries: Iterable[AuditLogEntry] = self._get_bigquery_log_entries(
_clients, template
)
parsed_entries: Iterable[QueryEvent] = self._parse_bigquery_log_entries(
log_entries
)
self.lineage_metadata = self._create_lineage_map(parsed_entries)
except Exception as e:
self.error(
logger,
"lineage-gcp-logs",
f"Error was {e}",
)
def _compute_bigquery_lineage_via_exported_bigquery_audit_metadata(
self, lineage_client_project_id: Optional[str]
) -> None:
logger.info("Populating lineage info via exported GCP audit logs")
try:
_client: BigQueryClient = BigQueryClient(project=lineage_client_project_id)
exported_bigquery_audit_metadata: Iterable[
BigQueryAuditMetadata
] = self._get_exported_bigquery_audit_metadata(_client)
parsed_entries: Iterable[
QueryEvent
] = self._parse_exported_bigquery_audit_metadata(
exported_bigquery_audit_metadata
)
self.lineage_metadata = self._create_lineage_map(parsed_entries)
except Exception as e:
self.error(
logger,
"lineage-exported-gcp-audit-logs",
f"Error: {e}",
)
def _make_bigquery_client(
self, lineage_client_project_id: Optional[str]
) -> List[GCPLoggingClient]:
# See https://github.com/googleapis/google-cloud-python/issues/2674 for
# why we disable gRPC here.
client_options = self.config.extra_client_options.copy()
client_options["_use_grpc"] = False
if lineage_client_project_id is not None:
return [
GCPLoggingClient(**client_options, project=lineage_client_project_id)
]
else:
return [GCPLoggingClient(**client_options)]
def _get_lineage_client_project_id(self) -> Optional[str]:
project_id: Optional[str] = (
self.config.lineage_client_project_id
if self.config.lineage_client_project_id
else self.config.project_id
)
return project_id
    def _get_bigquery_log_entries(
        self,
        clients: List[GCPLoggingClient],
        template: str,
    ) -> Union[Iterable[AuditLogEntry], Iterable[BigQueryAuditMetadata]]:
        """Stream audit-log entries from Cloud Logging for the config window.

        The window is widened by max_query_duration on both sides so queries
        that straddle the window edges are not missed.
        """
        self.report.num_total_log_entries = 0
        # Add a buffer to start and end time to account for delays in logging events.
        start_time = (self.config.start_time - self.config.max_query_duration).strftime(
            BQ_DATETIME_FORMAT
        )
        self.report.log_entry_start_time = start_time
        end_time = (self.config.end_time + self.config.max_query_duration).strftime(
            BQ_DATETIME_FORMAT
        )
        self.report.log_entry_end_time = end_time
        # NOTE(review): `filter` shadows the builtin; kept as-is.
        filter = template.format(
            start_time=start_time,
            end_time=end_time,
        )
        logger.info(
            f"Start loading log entries from BigQuery start_time={start_time} and end_time={end_time}"
        )
        for client in clients:
            # NOTE(review): only the list_entries() call sits inside the rate
            # limiter; iteration of the returned entries happens outside it.
            if self.config.rate_limit:
                with RateLimiter(max_calls=self.config.requests_per_min, period=60):
                    entries = client.list_entries(
                        filter_=filter, page_size=self.config.log_page_size
                    )
            else:
                entries = client.list_entries(
                    filter_=filter, page_size=self.config.log_page_size
                )
            for entry in entries:
                self.report.num_total_log_entries += 1
                yield entry
        logger.info(
            f"Finished loading {self.report.num_total_log_entries} log entries from BigQuery so far"
        )
    def _get_exported_bigquery_audit_metadata(
        self, bigquery_client: BigQueryClient
    ) -> Iterable[BigQueryAuditMetadata]:
        """Yield audit-metadata rows from each configured export dataset.

        Errors out (and yields nothing) when no audit-metadata datasets are
        configured.
        """
        if self.config.bigquery_audit_metadata_datasets is None:
            self.error(
                logger, "audit-metadata", "bigquery_audit_metadata_datasets not set"
            )
            self.report.bigquery_audit_metadata_datasets_missing = True
            return
        # Widen the window by max_query_duration on both sides (same
        # reasoning as in _get_bigquery_log_entries).
        start_time: str = (
            self.config.start_time - self.config.max_query_duration
        ).strftime(BQ_DATETIME_FORMAT)
        self.report.audit_start_time = start_time
        end_time: str = (
            self.config.end_time + self.config.max_query_duration
        ).strftime(BQ_DATETIME_FORMAT)
        self.report.audit_end_time = end_time
        for dataset in self.config.bigquery_audit_metadata_datasets:
            logger.info(
                f"Start loading log entries from BigQueryAuditMetadata in {dataset}"
            )
            query: str
            if self.config.use_date_sharded_audit_log_tables:
                # Date-sharded log tables additionally need date bounds to
                # select the right shards.
                start_date: str = (
                    self.config.start_time - self.config.max_query_duration
                ).strftime(BQ_DATE_SHARD_FORMAT)
                end_date: str = (
                    self.config.end_time + self.config.max_query_duration
                ).strftime(BQ_DATE_SHARD_FORMAT)
                query = bigquery_audit_metadata_query_template(
                    dataset, self.config.use_date_sharded_audit_log_tables
                ).format(
                    start_time=start_time,
                    end_time=end_time,
                    start_date=start_date,
                    end_date=end_date,
                )
            else:
                query = bigquery_audit_metadata_query_template(
                    dataset, self.config.use_date_sharded_audit_log_tables
                ).format(start_time=start_time, end_time=end_time)
            query_job = bigquery_client.query(query)
            logger.info(
                f"Finished loading log entries from BigQueryAuditMetadata in {dataset}"
            )
            # NOTE(review): here the rate limiter wraps iteration of the
            # whole result set, unlike _get_bigquery_log_entries.
            if self.config.rate_limit:
                with RateLimiter(max_calls=self.config.requests_per_min, period=60):
                    yield from query_job
            else:
                yield from query_job
    # Currently we only parse JobCompleted events but in future we would want to parse other
    # events to also create field level lineage.
    def _parse_bigquery_log_entries(
        self,
        entries: Union[Iterable[AuditLogEntry], Iterable[BigQueryAuditMetadata]],
    ) -> Iterable[QueryEvent]:
        """Convert raw audit-log entries into QueryEvents.

        Tries the v1 entry format first, then the v2 format; entries that
        match neither are reported as errors and skipped.
        """
        self.report.num_parsed_log_entires = 0
        for entry in entries:
            event: Optional[QueryEvent] = None
            missing_entry = QueryEvent.get_missing_key_entry(entry=entry)
            if missing_entry is None:
                event = QueryEvent.from_entry(entry)
            # v2 is only attempted if v1 parsing did not produce an event.
            missing_entry_v2 = QueryEvent.get_missing_key_entry_v2(entry=entry)
            if event is None and missing_entry_v2 is None:
                event = QueryEvent.from_entry_v2(entry)
            if event is None:
                self.error(
                    logger,
                    f"{entry.log_name}-{entry.insert_id}",
                    f"Unable to parse log missing {missing_entry}, missing v2 {missing_entry_v2} for {entry}",
                )
            else:
                self.report.num_parsed_log_entires += 1
                yield event
        logger.info(
            "Parsing BigQuery log entries: "
            f"number of log entries successfully parsed={self.report.num_parsed_log_entires}"
        )
def _parse_exported_bigquery_audit_metadata(
self, audit_metadata_rows: Iterable[BigQueryAuditMetadata]
) -> Iterable[QueryEvent]:
self.report.num_total_audit_entries = 0
self.report.num_parsed_audit_entires = 0
for audit_metadata in audit_metadata_rows:
self.report.num_total_audit_entries += 1
event: Optional[QueryEvent] = None
missing_exported_audit = (
QueryEvent.get_missing_key_exported_bigquery_audit_metadata(
audit_metadata
)
)
if missing_exported_audit is None:
event = QueryEvent.from_exported_bigquery_audit_metadata(audit_metadata)
if event is None:
self.error(
logger,
f"{audit_metadata['logName']}-{audit_metadata['insertId']}",
f"Unable to parse audit metadata missing {missing_exported_audit} for {audit_metadata}",
)
else:
self.report.num_parsed_audit_entires += 1
yield event
    def _create_lineage_map(self, entries: Iterable[QueryEvent]) -> Dict[str, Set[str]]:
        """Fold parsed query events into a destination -> upstreams map.

        Destinations are filtered through the schema/table allow-deny
        patterns and self-references are dropped. When a query references
        both tables and views, the SQL is re-parsed so only directly
        accessed objects remain as upstreams.
        """
        lineage_map: Dict[str, Set[str]] = collections.defaultdict(set)
        self.report.num_total_lineage_entries = 0
        self.report.num_skipped_lineage_entries_missing_data = 0
        self.report.num_skipped_lineage_entries_not_allowed = 0
        self.report.num_skipped_lineage_entries_other = 0
        for e in entries:
            self.report.num_total_lineage_entries += 1
            # An event without destination or without any referenced
            # tables/views carries no lineage information.
            if e.destinationTable is None or not (
                e.referencedTables or e.referencedViews
            ):
                self.report.num_skipped_lineage_entries_missing_data += 1
                continue
            # Skip if schema/table pattern don't allow the destination table
            destination_table_str = str(e.destinationTable.remove_extras())
            # NOTE(review): parts[3] is treated as the schema (dataset) and
            # the last part as the table of the "/"-separated ref string.
            destination_table_str_parts = destination_table_str.split("/")
            if not self.config.schema_pattern.allowed(
                destination_table_str_parts[3]
            ) or not self.config.table_pattern.allowed(destination_table_str_parts[-1]):
                self.report.num_skipped_lineage_entries_not_allowed += 1
                continue
            has_table = False
            for ref_table in e.referencedTables:
                ref_table_str = str(ref_table.remove_extras())
                if ref_table_str != destination_table_str:
                    lineage_map[destination_table_str].add(ref_table_str)
                    has_table = True
            has_view = False
            for ref_view in e.referencedViews:
                ref_view_str = str(ref_view.remove_extras())
                if ref_view_str != destination_table_str:
                    lineage_map[destination_table_str].add(ref_view_str)
                    has_view = True
            if has_table and has_view:
                # If there is a view being referenced then bigquery sends both the view as well as underlying table
                # in the references. There is no distinction between direct/base objects accessed. So doing sql parsing
                # to ensure we only use direct objects accessed for lineage
                parser = BigQuerySQLParser(e.query)
                referenced_objs = set(
                    map(lambda x: x.split(".")[-1], parser.get_tables())
                )
                curr_lineage_str = lineage_map[destination_table_str]
                new_lineage_str = set()
                for lineage_str in curr_lineage_str:
                    name = lineage_str.split("/")[-1]
                    if name in referenced_objs:
                        new_lineage_str.add(lineage_str)
                lineage_map[destination_table_str] = new_lineage_str
            if not (has_table or has_view):
                # All references were self-references.
                self.report.num_skipped_lineage_entries_other += 1
        return lineage_map
    def get_latest_partition(
        self, schema: str, table: str
    ) -> Optional[BigQueryPartitionColumn]:
        """Look up the table's partition column and latest partition id.

        Returns None when the lookup query yields no row (e.g. for
        unpartitioned tables).
        """
        url = self.config.get_sql_alchemy_url()
        engine = create_engine(url, **self.config.options)
        with engine.connect() as con:
            inspector = inspect(con)
            sql = BQ_GET_LATEST_PARTITION_TEMPLATE.format(
                project_id=self.get_db_name(inspector), schema=schema, table=table
            )
            result = con.execute(sql)
            # Bigquery only supports one partition column
            # https://stackoverflow.com/questions/62886213/adding-multiple-partitioned-columns-to-bigquery-table-from-sql-query
            row = result.fetchone()
            if row:
                return BigQueryPartitionColumn(**row)
            return None
def get_shard_from_table(self, table: str) -> Tuple[str, Optional[str]]:
match = re.search(SHARDED_TABLE_REGEX, table, re.IGNORECASE)
if match:
table_name = match.group(1)
shard = match.group(2)
return table_name, shard
return table, None
def is_latest_shard(self, project_id: str, schema: str, table: str) -> bool:
# Getting latest shard from table names
# https://cloud.google.com/bigquery/docs/partitioned-tables#dt_partition_shard
table_name, shard = self.get_shard_from_table(table)
if shard:
logger.debug(f"{table_name} is sharded and shard id is: {shard}")
url = self.config.get_sql_alchemy_url()
engine = create_engine(url, **self.config.options)
if f"{project_id}.{schema}.{table_name}" not in self.maximum_shard_ids:
with engine.connect() as con:
sql = BQ_GET_LATEST_SHARD.format(
project_id=project_id,
schema=schema,
table=table_name,
)
result = con.execute(sql)
for row in result:
max_shard = row["max_shard"]
self.maximum_shard_ids[
f"{project_id}.{schema}.{table_name}"
] = max_shard
logger.debug(f"Max shard for table {table_name} is {max_shard}")
return (
self.maximum_shard_ids[f"{project_id}.{schema}.{table_name}"] == shard
)
else:
return True
def get_extra_tags(
self, inspector: Inspector, schema: str, table: str
) -> Dict[str, List[str]]:
extra_tags: Dict[str, List[str]] = {}
partition: Optional[BigQueryPartitionColumn] = self.get_latest_partition(
schema, table
)
if partition:
extra_tags[partition.column_name] = [Constants.TAG_PARTITION_KEY]
return extra_tags
    def generate_partition_profiler_query(
        self, schema: str, table: str, partition_datetime: Optional[datetime.datetime]
    ) -> Tuple[Optional[str], Optional[str]]:
        """
        Method returns partition id if table is partitioned or sharded and generate custom partition query for
        partitioned table.
        See more about partitioned tables at https://cloud.google.com/bigquery/docs/partitioned-tables
        """
        partition = self.get_latest_partition(schema, table)
        if partition:
            partition_where_clause: str
            logger.debug(f"{table} is partitioned and partition column is {partition}")
            # Translate the partition id into a concrete datetime window.
            (
                partition_datetime,
                upper_bound_partition_datetime,
            ) = get_partition_range_from_partition_id(
                partition.partition_id, partition_datetime
            )
            if partition.data_type in ("TIMESTAMP", "DATETIME"):
                # Restrict the profile to the partition's datetime window.
                partition_where_clause = "{column_name} BETWEEN '{partition_id}' AND '{upper_bound_partition_id}'".format(
                    column_name=partition.column_name,
                    partition_id=partition_datetime,
                    upper_bound_partition_id=upper_bound_partition_datetime,
                )
            elif partition.data_type == "DATE":
                # DATE partitions match on exact equality of the date.
                partition_where_clause = "{column_name} = '{partition_id}'".format(
                    column_name=partition.column_name,
                    partition_id=partition_datetime.date(),
                )
            else:
                logger.warning(f"Not supported partition type {partition.data_type}")
                return None, None
            custom_sql = """
SELECT
    *
FROM
    `{table_catalog}.{table_schema}.{table_name}`
WHERE
    {partition_where_clause}
""".format(
                table_catalog=partition.table_catalog,
                table_schema=partition.table_schema,
                table_name=partition.table_name,
                partition_where_clause=partition_where_clause,
            )
            return (partition.partition_id, custom_sql)
        else:
            # For sharded table we want to get the partition id but not needed to generate custom query
            table, shard = self.get_shard_from_table(table)
            if shard:
                return shard, None
        return None, None
def is_dataset_eligible_for_profiling(
self, dataset_name: str, sql_config: SQLAlchemyConfig
) -> bool:
"""
Method overrides default profiling filter which checks profiling eligibility based on allow-deny pattern.
This one also don't profile those sharded tables which are not the latest.
"""
if not super().is_dataset_eligible_for_profiling(dataset_name, sql_config):
return False
(project_id, schema, table) = dataset_name.split(".")
if not self.is_latest_shard(project_id=project_id, table=table, schema=schema):
logger.debug(
f"{dataset_name} is sharded but not the latest shard, skipping..."
)
return False
return True
@classmethod
def create(cls, config_dict, ctx):
config = BigQueryConfig.parse_obj(config_dict)
return cls(config, ctx)
    def add_config_to_report(self):
        # Copy the config knobs that affect lineage extraction into the
        # report so they show up in ingestion summaries.
        self.report.start_time = self.config.start_time
        self.report.end_time = self.config.end_time
        self.report.include_table_lineage = self.config.include_table_lineage
        self.report.use_date_sharded_audit_log_tables = (
            self.config.use_date_sharded_audit_log_tables
        )
        self.report.log_page_size = self.config.log_page_size
        self.report.use_exported_bigquery_audit_metadata = (
            self.config.use_exported_bigquery_audit_metadata
        )
        self.report.use_v2_audit_metadata = self.config.use_v2_audit_metadata
    # Overriding the get_workunits method to first compute the workunits using the base SQLAlchemySource
    # and then computing lineage information only for those datasets that were ingested. This helps us to
    # maintain a clear separation between SQLAlchemySource and the BigQuerySource. Also, this way we honor
    # that flags like schema and table patterns for lineage computation as well.
    def get_workunits(self) -> Iterable[Union[MetadataWorkUnit, SqlWorkUnit]]:
        # Only compute the lineage if the object is None. This is a safety
        # check in case future refactoring ends up computing lineage twice.
        self.add_config_to_report()
        if self.lineage_metadata is None:
            self._compute_big_query_lineage()
        # Temporarily teach the BigQuery dialect about GEOGRAPHY while the
        # base class introspects tables.
        with patch.dict(
            "sqlalchemy_bigquery._types._type_map",
            {"GEOGRAPHY": GEOGRAPHY},
            clear=False,
        ):
            for wu in super().get_workunits():
                yield wu
                # Attach an upstreamLineage aspect right after each dataset
                # snapshot workunit.
                if (
                    isinstance(wu, SqlWorkUnit)
                    and isinstance(wu.metadata, MetadataChangeEvent)
                    and isinstance(wu.metadata.proposedSnapshot, DatasetSnapshot)
                ):
                    lineage_mcp = self.get_lineage_mcp(wu.metadata.proposedSnapshot.urn)
                    if lineage_mcp is not None:
                        lineage_wu = MetadataWorkUnit(
                            id=f"{self.platform}-{lineage_mcp.entityUrn}-{lineage_mcp.aspectName}",
                            mcp=lineage_mcp,
                        )
                        yield lineage_wu
                        self.report.report_workunit(lineage_wu)
def get_upstream_tables(
self, bq_table: str, tables_seen: List[str] = []
) -> Set[BigQueryTableRef]:
upstreams: Set[BigQueryTableRef] = set()
assert self.lineage_metadata
for ref_table in self.lineage_metadata[str(bq_table)]:
upstream_table = BigQueryTableRef.from_string_name(ref_table)
if upstream_table.is_temporary_table(self.config.temp_table_dataset_prefix):
# making sure we don't process a table twice and not get into a recursive loop
if ref_table in tables_seen:
logger.debug(
f"Skipping table {ref_table} because it was seen already"
)
continue
tables_seen.append(ref_table)
if ref_table in self.lineage_metadata:
upstreams = upstreams.union(
self.get_upstream_tables(ref_table, tables_seen=tables_seen)
)
else:
upstreams.add(upstream_table)
return upstreams
    def get_lineage_mcp(
        self, dataset_urn: str
    ) -> Optional[MetadataChangeProposalWrapper]:
        """Build an upstreamLineage MCP for the given dataset urn, if any.

        Returns None when lineage was not computed, the urn cannot be
        parsed, or no upstreams are known for the table.
        """
        if self.lineage_metadata is None:
            logger.debug("No lineage metadata so skipping getting mcp")
            return None
        dataset_key: Optional[DatasetKey] = mce_builder.dataset_urn_to_key(dataset_urn)
        if dataset_key is None:
            logger.debug(f"No dataset_key for {dataset_urn} so skipping getting mcp")
            return None
        project_id, dataset_name, tablename = dataset_key.name.split(".")
        bq_table = BigQueryTableRef(project_id, dataset_name, tablename)
        if str(bq_table) in self.lineage_metadata:
            upstream_list: List[UpstreamClass] = []
        # Sorting the list of upstream lineage events in order to avoid creating multiple aspects in backend
        # even if the lineage is same but the order is different.
            for upstream_table in sorted(
                self.get_upstream_tables(str(bq_table), tables_seen=[])
            ):
                upstream_table_class = UpstreamClass(
                    mce_builder.make_dataset_urn_with_platform_instance(
                        self.platform,
                        "{project}.{database}.{table}".format(
                            project=upstream_table.project,
                            database=upstream_table.dataset,
                            table=upstream_table.table,
                        ),
                        self.config.platform_instance,
                        self.config.env,
                    ),
                    DatasetLineageTypeClass.TRANSFORMED,
                )
                if self.config.upstream_lineage_in_report:
                    # Optionally surface the raw lineage edges in the report.
                    current_lineage_map: Set = self.report.upstream_lineage.get(
                        str(bq_table), set()
                    )
                    current_lineage_map.add(str(upstream_table))
                    self.report.upstream_lineage[str(bq_table)] = current_lineage_map
                upstream_list.append(upstream_table_class)
            if upstream_list:
                upstream_lineage = UpstreamLineageClass(upstreams=upstream_list)
                mcp = MetadataChangeProposalWrapper(
                    entityType="dataset",
                    changeType=ChangeTypeClass.UPSERT,
                    entityUrn=dataset_urn,
                    aspectName="upstreamLineage",
                    aspect=upstream_lineage,
                )
                return mcp
        return None
def prepare_profiler_args(
self,
schema: str,
table: str,
partition: Optional[str],
custom_sql: Optional[str] = None,
) -> dict:
return dict(
schema=self.config.project_id,
table=f"{schema}.{table}",
partition=partition,
custom_sql=custom_sql,
)
    @staticmethod
    @functools.lru_cache()
    def _get_project_id(inspector: Inspector) -> str:
        # Reaches through the SQLAlchemy connection to the underlying
        # google-cloud client for its default project id.
        # NOTE(review): lru_cache keys on the inspector instance and keeps it
        # alive for the lifetime of the cache.
        with inspector.bind.connect() as connection:
            project_id = connection.connection._client.project
            return project_id
def normalise_dataset_name(self, dataset_name: str) -> str:
(project_id, schema, table) = dataset_name.split(".")
trimmed_table_name = (
BigQueryTableRef.from_spec_obj(
{"projectId": project_id, "datasetId": schema, "tableId": table}
)
.remove_extras()
.table
)
return f"{project_id}.{schema}.{trimmed_table_name}"
def get_identifier(
self,
*,
schema: str,
entity: str,
inspector: Inspector,
**kwargs: Any,
) -> str:
assert inspector
project_id = self._get_project_id(inspector)
table_name = BigQueryTableRef.from_spec_obj(
{"projectId": project_id, "datasetId": schema, "tableId": entity}
).table
return f"{project_id}.{schema}.{table_name}"
def standardize_schema_table_names(
self, schema: str, entity: str
) -> Tuple[str, str]:
# The get_table_names() method of the BigQuery driver returns table names
# formatted as "<schema>.<table>" as the table name. Since later calls
# pass both schema and table, schema essentially is passed in twice. As
# such, one of the schema names is incorrectly interpreted as the
# project ID. By removing the schema from the table name, we avoid this
# issue.
segments = entity.split(".")
if len(segments) != 2:
raise ValueError(f"expected table to contain schema name already {entity}")
if segments[0] != schema:
raise ValueError(f"schema {schema} does not match table {entity}")
return segments[0], segments[1]
def gen_schema_key(self, db_name: str, schema: str) -> PlatformKey:
return BigQueryDatasetKey(
project_id=db_name,
dataset_id=schema,
platform=self.platform,
instance=self.config.platform_instance
if self.config.platform_instance is not None
else self.config.env,
)
def gen_database_key(self, database: str) -> PlatformKey:
return ProjectIdKey(
project_id=database,
platform=self.platform,
instance=self.config.platform_instance
if self.config.platform_instance is not None
else self.config.env,
)
def gen_database_containers(self, database: str) -> Iterable[MetadataWorkUnit]:
domain_urn = self._gen_domain_urn(database)
database_container_key = self.gen_database_key(database)
container_workunits = gen_containers(
container_key=database_container_key,
name=database,
sub_types=["Project"],
domain_urn=domain_urn,
)
for wu in container_workunits:
self.report.report_workunit(wu)
yield wu
def gen_schema_containers(
self, schema: str, db_name: str
) -> Iterable[MetadataWorkUnit]:
schema_container_key = self.gen_schema_key(db_name, schema)
database_container_key = self.gen_database_key(database=db_name)
container_workunits = gen_containers(
schema_container_key,
schema,
["Dataset"],
database_container_key,
)
for wu in container_workunits:
self.report.report_workunit(wu)
yield wu
| StarcoderdataPython |
74437 | <reponame>Zylphrex/friendly-octo-meme
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-22 00:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``password`` field from the ``faveruser`` model."""

    dependencies = [
        ('faver_app', '0003_contract'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='faveruser',
            name='password',
        ),
    ]
| StarcoderdataPython |
1786381 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AutoCastVariable."""
import os
import threading
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras.mixed_precision import autocast_variable
from keras.optimizers.optimizer_v2 import adadelta
from keras.optimizers.optimizer_v2 import adagrad
from keras.optimizers.optimizer_v2 import adam
from keras.optimizers.optimizer_v2 import adamax
from keras.optimizers.optimizer_v2 import ftrl
from keras.optimizers.optimizer_v2 import (
gradient_descent as gradient_descent_v2,
)
from keras.optimizers.optimizer_v2 import nadam
from keras.optimizers.optimizer_v2 import rmsprop
# Parameterization helper: runs each decorated test both without a strategy
# (default_strategy) and under a two-CPU MirroredStrategy.
maybe_distribute = tf.__internal__.test.combinations.combine(
    distribution=[
        tf.__internal__.distribute.combinations.default_strategy,
        tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2,  # noqa: E501
    ]
)
def get_var(val, dtype, name=None):
    """Create a tf.Variable; thin wrapper so the tests read uniformly."""
    return tf.Variable(val, dtype=dtype, name=name)
def set_cpu_logical_devices_to_at_least(num):
    """Create cpu logical devices of at least a given number."""
    physical_devices = tf.config.list_physical_devices("CPU")
    if not physical_devices:
        raise RuntimeError("No CPU found")
    if len(physical_devices) >= num:
        return
    # By default each physical device corresponds to one logical device. We
    # create multiple logical devices for the last physical device so that we
    # have `num` logical devices.
    # NOTE: `num` is reused below as "how many logical devices to carve out
    # of the last physical device".
    num = num - len(physical_devices) + 1
    logical_devices = []
    for _ in range(num):
        logical_devices.append(tf.config.LogicalDeviceConfiguration())
    # Create logical devices from the last device since sometimes the first GPU
    # is the primary graphic card and may have less memory available.
    tf.config.set_logical_device_configuration(
        physical_devices[-1], logical_devices
    )
@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(mode=["graph", "eager"])
)
class AutoCastVariableTest(tf.test.TestCase, parameterized.TestCase):
    # The class decorator runs every test in both graph and eager mode.
    def setUp(self):
        # The mirrored-strategy parameterizations need multiple logical CPUs.
        set_cpu_logical_devices_to_at_least(3)
        super().setUp()
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_read(self, distribution):
        # Reads outside an autocast scope keep the variable's true dtype;
        # inside a scope the read values are cast to the scope dtype.
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)
            # outside of auto cast scope.
            self.assertEqual(x.dtype, tf.float32)
            self.assertEqual(x.value().dtype, tf.float32)
            self.assertEqual(x.read_value().dtype, tf.float32)
            self.assertEqual(tf.identity(x).dtype, tf.float32)
            # within auto cast scope of different dtype
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # `dtype` still reports the true dtype; only reads are cast.
                self.assertEqual(x.dtype, tf.float32)
                self.assertEqual(x.value().dtype, tf.float16)
                self.assertEqual(x.read_value().dtype, tf.float16)
                self.assertEqual(tf.identity(x).dtype, tf.float16)
            # within auto cast scope of same dtype
            with autocast_variable.enable_auto_cast_variables(tf.float32):
                self.assertEqual(x.dtype, tf.float32)
                self.assertEqual(x.value().dtype, tf.float32)
                self.assertEqual(x.read_value().dtype, tf.float32)
                self.assertEqual(tf.identity(x).dtype, tf.float32)
    def test_sparse_reads(self):
        # sparse_read / gather_nd must honor the autocast scope too.
        x = get_var([1.0, 2], tf.float32)
        # DistributedVariables do not support sparse_read or gather_nd, so we
        # pass distribute=False
        x = autocast_variable.create_autocast_variable(x)
        self.evaluate(x.initializer)
        self.assertEqual(x.sparse_read([0]).dtype, tf.float32)
        self.assertEqual(x.gather_nd([0]).dtype, tf.float32)
        with autocast_variable.enable_auto_cast_variables(tf.float16):
            self.assertEqual(x.sparse_read([0]).dtype, tf.float16)
            self.assertEqual(x.gather_nd([0]).dtype, tf.float16)
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_read_nested_scopes(self, distribution):
        # An inner scope overrides the cast dtype; leaving it restores the
        # outer scope's dtype.
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.assertEqual(x.read_value().dtype, tf.float16)
                with autocast_variable.enable_auto_cast_variables(tf.float32):
                    self.assertEqual(x.read_value().dtype, tf.float32)
                self.assertEqual(x.read_value().dtype, tf.float16)
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_dtype_is_not_string(self, distribution):
        # `dtype` and `true_dtype` must be real DType objects (not string
        # names), both inside and outside an autocast scope.
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.assertEqual(x.dtype, tf.float32)
            self.assertIsInstance(x.dtype, tf.DType)
            self.assertEqual(x.true_dtype, tf.float32)
            self.assertIsInstance(x.true_dtype, tf.DType)
            dtype = tf.float16
            with autocast_variable.enable_auto_cast_variables(dtype):
                self.assertEqual(x.dtype, tf.float32)
                self.assertIsInstance(x.dtype, tf.DType)
                self.assertEqual(x.true_dtype, tf.float32)
                self.assertIsInstance(x.true_dtype, tf.DType)
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_method_delegations(self, distribution):
        # Test AutoCastVariable correctly delegates Variable methods to the
        # underlying variable.
        with self.test_session(), distribution.scope():
            for read_dtype in (tf.float32, tf.float16):
                if tf.distribute.has_strategy() and not tf.executing_eagerly():
                    # MirroredVariable.assign will (incorrectly) return a
                    # Mirrored value instead of a MirroredVariable in graph
                    # mode. So we cannot properly wrap it in an
                    # AutoCastVariable.
                    evaluate = self.evaluate
                else:
                    # In the other cases, assign returns an AutoCastVariable;
                    # this wrapper also checks the wrapping and cast dtype.
                    def evaluate(var):
                        self.assertIsInstance(
                            var, autocast_variable.AutoCastVariable
                        )
                        self.assertEqual(tf.identity(var).dtype, read_dtype)
                        return self.evaluate(var)

                x = get_var(7.0, tf.float32)
                x = autocast_variable.create_autocast_variable(x)
                with autocast_variable.enable_auto_cast_variables(read_dtype):
                    self.evaluate(x.initializer)
                    self.assertEqual(self.evaluate(x.value()), 7)
                    self.assertEqual(self.evaluate(x.read_value()), 7)
                    self.assertTrue(x.trainable)
                    self.assertEqual(
                        x.synchronization, x._variable.synchronization
                    )
                    self.assertEqual(x.aggregation, x._variable.aggregation)
                    self.assertEqual(self.evaluate(x.initialized_value()), 7)
                    if not tf.executing_eagerly():
                        if not tf.distribute.has_strategy():
                            # These functions are not supported for
                            # DistributedVariables
                            x.load(9)
                            self.assertEqual(x.eval(), 9)
                        self.assertEqual(self.evaluate(x.initial_value), 7)
                        self.assertEqual(x.op, x._variable.op)
                        self.assertEqual(x.graph, x._variable.graph)
                    if not tf.distribute.has_strategy():
                        # These attributes are not supported for
                        # DistributedVariables
                        self.assertIsNone(x.constraint)
                        self.assertEqual(x.initializer, x._variable.initializer)
                    self.assertEqual(evaluate(x.assign(8)), 8)
                    self.assertEqual(evaluate(x.assign_add(2)), 10)
                    self.assertEqual(evaluate(x.assign_sub(3)), 7)
                    self.assertEqual(x.name, x._variable.name)
                    self.assertEqual(x.device, x._variable.device)
                    self.assertEqual(x.shape, ())
                    self.assertEqual(x.get_shape(), ())
                if not tf.distribute.has_strategy():
                    # Test scatter_* methods. These are not supported for
                    # DistributedVariables
                    x = get_var([7, 8], tf.float32)
                    x = autocast_variable.create_autocast_variable(x)
                    with autocast_variable.enable_auto_cast_variables(
                        read_dtype
                    ):
                        self.evaluate(x.initializer)
                        self.assertAllEqual(self.evaluate(x.value()), [7, 8])

                        def slices(val, index):
                            # Helper: a single-element IndexedSlices update.
                            return tf.IndexedSlices(
                                values=tf.constant(val, dtype=tf.float32),
                                indices=tf.constant(index, dtype=tf.int32),
                                dense_shape=tf.constant([2], dtype=tf.int32),
                            )

                        self.assertAllEqual(
                            evaluate(x.scatter_sub(slices(1.0, 0))), [6, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_add(slices(1.0, 0))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_max(slices(9.0, 1))), [7, 9]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_min(slices(8.0, 1))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_mul(slices(2.0, 1))), [7, 16]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_div(slices(2.0, 1))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_update(slices(4.0, 1))), [7, 4]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_nd_sub([[0], [1]], [1.0, 2.0])),
                            [6, 2],
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_nd_add([[0], [1]], [1.0, 2.0])),
                            [7, 4],
                        )
                        self.assertAllEqual(
                            evaluate(
                                x.scatter_nd_update([[0], [1]], [1.0, 2.0])
                            ),
                            [1, 2],
                        )
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_operator_overloads(self, distribution):
        """Arithmetic, comparison and indexing operators on an
        AutoCastVariable must behave like the wrapped variable, for both
        the true dtype (float32) and the casted read dtype (float16)."""
        with distribution.scope():
            for read_dtype in (tf.float32, tf.float16):
                x = get_var(7.0, tf.float32)
                x = autocast_variable.create_autocast_variable(x)
                with autocast_variable.enable_auto_cast_variables(read_dtype):
                    self.evaluate(x.initializer)
                    # Binary operators with the variable on the left, on
                    # the right, and on both sides.
                    self.assertAlmostEqual(8, self.evaluate(x + 1))
                    self.assertAlmostEqual(10, self.evaluate(3 + x))
                    self.assertAlmostEqual(14, self.evaluate(x + x))
                    self.assertAlmostEqual(5, self.evaluate(x - 2))
                    self.assertAlmostEqual(6, self.evaluate(13 - x))
                    self.assertAlmostEqual(0, self.evaluate(x - x))
                    self.assertAlmostEqual(14, self.evaluate(x * 2))
                    self.assertAlmostEqual(21, self.evaluate(3 * x))
                    self.assertAlmostEqual(49, self.evaluate(x * x))
                    self.assertAlmostEqual(3.5, self.evaluate(x / 2))
                    self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
                    self.assertAlmostEqual(3, self.evaluate(x // 2))
                    self.assertAlmostEqual(2, self.evaluate(15 // x))
                    if read_dtype == tf.float32:
                        # The "mod" operator does not support float16
                        self.assertAlmostEqual(1, self.evaluate(x % 2))
                        self.assertAlmostEqual(2, self.evaluate(16 % x))
                    self.assertTrue(self.evaluate(x < 12))
                    self.assertTrue(self.evaluate(x <= 12))
                    self.assertFalse(self.evaluate(x > 12))
                    self.assertFalse(self.evaluate(x >= 12))
                    self.assertFalse(self.evaluate(12 < x))
                    self.assertFalse(self.evaluate(12 <= x))
                    self.assertTrue(self.evaluate(12 > x))
                    self.assertTrue(self.evaluate(12 >= x))
                    self.assertAlmostEqual(
                        343, self.evaluate(pow(x, 3)), places=4
                    )
                    self.assertAlmostEqual(
                        128, self.evaluate(pow(2, x)), places=4
                    )
                    self.assertAlmostEqual(-7, self.evaluate(-x))
                    self.assertAlmostEqual(7, self.evaluate(abs(x)))

                    # Indexing, and elementwise ==/!= (TF2 eager only).
                    x = get_var([7, 8, 9], tf.float32)
                    x = autocast_variable.create_autocast_variable(x)
                    self.evaluate(x.initializer)
                    self.assertEqual(self.evaluate(x[1]), 8)
                    if tf.__internal__.tf2.enabled() and tf.executing_eagerly():
                        self.assertAllEqual(
                            x == [7.0, 8.0, 10.0], [True, True, False]
                        )
                        self.assertAllEqual(
                            x != [7.0, 8.0, 10.0], [False, False, True]
                        )
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign(self, distribution):
        """assign/assign_add/assign_sub must keep enforcing the variable's
        true float32 dtype — rejecting float16 tensors — whether or not a
        float16 autocast scope is active, and must support Python floats,
        chained assigns, read_value=False and the tf.compat.v1 assign ops.
        """
        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)

            # outside of auto cast scope.
            v1 = tf.constant(3.0, dtype=tf.float32)
            v2 = tf.constant(3.0, dtype=tf.float16)

            def run_and_check():
                # Assign float32 values
                self.assertAllClose(3.0, self.evaluate(x.assign(v1)))
                self.assertAllClose(3.0 * 2, self.evaluate(x.assign_add(v1)))
                self.assertAllClose(3.0, self.evaluate(x.assign_sub(v1)))

                # Attempt to assign float16 values
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign(v2))
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign_add(v2))
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign_sub(v2))

                # Assign Python floats
                self.assertAllClose(0.0, self.evaluate(x.assign(0.0)))
                self.assertAllClose(3.0, self.evaluate(x.assign(3.0)))
                self.assertAllClose(3.0 * 2, self.evaluate(x.assign_add(3.0)))
                self.assertAllClose(3.0, self.evaluate(x.assign_sub(3.0)))

                # Assign multiple times
                # This currently doesn't work in graph mode if a strategy is
                # used
                if not tf.distribute.has_strategy() or tf.executing_eagerly():
                    assign = x.assign(1.0)
                    self.assertAllClose(1.0, self.evaluate(assign))
                    self.assertAllClose(0.0, self.evaluate(assign.assign(0.0)))
                    assign_add = x.assign_add(3.0)
                    self.assertAllClose(3.0, self.evaluate(assign_add))
                    self.assertAllClose(
                        3.0 * 3,
                        self.evaluate(x.assign_add(3.0).assign_add(3.0)),
                    )
                    self.assertAllClose(3.0 * 3, x)
                    assign_sub = x.assign_sub(3.0)
                    self.assertAllClose(3.0 * 2, self.evaluate(assign_sub))
                    self.assertAllClose(
                        0.0, self.evaluate(x.assign_sub(3.0).assign_sub(3.0))
                    )

                # Assign with read_value=False
                self.assertIsNone(
                    self.evaluate(x.assign(1.0, read_value=False))
                )
                self.assertAllClose(1.0, self.evaluate(x))
                self.assertIsNone(
                    self.evaluate(x.assign_add(2.0, read_value=False))
                )
                self.assertAllClose(3.0, self.evaluate(x))
                self.assertIsNone(
                    self.evaluate(x.assign_sub(3.0, read_value=False))
                )
                self.assertAllClose(0.0, self.evaluate(x))

                # Use the tf.assign functions instead of the var.assign methods.
                self.assertAllClose(
                    0.0, self.evaluate(tf.compat.v1.assign(x, 0.0))
                )
                self.assertAllClose(
                    3.0, self.evaluate(tf.compat.v1.assign(x, 3.0))
                )
                self.assertAllClose(
                    3.0 * 2, self.evaluate(tf.compat.v1.assign_add(x, 3.0))
                )
                self.assertAllClose(
                    3.0, self.evaluate(tf.compat.v1.assign_sub(x, 3.0))
                )

            run_and_check()
            # reset x
            self.evaluate(x.assign(0.0))
            # within auto cast scope.
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # assign still expect float32 value even if in float16 scope
                run_and_check()
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign_tf_function(self, distribution):
        """Chained assigns on an AutoCastVariable work inside a tf.function
        traced under a float16 autocast scope."""
        if not tf.executing_eagerly():
            self.skipTest("Test is not compatible with graph mode")

        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            @tf.function
            def run_assign():
                # 0 -> 1 -> 4 -> 7 -> 5; the chain exercises the values
                # returned by assign/assign_add/assign_sub.
                return (
                    x.assign(1.0)
                    .assign_add(3.0)
                    .assign_add(3.0)
                    .assign_sub(2.0)
                )

            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.assertAllClose(5.0, self.evaluate(run_assign()))
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_op_attribute(self, distribution):
        """AutoCastVariable mirrors tf.Variable's .op semantics in eager
        mode, graph mode, and inside a tf.function."""
        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            # Variable.op raises an AttributeError in Eager mode and is an op in
            # graph mode. Variable.assign(...).op is None in Eager mode and an
            # op in Graph mode or a tf.function. We test this is also true of
            # AutoCastVariable.
            if tf.executing_eagerly():
                with self.assertRaises(AttributeError):
                    x.op
                self.assertIsNone(x.assign(1.0).op)
                self.assertIsNone(x.assign_add(1.0).op)
                self.assertIsNone(x.assign_sub(1.0).op)
            else:
                self.assertIsNotNone(x.op)
                self.assertIsNotNone(x.assign(1.0).op)
                self.assertIsNotNone(x.assign_add(1.0).op)
                self.assertIsNotNone(x.assign_sub(1.0).op)

            @tf.function
            def func():
                # Inside tf.function, assigns are graph ops even when the
                # outer context is eager.
                self.assertIsNotNone(x.assign(1.0).op)
                self.assertIsNotNone(x.assign_add(1.0).op)
                self.assertIsNotNone(x.assign_sub(1.0).op)

            func()
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_tf_function_control_dependencies(self, distribution):
        """An assign op used as a control dependency inside tf.function must
        run before the dependent assign, yielding 0 + 1 + 1 = 2."""
        if not tf.executing_eagerly():
            self.skipTest("Test is not compatible with graph mode")

        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            @tf.function
            def func():
                update = x.assign_add(1.0)
                with tf.control_dependencies([update]):
                    x.assign_add(1.0)

            func()
            self.assertAllClose(2.0, self.evaluate(x))
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign_stays_in_true_dtype(self, distribution):
        """Assigning inside a float16 scope must still store full float32
        precision: an increment too small to represent in fp16 is kept."""
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)
            # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but
            # not in fp32
            small_val = np.finfo("float16").eps / 2
            small_tensor = tf.constant(small_val, dtype=tf.float32)
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # Variable should be increased, despite it appearing to be the
                # same float16 value.
                self.evaluate(x.assign(1.0 + small_tensor))
                self.assertEqual(1.0, self.evaluate(x.value()))
            # Outside the scope the full fp32 value is visible again.
            self.assertEqual(1.0 + small_val, self.evaluate(x))

            self.evaluate(x.assign(1.0))
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.evaluate(x.assign_add(small_tensor))
                self.assertEqual(1.0, self.evaluate(x.value()))
            self.assertEqual(1.0 + small_val, self.evaluate(x))
    def test_thread_local_autocast_dtype(self):
        """The autocast dtype is thread-local: a scope entered on one thread
        must not affect the cast dtype observed by another thread."""
        x = get_var(1.0, tf.float32)
        x = autocast_variable.create_autocast_variable(x)
        self.evaluate(x.initializer)

        with autocast_variable.enable_auto_cast_variables(tf.float16):
            self.assertEqual(tf.identity(x).dtype, tf.float16)

            # New threads should not see the modified value of the autocast
            # dtype.
            var_dtype = None

            def f():
                nonlocal var_dtype
                # _cast_dtype is read on a fresh thread, outside the scope.
                var_dtype = x._cast_dtype

            thread = threading.Thread(target=f)
            thread.start()
            thread.join()
            self.assertEqual(var_dtype, tf.float32)
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_checkpoint(self, distribution):
        """An AutoCastVariable saves/restores through tf.train.Checkpoint
        like a regular variable (restore brings back the saved 123.0)."""
        with self.test_session():
            with distribution.scope():
                x = get_var(1.0, tf.float32)
                x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)
            self.evaluate(x.assign(123.0))

            checkpoint = tf.train.Checkpoint(x=x)
            prefix = os.path.join(self.get_temp_dir(), "ckpt")
            save_path = checkpoint.save(prefix)
            # Overwrite, then restore and verify the checkpointed value wins.
            self.evaluate(x.assign(234.0))
            checkpoint.restore(save_path).assert_consumed().run_restore_ops()
            self.assertEqual(self.evaluate(x), 123.0)
    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_invalid_wrapped_variable(self, distribution):
        """create_autocast_variable rejects non-variables and non-floating
        point variables with descriptive ValueErrors."""
        with distribution.scope():
            # Wrap a non-variable
            with self.assertRaisesRegex(ValueError, "variable must be of type"):
                x = tf.constant([1.0], dtype=tf.float32)
                autocast_variable.create_autocast_variable(x)

            # Wrap a non-floating point variable
            with self.assertRaisesRegex(
                ValueError, "variable must be a floating point"
            ):
                x = get_var(1, tf.int32)
                autocast_variable.create_autocast_variable(x)
    def test_repr(self):
        """repr() shows the true dtype plus the current dtype_to_cast_to;
        eager reprs additionally include the numpy value."""
        # We do not test with DistributionStrategy because we do not want to
        # rely on the exact __repr__ output of a DistributedVariable.
        x = get_var(1.0, tf.float32, name="x")
        x = autocast_variable.create_autocast_variable(x)
        if tf.executing_eagerly():
            self.assertStartsWith(
                repr(x),
                "<AutoCastVariable 'x:0' shape=() dtype=float32 "
                "dtype_to_cast_to=float32, numpy=",
            )
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.assertStartsWith(
                    repr(x),
                    "<AutoCastVariable 'x:0' shape=() dtype=float32 "
                    "dtype_to_cast_to=float16, numpy=",
                )
        else:
            self.assertEqual(
                repr(x),
                "<AutoCastVariable 'x:0' shape=() dtype=float32 "
                "dtype_to_cast_to=float32>",
            )
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.assertEqual(
                    repr(x),
                    "<AutoCastVariable 'x:0' shape=() dtype=float32 "
                    "dtype_to_cast_to=float16>",
                )
    def test_repr_distributed(self):
        """Under MirroredStrategy the repr names AutoCastDistributedVariable
        and reports the inner variable's class, which depends on whether
        the strategy uses variable policies."""
        strategy = tf.distribute.MirroredStrategy(["/cpu:1", "/cpu:2"])
        with strategy.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            # Private flag on the strategy decides the inner variable type;
            # default to False when the attribute is absent.
            use_policy = getattr(strategy.extended, "_use_var_policy", False)
            if use_policy:
                self.assertRegex(
                    repr(x).replace("\n", " "),
                    "<AutoCastDistributedVariable dtype=float32 "
                    "dtype_to_cast_to=float32 "
                    "inner_variable=DistributedVariable.*>",
                )
            else:
                self.assertRegex(
                    repr(x).replace("\n", " "),
                    "<AutoCastDistributedVariable dtype=float32 "
                    "dtype_to_cast_to=float32 "
                    "inner_variable=MirroredVariable.*>",
                )
    @tf.__internal__.distribute.combinations.generate(
        tf.__internal__.test.combinations.combine(
            optimizer_class=[
                adadelta.Adadelta,
                adagrad.Adagrad,
                adam.Adam,
                adamax.Adamax,
                ftrl.Ftrl,
                gradient_descent_v2.SGD,
                nadam.Nadam,
                rmsprop.RMSprop,
                tf.compat.v1.train.GradientDescentOptimizer,
            ],
            use_tf_function=[False, True],
        )
    )
    def test_optimizer(self, optimizer_class, use_tf_function):
        """Every optimizer must update an AutoCastVariable exactly like it
        updates a plain tf.Variable with the same gradient."""
        if use_tf_function and not tf.executing_eagerly():
            self.skipTest("Test does not support graph mode with tf.function")
        x = get_var(1.0, tf.float32)
        x = autocast_variable.create_autocast_variable(x)
        y = get_var(1.0, tf.float32)
        opt = optimizer_class(learning_rate=1.0)

        def f():
            # Minimize both the AutoCastVariable and the normal tf.Variable.
            # Both variables should be updated to the same value.
            op = opt.minimize(lambda: x + y, var_list=[x, y])
            return (
                None
                if tf.compat.v1.executing_eagerly_outside_functions()
                else op
            )

        if use_tf_function:
            f = tf.function(f)

        if tf.executing_eagerly():
            f()
        else:
            # Graph mode: build the op, initialize, then run it.
            op = f()
            self.evaluate(tf.compat.v1.global_variables_initializer())
            self.evaluate(op)
        # Assert the AutoCastVariable has changed from its initial value
        self.assertNotEqual(self.evaluate(x), 1.0)
        # Assert AutoCastVariable is updated correctly by comparing it to the
        # normal variable
        self.assertAlmostEqual(self.evaluate(x), self.evaluate(y))
        if optimizer_class in (
            gradient_descent_v2.SGD,
            tf.compat.v1.train.GradientDescentOptimizer,
        ):
            # With SGD, the variables decreases by exactly 1
            self.assertEqual(self.evaluate(x), 0)
if __name__ == "__main__":
    # Run through TF's test runner so TF flags and devices are configured.
    tf.test.main()
| StarcoderdataPython |
148885 | <gh_stars>1-10
import datetime
import os
import shutil
import time
import tarfile
import yaml
from uuid import uuid1
import pytest
import requests
def pytest_addoption(parser):
    """Register tator-py's command line options with pytest.

    Options: ``--host`` (Tator server URL), ``--token`` (API token),
    ``--bucket`` (optional path to a YAML bucket spec the project should
    use) and ``--keep`` (do not delete created projects on teardown).
    """
    option_specs = (
        (('--host',), dict(help='Tator host',
                           default='https://adamant.duckdns.org')),
        (('--token',), dict(help='API token', default='')),
        (('--bucket',), dict(help='Optional path to yaml file containing bucket spec. If '
                                  'given, the project will use this bucket.')),
        (('--keep',), dict(help='Do not delete project when done',
                           action='store_true')),
    )
    for args, kwargs in option_specs:
        parser.addoption(*args, **kwargs)
def pytest_generate_tests(metafunc):
    """Parametrize the ``host`` and ``token`` fixtures from CLI options.

    Each name is parametrized (as a single-value list) only when the
    collected test actually requests that fixture.
    """
    for option_name in ('host', 'token'):
        if option_name in metafunc.fixturenames:
            value = metafunc.config.getoption(option_name)
            metafunc.parametrize(option_name, [value])
def make_attribute_types():
    """Return the attribute-type specs shared by all test entity types.

    Covers every Tator attribute dtype — bool, int, float, enum, string,
    datetime, geopos and float_array — each named ``test_<dtype>`` with
    representative defaults, bounds, choices or sizes.
    """
    return [
        {'name': 'test_bool', 'dtype': 'bool', 'default': True},
        {'name': 'test_int', 'dtype': 'int', 'default': 0,
         'minimum': -1000, 'maximum': 1000},
        {'name': 'test_float', 'dtype': 'float', 'default': 0.0,
         'minimum': -1000.0, 'maximum': 1000.0},
        {'name': 'test_enum', 'dtype': 'enum',
         'choices': ['a', 'b', 'c'], 'default': 'a'},
        {'name': 'test_string', 'dtype': 'string', 'default': 'asdf'},
        {'name': 'test_datetime', 'dtype': 'datetime', 'use_current': True},
        {'name': 'test_geopos', 'dtype': 'geopos',
         'default': [-179.0, -89.0]},
        {'name': 'test_float_array', 'dtype': 'float_array',
         'default': [0.0, 0.0, 0.0], 'size': 3},
    ]
@pytest.fixture(scope='session')
def organization(request):
    """ Organization ID for a created organization. """
    import tator
    host = request.config.option.host
    token = request.config.option.token
    keep = request.config.option.keep
    tator_api = tator.get_api(host, token)
    # Timestamped name so repeated runs do not collide.
    current_dt = datetime.datetime.now()
    dt_str = current_dt.strftime('%Y_%m_%d__%H_%M_%S')
    response = tator_api.create_organization(organization_spec={
        'name': f'test_organization_{dt_str}',
    })
    organization_id = response.id
    yield organization_id
    # Teardown: delete unless --keep was passed.
    if not keep:
        status = tator_api.delete_organization(organization_id)

@pytest.fixture(scope='session')
def project(request, organization):
    """ Project ID for a created project. """
    import tator
    host = request.config.option.host
    token = request.config.option.token
    bucket = request.config.option.bucket
    keep = request.config.option.keep
    tator_api = tator.get_api(host, token)
    current_dt = datetime.datetime.now()
    dt_str = current_dt.strftime('%Y_%m_%d__%H_%M_%S')
    project_spec = {
        'name': f'test_project_{dt_str}',
        'summary': f'Test project created by tator-py unit tests on {current_dt}',
        'organization': organization,
    }
    # Create bucket object if bucket spec is given.
    if bucket is not None:
        with open(bucket, 'r') as f:
            bucket_spec = yaml.safe_load(f)
        response = tator_api.create_bucket(organization, bucket_spec=bucket_spec)
        project_spec['bucket'] = response.id
    response = tator_api.create_project(project_spec=project_spec)
    project_id = response.id
    yield project_id
    if not keep:
        status = tator_api.delete_project(project_id)

@pytest.fixture(scope='session')
def algo_project(request, organization):
    """ Project ID for a created project. """
    # Separate project used by algorithm tests so they do not interfere
    # with the main test project's media/types.
    import tator
    host = request.config.option.host
    token = request.config.option.token
    keep = request.config.option.keep
    tator_api = tator.get_api(host, token)
    current_dt = datetime.datetime.now()
    dt_str = current_dt.strftime('%Y_%m_%d__%H_%M_%S')
    response = tator_api.create_project(project_spec={
        'name': f'algo_test_project_{dt_str}',
        'summary': f'Algo test project created by tator-py unit tests on {current_dt}',
        'organization': organization,
    })
    project_id = response.id
    yield project_id
    if not keep:
        status = tator_api.delete_project(project_id)
@pytest.fixture(scope='session')
def image_type(request, project):
    """Media type ID for the test image type (with all test attributes)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_media_type(project, media_type_spec={
        'name': 'image_type',
        'description': 'Test image type',
        'project': project,
        'dtype': 'image',
        'attribute_types': make_attribute_types(),
    })
    image_type_id = response.id
    yield image_type_id

@pytest.fixture(scope='session')
def image_file(request):
    """Path to a small test JPEG, downloaded once and cached in /tmp."""
    out_path = '/tmp/test1.jpg'
    if not os.path.exists(out_path):
        url = 'https://www.gstatic.com/webp/gallery/1.jpg'
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(out_path, 'wb') as f:
                # Stream to disk in chunks to avoid holding the file in memory.
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    yield out_path
@pytest.fixture(scope='session')
def image(request, project, image_type, image_file):
    """Upload the test image and yield its media ID once it is ingested.

    Ingestion is asynchronous, so the media list is polled until the image
    appears; the fixture fails if it does not appear within ``max_retries``
    polls (0.5 s apart).
    """
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    for progress, response in tator.util.upload_media(tator_api, image_type, image_file):
        print(f"Upload image progress: {progress}%")
    print(response.message)
    # Fixes vs. original: the loop-invariant retry cap is hoisted out of the
    # loop, the manual while/counter is a bounded for-else, and the failure
    # raises instead of using `assert` (which `python -O` would strip).
    max_retries = 30
    for _ in range(max_retries):
        medias = tator_api.get_media_list(project, name='test1.jpg')
        if medias:
            image_id = medias[0].id
            break
        time.sleep(0.5)
    else:
        raise RuntimeError(f"Image 'test1.jpg' was not ingested after "
                           f"{max_retries} polls")
    yield image_id
@pytest.fixture(scope='session')
def image_set(request):
    """Path to the extracted LFW image directory; removed after the session."""
    out_path = '/tmp/lfw.tgz'
    extract_path = '/tmp/lfw'
    # Download Labeled Faces in the Wild dataset.
    if not os.path.exists(out_path):
        url = 'http://vis-www.cs.umass.edu/lfw/lfw.tgz'
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(out_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    # Extract the images.
    if not os.path.exists(extract_path):
        os.makedirs(extract_path, exist_ok=True)
        tar = tarfile.open(out_path)
        for item in tar:
            tar.extract(item, extract_path)
    image_path = os.path.join(extract_path, 'lfw')
    yield image_path
    # Teardown: remove the extracted tree (the tarball cache is kept).
    shutil.rmtree(extract_path)

@pytest.fixture(scope='session')
def video_type(request, project):
    """Media type ID for the test video type (with all test attributes)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_media_type(project, media_type_spec={
        'name': 'video_type',
        'description': 'Test video type',
        'project': project,
        'dtype': 'video',
        'attribute_types': make_attribute_types(),
    })
    video_type_id = response.id
    yield video_type_id

@pytest.fixture(scope='session')
def multi_type(request, project):
    """Media type ID for the test multi-stream type."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_media_type(project, media_type_spec={
        'name': 'multi_type',
        'description': 'Test multi type',
        'project': project,
        'dtype': 'multi',
        'attribute_types': make_attribute_types(),
    })
    multi_type_id = response.id
    yield multi_type_id

@pytest.fixture(scope='session')
def video_file(request):
    """Path to the test MP4, downloaded once and cached in /tmp."""
    out_path = '/tmp/AudioVideoSyncTest_BallastMedia.mp4'
    if not os.path.exists(out_path):
        url = 'http://www.ballastmedia.com/wp-content/uploads/AudioVideoSyncTest_BallastMedia.mp4'
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(out_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    yield out_path
@pytest.fixture(scope='session')
def video(request, project, video_type, video_file):
    """Session-scoped uploaded video; yields its media ID after transcode.

    Blocks until the transcode pipeline reports all four streaming
    resolutions plus the archival copy, then verifies that the attribute
    passed to upload_media survived ingestion.
    """
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    # Random string attribute so we can verify attributes set at upload.
    attributes = {"test_string": str(uuid1())}
    for progress, response in tator.util.upload_media(tator_api, video_type, video_file, attributes=attributes):
        print(f"Upload video progress: {progress}%")
    print(response.message)
    while True:
        response = tator_api.get_media_list(project, name='AudioVideoSyncTest_BallastMedia.mp4')
        print("Waiting for transcode...")
        time.sleep(2.5)
        if len(response) == 0:
            continue
        if response[0].media_files is None:
            continue
        streaming = response[0].media_files.streaming
        have_archival = response[0].media_files.archival is not None
        # Done when all 4 streaming resolutions and the archival copy exist.
        if streaming and have_archival and len(streaming) == 4:
            video_id = response[0].id
            break
    # Check for proper attribute setting via upload_file
    assert response[0].attributes.get("test_string") == attributes.get("test_string")
    # If all is kosher return the video_id
    yield video_id

@pytest.fixture(scope='function')
def video_temp(request, project, video_type, video_file):
    """Function-scoped uploaded video (fresh per test); yields its media ID.

    Unlike ``video``, only waits for *any* streaming plus archival output,
    not all four resolutions.
    """
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    for progress, response in tator.util.upload_media(tator_api, video_type, video_file):
        print(f"Upload video progress: {progress}%")
    print(response.message)
    while True:
        response = tator_api.get_media_list(project, name='AudioVideoSyncTest_BallastMedia.mp4')
        print("Waiting for transcode...")
        time.sleep(2.5)
        if len(response) == 0:
            continue
        if response[0].media_files is None:
            continue
        have_streaming = response[0].media_files.streaming is not None
        have_archival = response[0].media_files.archival is not None
        if have_streaming and have_archival:
            video_id = response[0].id
            break
    yield video_id

@pytest.fixture(scope='session')
def multi(request, project, multi_type, video):
    """Media ID of a 1x1 multi-stream built from the session video."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator.util.make_multi_stream(tator_api, multi_type, [1, 1],
                                            'Test multi', [video], 'Multi Videos')
    multi_id = response.id
    yield multi_id
@pytest.fixture(scope='session')
def dot_type(request, project, video_type, image_type):
    """Localization type ID for dots, attached to both video and image."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_localization_type(project, localization_type_spec={
        'name': 'dot_type',
        'description': 'Test dot type',
        'project': project,
        'media_types': [video_type, image_type],
        'dtype': 'dot',
        'attribute_types': make_attribute_types(),
    })
    dot_type_id = response.id
    yield dot_type_id

@pytest.fixture(scope='session')
def line_type(request, project, video_type, image_type):
    """Localization type ID for lines, attached to both video and image."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_localization_type(project, localization_type_spec={
        'name': 'line_type',
        'description': 'Test line type',
        'project': project,
        'media_types': [video_type, image_type],
        'dtype': 'line',
        'attribute_types': make_attribute_types(),
    })
    line_type_id = response.id
    yield line_type_id

@pytest.fixture(scope='session')
def box_type(request, project, video_type, image_type):
    """Localization type ID for boxes, attached to both video and image."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_localization_type(project, localization_type_spec={
        'name': 'box_type',
        'description': 'Test box type',
        'project': project,
        'media_types': [video_type, image_type],
        'dtype': 'box',
        'attribute_types': make_attribute_types(),
    })
    box_type_id = response.id
    yield box_type_id

@pytest.fixture(scope='session')
def poly_type(request, project, video_type, image_type):
    """Localization type ID for polygons, attached to video and image."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_localization_type(project, localization_type_spec={
        'name': 'poly_type',
        'description': 'Test poly type',
        'project': project,
        'media_types': [video_type, image_type],
        'dtype': 'poly',
        'attribute_types': make_attribute_types(),
    })
    poly_type_id = response.id
    yield poly_type_id

@pytest.fixture(scope='session')
def state_type(request, project, video_type):
    """State type ID with Frame association (per-frame states on video)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_state_type(project, state_type_spec={
        'name': 'state_type',
        'description': 'Test state type',
        'project': project,
        'media_types': [video_type],
        'association': 'Frame',
        'attribute_types': make_attribute_types(),
    })
    state_type_id = response.id
    yield state_type_id

@pytest.fixture(scope='session')
def track_type(request, project, video_type):
    """State type ID with Localization association (tracks)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_state_type(project, state_type_spec={
        'name': 'track_type',
        'description': 'Test track type',
        'project': project,
        'media_types': [video_type],
        'association': 'Localization',
        'attribute_types': make_attribute_types(),
    })
    state_type_id = response.id
    yield state_type_id

@pytest.fixture(scope='session')
def collection_type(request, project, video_type):
    """State type ID with Media association (media collections)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_state_type(project, state_type_spec={
        'name': 'collection_type',
        'description': 'Test collection type',
        'project': project,
        'media_types': [video_type],
        'association': 'Media',
        'attribute_types': make_attribute_types(),
    })
    state_type_id = response.id
    yield state_type_id

@pytest.fixture(scope='session')
def leaf_type(request, project):
    """Leaf type ID in the main test project."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_leaf_type(project, leaf_type_spec={
        'name': 'leaf_type',
        'description': 'Test leaf type',
        'attribute_types': make_attribute_types(),
    })
    leaf_type_id = response.id
    yield leaf_type_id
@pytest.fixture(scope='session')
def clone_project(request, organization):
    """ Project ID for a created project. """
    # Destination project used by clone tests.
    import tator
    host = request.config.option.host
    token = request.config.option.token
    keep = request.config.option.keep
    tator_api = tator.get_api(host, token)
    current_dt = datetime.datetime.now()
    dt_str = current_dt.strftime('%Y_%m_%d__%H_%M_%S')
    response = tator_api.create_project(project_spec={
        'name': f'test_clone_project_{dt_str}',
        'summary': f'Test clone project created by tator-py unit tests on {current_dt}',
        'organization': organization,
    })
    project_id = response.id
    yield project_id
    # Teardown: delete unless --keep was passed.
    if not keep:
        status = tator_api.delete_project(project_id)

@pytest.fixture(scope='session')
def clone_leaf_type(request, clone_project):
    """Leaf type ID inside the clone destination project."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_leaf_type(clone_project, leaf_type_spec={
        'name': 'leaf_type',
        'description': 'Test leaf type',
        'attribute_types': make_attribute_types(),
    })
    leaf_type_id = response.id
    yield leaf_type_id
# Video fixtures for attribute tests
@pytest.fixture(scope='session')
def attribute_video_type(request, project):
    """Video type ID reserved for attribute tests (isolated from others)."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_media_type(project, media_type_spec={
        'name': 'attribute_video_type',
        'description': 'Test video type',
        'project': project,
        'dtype': 'video',
        'attribute_types': make_attribute_types(),
    })
    video_type_id = response.id
    yield video_type_id

@pytest.fixture(scope="session")
def attribute_video_file(request):
    """Separate cached copy of the test MP4 for attribute tests."""
    out_path = f"/tmp/AudioVideoSyncTest_BallastMedia_attribute.mp4"
    if not os.path.exists(out_path):
        url = "http://www.ballastmedia.com/wp-content/uploads/AudioVideoSyncTest_BallastMedia.mp4"
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            with open(out_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    yield out_path

@pytest.fixture(scope="session")
def attribute_video(request, project, attribute_video_type, attribute_video_file):
    """Uploaded attribute-test video; yields its ID after transcode."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    for progress, response in tator.util.upload_media(tator_api, attribute_video_type, attribute_video_file):
        print(f"Upload video progress: {progress}%")
    print(response.message)
    while True:
        response = tator_api.get_media_list(
            project, name="AudioVideoSyncTest_BallastMedia_attribute.mp4"
        )
        print("Waiting for transcode...")
        time.sleep(2.5)
        if len(response) == 0:
            continue
        if response[0].media_files is None:
            continue
        have_streaming = response[0].media_files.streaming is not None
        have_archival = response[0].media_files.archival is not None
        if have_streaming and have_archival:
            video_id = response[0].id
            break
    yield video_id

@pytest.fixture(scope='session')
def attribute_image_type(request, project):
    """Image type ID reserved for attribute tests."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_media_type(project, media_type_spec={
        'name': 'attribute_image_type',
        'description': 'Test image type',
        'project': project,
        'dtype': 'image',
        'attribute_types': make_attribute_types(),
    })
    image_type_id = response.id
    yield image_type_id

@pytest.fixture(scope='session')
def attribute_box_type(request, project, attribute_video_type, attribute_image_type):
    """Box localization type attached to the attribute-test media types."""
    import tator
    host = request.config.option.host
    token = request.config.option.token
    tator_api = tator.get_api(host, token)
    response = tator_api.create_localization_type(project, localization_type_spec={
        'name': 'box_type',
        'description': 'Test box type',
        'project': project,
        'media_types': [attribute_video_type, attribute_image_type],
        'dtype': 'box',
        'attribute_types': make_attribute_types(),
    })
    box_type_id = response.id
    yield box_type_id
| StarcoderdataPython |
3249136 | <reponame>JavierLuna/intcode
from typing import List
import pytest
from intcode.interpreter.state import MachineState, AccessMode
@pytest.fixture
def mocked_program() -> List[int]:
    # Minimal three-cell program used by all the registry tests below.
    return [1, 2, 3]

@pytest.fixture
def mocked_machine_state(mocked_program) -> MachineState:
    # Fresh state per test (default function scope), so mutations in one
    # test cannot leak into another.
    return MachineState(mocked_program)

def test_get_registry_value_position(mocked_program, mocked_machine_state):
    """POSITION mode dereferences program memory at the given index."""
    position = 0
    assert mocked_machine_state.get_registry_value(AccessMode.POSITION, position) == mocked_program[
        position]

def test_get_registry_value_non_existing_position_zero(mocked_program, mocked_machine_state):
    """Reading past the end of the program yields 0, not an error."""
    position = len(mocked_program)
    assert mocked_machine_state.get_registry_value(AccessMode.POSITION, position) == 0

def test_get_registry_value_immediate(mocked_program, mocked_machine_state):
    """IMMEDIATE mode returns the operand itself, ignoring memory."""
    position = 0
    assert mocked_machine_state.get_registry_value(AccessMode.IMMEDIATE, position) == position

def test_get_registry_value_relative():
    """RELATIVE mode dereferences memory at relative_base + operand."""
    program = [1, 2, 3]
    position = 0
    relative_base = 1
    machine_state = MachineState(program, relative_base=relative_base)
    assert machine_state.get_registry_value(AccessMode.RELATIVE, position) == program[
        machine_state.relative_base + position]

@pytest.mark.parametrize("delta", [-1, 0, 1])
def test_shift_relative_base(delta):
    """shift_relative_base adds the (possibly negative) delta to the base."""
    initial_relative_base = 0
    machine_state = MachineState([], relative_base=initial_relative_base)
    machine_state.shift_relative_base(delta)
    assert machine_state.relative_base == delta
| StarcoderdataPython |
1627336 | import re
from typing import Dict, Union, Iterable, Any
from ._BaseClasses import DOMNode
POSSIBLE_TAG_CHILD = Union[str, int, float, DOMNode]
def maketag(name: str) -> type:
    """
    Creates a new class for a tag with the specified name.

    The returned class can be used like those associated with standard HTML
    tags; per the ``tags`` documentation below, classes created this way are
    also added to the ``tags`` dictionary.
    """
tags: Dict[str, type]
"""
Dictionary mapping tag names to the matching class.
If new classes are added by function maketag(), they are also added to this dictionary.
"""
def attribute_mapper(attr: str) -> str:
    """
    For all the classes defined in the module, this function is called to
    transform the keyword arguments into HTML tag attributes.

    This implementation rewrites Vue-style names of the form ``v_<dir>_<arg>``
    into ``v-<dir>:<arg>`` (e.g. ``v_on_click`` -> ``v-on:click``).  Any name
    that does not match that pattern is returned unchanged.
    """
    # NOTE: because ``(.*)`` is greedy, the split happens at the LAST
    # underscore after the leading "v_" (v_bind_href -> v-bind:href).
    return re.sub(r"^v_(.*)_(.*)$", r"v-\1:\2", attr)
# html element classes
class B(DOMNode):
    # Stub signature for the <b> (bold text) element class; the body is
    # intentionally empty — only the constructor signature is declared.
    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],  # child node(s)/text
            attributes: Dict[str, Any] = dict(),  # keyword attributes (see attribute_mapper above)
            style: Dict[str, Any] = dict(),       # inline CSS properties
            Class: str = ''                       # "class" attribute; capitalized to avoid the keyword
    ):
        """ Represents the <b> tag """
class COLGROUP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <colgroup> tag """
class DEL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <del> tag """
class DT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dt> tag """
class INS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ins> tag """
class PICTURE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <picture> tag """
class VIDEO(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <video> tag """
class ABBR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <abbr> tag """
class TH(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <th> tag """
class BUTTON(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <button> tag """
class SCRIPT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <script> tag """
class HEAD(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <head> tag """
class SECTION(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <section> tag """
class VAR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <var> tag """
class APPLET(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <applet> tag """
class TABLE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <table> tag """
class KEYGEN(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <keygen> tag """
class RUBY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ruby> tag """
class OBJECT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <object> tag """
class IMG(DOMNode):
    # Stub signature for the <img> element class; the body is intentionally
    # empty — only the constructor signature is declared.
    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],  # child node(s)/text
            attributes: Dict[str, Any] = dict(),  # keyword attributes (see attribute_mapper above)
            style: Dict[str, Any] = dict(),       # inline CSS properties
            Class: str = ''                       # "class" attribute; capitalized to avoid the keyword
    ):
        """ Represents the <img> tag """
class DIV(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <div> tag """
class ISINDEX(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <isindex> tag """
class TBODY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <tbody> tag """
class MENU(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <menu> tag """
class DFN(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dfn> tag """
class FIELDSET(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <fieldset> tag """
class LABEL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <label> tag """
class COL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <col> tag """
class TEXTAREA(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <textarea> tag """
class CANVAS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <canvas> tag """
class FONT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <font> tag """
class ACRONYM(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <acronym> tag """
class BDI(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <bdi> tag """
class AREA(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <area> tag """
class INPUT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <input> tag """
class DATALIST(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <datalist> tag """
class CITE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <cite> tag """
class ASIDE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <aside> tag """
class U(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <u> tag """
class OL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ol> tag """
class CENTER(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <center> tag """
class SUB(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <sub> tag """
class LEGEND(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <legend> tag """
class NOFRAMES(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <noframes> tag """
class Q(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <q> tag """
class BASE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <base> tag """
class H3(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h3> tag """
class BR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <br> tag """
class RP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <rp> tag """
class BODY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <body> tag """
class FIGCAPTION(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <figcaption> tag """
class TR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <tr> tag """
class RTC(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <rtc> tag """
class TT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <tt> tag """
class BASEFONT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <basefont> tag """
class SUMMARY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <summary> tag """
class RB(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <rb> tag """
class NAV(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <nav> tag """
class CODE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <code> tag """
class STRONG(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <strong> tag """
class KBD(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <kbd> tag """
class HEADER(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <header> tag """
class IFRAME(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <iframe> tag """
class TEMPLATE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <template> tag """
class TFOOT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <tfoot> tag """
class WBR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <wbr> tag """
class DD(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dd> tag """
class FIGURE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <figure> tag """
class OPTION(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <option> tag """
class BDO(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <bdo> tag """
class SMALL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <small> tag """
class PRE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <pre> tag """
class SAMP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <samp> tag """
class BIG(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <big> tag """
class FRAME(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <frame> tag """
class FOOTER(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <footer> tag """
class LI(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <li> tag """
class ADDRESS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <address> tag """
class MAIN(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <main> tag """
class DATA(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <data> tag """
class H4(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h4> tag """
class BLOCKQUOTE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <blockquote> tag """
class COMMAND(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <command> tag """
class DIR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dir> tag """
class TIME(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <time> tag """
class OPTGROUP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <optgroup> tag """
class FRAMESET(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <frameset> tag """
class H5(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h5> tag """
class I(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <i> tag """
class PARAM(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <param> tag """
class TITLE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <title> tag """
class META(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <meta> tag """
class STYLE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <style> tag """
class PROGRESS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <progress> tag """
class MAP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <map> tag """
class TD(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <td> tag """
class MATH(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <math> tag """
class SVG(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <svg> tag """
class DETAILS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <details> tag """
class OUTPUT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <output> tag """
class SELECT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <select> tag """
class S(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <s> tag """
class H6(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h6> tag """
class H1(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h1> tag """
class METER(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <meter> tag """
class P(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <p> tag """
class AUDIO(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <audio> tag """
class HTML(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <html> tag """
class HR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <hr> tag """
class DIALOG(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dialog> tag """
class RT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <rt> tag """
class EM(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <em> tag """
class MENUITEM(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <menuitem> tag """
class NOSCRIPT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <noscript> tag """
class EMBED(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <embed> tag """
class H2(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h2> tag """
class A(DOMNode):
    # Stub signature for the <a> (anchor) element class; the body is
    # intentionally empty — only the constructor signature is declared.
    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],  # child node(s)/text
            attributes: Dict[str, Any] = dict(),  # keyword attributes (see attribute_mapper above)
            style: Dict[str, Any] = dict(),       # inline CSS properties
            Class: str = ''                       # "class" attribute; capitalized to avoid the keyword
    ):
        """ Represents the <a> tag """
class LINK(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <link> tag """
class DL(DOMNode):
    """DOM node class for the HTML ``<dl>`` (description list) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <dl> tag """
class UL(DOMNode):
    """DOM node class for the HTML ``<ul>`` (unordered list) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <ul> tag """
class TRACK(DOMNode):
    """DOM node class for ``<track>`` (timed text track for media elements)."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <track> tag """
class ARTICLE(DOMNode):
    """DOM node class for ``<article>`` (self-contained composition)."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <article> tag """
class SPAN(DOMNode):
    """DOM node class for ``<span>`` (generic inline container)."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <span> tag """
class MARK(DOMNode):
    """DOM node class for the HTML ``<mark>`` (highlighted text) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <mark> tag """
class FORM(DOMNode):
    """DOM node class for the HTML ``<form>`` element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <form> tag """
class CAPTION(DOMNode):
    """DOM node class for the HTML ``<caption>`` (table caption) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <caption> tag """
class SUP(DOMNode):
    """DOM node class for the HTML ``<sup>`` (superscript) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <sup> tag """
class THEAD(DOMNode):
    """DOM node class for the HTML ``<thead>`` (table head) element."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <thead> tag """
class STRIKE(DOMNode):
    """DOM node class for ``<strike>`` (struck-through text; deprecated in HTML5)."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <strike> tag """
class SOURCE(DOMNode):
    """DOM node class for ``<source>`` (media resource for picture/audio/video)."""

    # noinspection PyDefaultArgument
    def __init__(
            self,
            content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
            attributes: Dict[str, Any] = dict(),
            style: Dict[str, Any] = dict(),
            Class: str = ''
    ):
        """ Represents the <source> tag """
| StarcoderdataPython |
3398449 | import torch
import torch.nn as nn
import torch.nn.functional as F
'''
NST with Polynomial Kernel, where d=2 and c=0
It can be treated as matching the Gram matrix of two vectorized feature map.
'''
class NST(nn.Module):
    """Neural Selectivity Transfer loss with a polynomial kernel (d=2, c=0).

    Equivalent to matching the Gram matrices of the L2-normalised,
    spatially flattened student and teacher feature maps.
    """

    def __init__(self):
        super(NST, self).__init__()

    def forward(self, g_s, g_t):
        """Return the NST loss between student features *g_s* and teacher features *g_t*."""
        return self.nst_loss(g_s, g_t)

    def nst_loss(self, f_s, f_t):
        """MSE between the Gram matrices of two 4-D (N, C, H, W) feature maps.

        The spatially larger map is adaptively average-pooled down so both
        maps share one resolution before flattening.
        """
        if f_s.shape[2] > f_t.shape[2]:
            f_s = F.adaptive_avg_pool2d(f_s, (f_t.shape[2], f_t.shape[3]))
        elif f_s.shape[2] < f_t.shape[2]:
            f_t = F.adaptive_avg_pool2d(f_t, (f_s.shape[2], f_s.shape[3]))

        # Flatten H*W into one axis and L2-normalise each channel vector.
        flat_s = F.normalize(f_s.view(f_s.size(0), f_s.size(1), -1), dim=2)
        flat_t = F.normalize(f_t.view(f_t.size(0), f_t.size(1), -1), dim=2)

        return F.mse_loss(self.gram_matrix(flat_s), self.gram_matrix(flat_t))

    def gram_matrix(self, fm):
        """Batched Gram matrix: (N, C, L) -> (N, C, C)."""
        return torch.bmm(fm, fm.transpose(1, 2))
| StarcoderdataPython |
1759613 | from src.easy import search_insert_position_35
def test_search_insert_position():
    """Exercise Solution.search_insert_position on hit, miss and boundary cases."""
    solver = search_insert_position_35.Solution()
    cases = [
        (([1, 3, 5, 6], 7), 4),
        (([1, 3, 5, 6], 5), 2),
        (([1, 3, 5, 6], 4), 2),
        (([3, 5, 6, 10], 1), 0),
    ]
    for args, expected in cases:
        assert solver.search_insert_position(*args) == expected
| StarcoderdataPython |
185342 | <gh_stars>0
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class FoodProcessingConfig(AppConfig):
    """Django app configuration for the food-processing equipment app."""

    name = 'scieio.food_processing'
    # Human-readable, translatable name shown e.g. in the Django admin.
    verbose_name = _("Food Processing Equipment")
| StarcoderdataPython |
163552 | <reponame>RickyMexx/SAC-tf2
from common.utils import *
class Agent:
    """Off-policy RL agent: fills a replay buffer from *train_env*, trains
    *model* on sampled minibatches, and can replay a saved policy on *test_env*.
    """

    def __init__(self, model, replay_buffer, train_env, test_env, replay_start_size,
                 n_episodes, batch_size, n_actions):
        """Store the training components and environment limits.

        :param replay_start_size: minimum buffer fill before training begins.
        :param n_episodes: number of training episodes to run.
        """
        self.model = model
        self.replay_buffer = replay_buffer
        self.train_env = train_env
        self.test_env = test_env
        self.replay_start_size = replay_start_size
        self.batch_size = batch_size
        self.n_episodes = n_episodes
        self.n_actions = n_actions
        # NOTE(review): batch_size is assigned twice (see above); redundant.
        self.batch_size = batch_size
        # Per-episode step cap taken from the gym environment spec.
        self.n_timesteps = train_env.spec.tags.get("wrapper_config.TimeLimit.max_episode_steps")
        self.total_steps = 0
        self.total_episodes = 0

    def train(self):
        """Run the training loop for ``n_episodes`` episodes."""
        # check == 1 until the buffer reaches replay_start_size; also gates
        # the consecutive-actions exploration trick below.
        check = 1
        # NOTE(review): entries stay ``None`` for episodes that never hit
        # ``done`` within n_timesteps -- confirm the plotting helpers accept that.
        episode_lengths = [None] * self.n_episodes
        episode_rewards = [None] * self.n_episodes

        # Parameters for the consecutive actions technique
        cons_acts = 4
        prob_act = 0.5

        # Noise + epsilon parameters (used only by the commented-out OUNoise variant)
        noise = OUNoise(self.n_actions)
        epsilon = 1
        epsilon_min = 0.1
        epsilon_dk = 0.999

        for e in range(self.n_episodes):
            state = self.train_env.reset().astype(np.float32)
            episode_reward = 0
            episode_length = 0

            for k in range(self.n_timesteps):
                action = self.model.action(state)

                #### Techniques to force exploration, useful in sparse rewards environments ####
                # Using the consecutive steps technique: before training starts,
                # occasionally repeat the chosen action a few extra steps
                # (these extra transitions are NOT stored in the buffer).
                if check == 1 and np.random.uniform() < prob_act:
                    # print(self.replay_buffer.n_entries)
                    for i in range(cons_acts):
                        self.train_env.step(action)
                '''
                # Using OUNoise technique + epsilon-greedy
                if np.random.uniform() < epsilon:
                    action = noise.get_action(action, k)
                if check==0 and epsilon > epsilon_min:
                    epsilon = epsilon * epsilon_dk
                '''
                ################################################################################

                new_state, reward, done, _ = self.train_env.step(action)
                new_state = new_state.astype(np.float32)
                episode_length += 1
                self.total_steps += 1
                episode_reward += reward
                self.replay_buffer.add(state, action, reward, new_state, done)

                # Train only once the buffer holds enough transitions.
                if self.replay_buffer.n_entries > self.replay_start_size:
                    if check == 1:
                        print("The buffer is ready, training is starting!")
                        check = 0
                    sample = self.replay_buffer.get_batch(self.batch_size)
                    softq_mean, softq_std, softq_loss, actor_loss, action_logprob_mean = self.model.train(sample,
                                                                                                          np.resize(sample["actions"], [self.batch_size, self.n_actions]),
                                                                                                          self.batch_size)
                    # print("Actor loss is", np.array(actor_loss))
                    # print("Q loss is", np.array(softq_loss))

                state = new_state
                if done:
                    episode_lengths[e] = k
                    episode_rewards[e] = episode_reward
                    self.total_episodes += 1
                    print("Episode n.", self.total_episodes, "is end! The reward is:", episode_reward,
                          ", number of steps:", k)
                    # Checkpoint after every completed episode.
                    self.model.save()
                    break

        plot_episode_stats(episode_lengths, episode_rewards)
        plot_reward(episode_rewards)

    def test(self, model_path):
        """Load the weights at *model_path* and render the greedy policy forever."""
        self.model.load(model_path)
        while True:
            obs, done = self.test_env.reset(), False
            while not done:
                action = self.model.action(obs.astype(np.float32))
                obs, reward, done, info = self.test_env.step(action)
                self.test_env.render()
163980 | <filename>revs/util.py
class Util:
    """Namespace class for small RDF/URI helper utilities."""

    def __init__(self):
        pass

    @staticmethod
    def compress_uri(uri, base_uri, prefix_map):
        """Shorten *uri* relative to *base_uri* or a namespace in *prefix_map*.

        Returns ``'<local>'`` when the URI lives under *base_uri*,
        ``'prefix:local'`` when it matches a mapped namespace, and otherwise
        the URI with surrounding angle brackets stripped.
        """
        stripped = uri.strip('<>')
        if stripped.startswith(base_uri):
            return '<' + stripped[len(base_uri):] + '>'
        for prefix, namespace in prefix_map.items():
            if stripped.startswith(namespace):
                return prefix + ':' + stripped[len(namespace):]
        return stripped
| StarcoderdataPython |
3273991 | <gh_stars>0
from django.apps import AppConfig
class Config(AppConfig):
    """Django app configuration for the ``cases`` app."""

    name = "grandchallenge.cases"

    def ready(self):
        """Hook run once Django's app registry is fully populated."""
        super().ready()
        # Imported for its side effects: registers this app's signal handlers.
        # noinspection PyUnresolvedReferences
        import grandchallenge.cases.signals
| StarcoderdataPython |
3264111 | # =====================================
# generator=datazen
# hash=28f11439ad9a52693ca50830cd17d838
# =====================================
"""
example - description
"""
# Names of the two supported serialization formats.
JSON = "json"
YAML = "yaml"

# quote these because jinja generates with single-quotes and black wants to
# re-format to double-quotes
# NOTE(review): this file is datazen-generated (see the hash header above);
# manual edits will be overwritten on regeneration.
TOP_LIST = "['d', 'e', 'f', 'a', 'b', 'c']"
TOP_DICT = "{'b': {'b': 'b'}, 'a': {'a': 'a'}}"
| StarcoderdataPython |
4834179 | #!/usr/bin/env python
import glob
import sys
import os
from slackviewer.main import main as slackviewer
import boto3
import botocore
# Get latest backup zip file from s3
s3 = boto3.resource('s3')
bucket_name = os.environ['BUCKET_NAME']
backup_filename = 'backup.zip'

# Collect (last_modified, key) for every object so we can pick the newest.
filenames = []
for objects in s3.Bucket(bucket_name).objects.filter():
    filenames.append({'last_modified': objects.last_modified, 'key': objects.key})

# NOTE(review): raises ValueError when the bucket is empty -- confirm that is
# an acceptable failure mode for this container's startup.
newest_s3_filename = max(filenames, key=lambda x: x['last_modified'])

try:
    s3.Bucket(bucket_name).download_file(newest_s3_filename['key'], backup_filename)
except botocore.exceptions.ClientError as e:
    # A 404 means the object vanished between listing and download.
    if e.response['Error']['Code'] == "404":
        print("The object does not exist.")
    else:
        raise

# Set up slack-export-viewer server by faking its CLI invocation via sys.argv.
# NOTE(review): PORT from the environment is a string while the fallback is
# int 5000 -- presumably the CLI parser coerces both; confirm.
port = os.environ.get('PORT') \
    if os.environ.get('PORT') is not None \
    else 5000
# argv[0] is only the program name; the shell-substitution string is never executed.
slackviewer_path = '$(which slack-export-viewer)'
sys.argv[0] = slackviewer_path
sys.argv.append('-z')
sys.argv.append(backup_filename)
sys.argv.append('-p')
sys.argv.append(port)
sys.argv.append('--no-browser')
sys.argv.append('--ip')
sys.argv.append('0.0.0.0')

slackviewer()
| StarcoderdataPython |
3363597 | <gh_stars>1-10
import FWCore.ParameterSet.Config as cms
from ..modules.hltPreEle5WP70OpenUnseeded_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEle5WP70OpenUnseededSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
# CMSSW HLT path: presumably a single-electron trigger (pT > 5 GeV, WP70 ID,
# open, unseeded) -- confirm against the HLT menu definition.
MC_Ele5_WP70_Open_Unseeded = cms.Path(
    HLTBeginSequence +
    hltPreEle5WP70OpenUnseeded +
    HLTEle5WP70OpenUnseededSequence +
    HLTEndSequence
)
| StarcoderdataPython |
1775579 | import unittest
from .isomorphic_strings import Solution
class Test(unittest.TestCase):
    """Unit tests for ``Solution.isIsomorphic`` (isomorphic strings)."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared Solution instance for all test methods.
        cls.sol = Solution()

    def test_example1(self):
        self.assertEqual(self.sol.isIsomorphic("egg", "add"), True)

    def test_example2(self):
        self.assertEqual(self.sol.isIsomorphic("foo", "bar"), False)

    def test_example3(self):
        self.assertEqual(self.sol.isIsomorphic("paper", "title"), True)
| StarcoderdataPython |
1742113 | <filename>site_checker/storage/redis_backend.py
from datetime import timedelta
import json
from typing import Union
import redis
from site_checker.storage.base import AbstractStorage
class RedisStorage(AbstractStorage):
    """Site-check result store backed by Redis, with per-entry expiry."""

    def __init__(self, redis_config=None, expire_in_minutes=30, *args, **kwargs):
        """Create the storage.

        :param redis_config: kwargs forwarded to ``redis.Redis``; when falsy,
            defaults to localhost:6379, db 0.
        :param expire_in_minutes: TTL applied to every stored entry.
        """
        redis_config = redis_config or dict(host='localhost', port=6379, db=0)
        self.redis = redis.Redis(**redis_config)
        self.expire_in_minutes = timedelta(minutes=expire_in_minutes)

    def _get(self, url, *args, **kwargs) -> Union[None, dict]:
        """Return the cached dict for *url*, or ``None`` on a cache miss."""
        raw = self.redis.get(url)
        return None if raw is None else json.loads(raw)

    def _set(self, url, data: dict, *args, **kwargs):
        """Store *data* (JSON-encoded) under *url* with the configured expiry."""
        self.redis.setex(name=url, value=json.dumps(data), time=self.expire_in_minutes)
| StarcoderdataPython |
1742634 | import asyncio
from unittest import mock
import pytest
from waterbutler.core import utils
class TestAsyncRetry:
    """Tests for the ``utils.async_retry`` retry decorator."""

    @pytest.mark.asyncio
    async def test_returns_success(self):
        """A function that succeeds immediately is called exactly once."""
        mock_func = mock.Mock(return_value='Foo')
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        x = await retryable()

        assert x == 'Foo'
        assert mock_func.call_count == 1

    @pytest.mark.asyncio
    async def test_retries_until(self):
        """One failure then success: two calls, result returned."""
        mock_func = mock.Mock(side_effect=[Exception(), 'Foo'])
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        x = await retryable()

        assert x == 'Foo'
        assert mock_func.call_count == 2

    @pytest.mark.asyncio
    async def test_retries_then_raises(self):
        """Persistent failure: initial call + 5 retries, then the exception escapes."""
        mock_func = mock.Mock(side_effect=Exception('Foo'))
        retryable = utils.async_retry(5, 0, raven=None)(mock_func)

        with pytest.raises(Exception) as e:
            coro = await retryable()
        assert e.type == Exception
        assert e.value.args == ('Foo',)
        assert mock_func.call_count == 6

    @pytest.mark.asyncio
    async def test_retries_by_its_self(self):
        """Without awaiting, the decorator schedules its own retries."""
        mock_func = mock.Mock(side_effect=Exception())
        retryable = utils.async_retry(8, 0, raven=None)(mock_func)

        retryable()

        await asyncio.sleep(.1)

        assert mock_func.call_count == 9

    # BUG FIX: this async test was missing the asyncio marker that every other
    # async test in this class carries, so pytest-asyncio (strict mode) never
    # executed it.
    @pytest.mark.asyncio
    async def test_docstring_survives(self):
        """The decorator must preserve the wrapped function's docstring."""
        async def mytest():
            '''This is a docstring'''
            pass

        retryable = utils.async_retry(8, 0, raven=None)(mytest)

        assert retryable.__doc__ == '''This is a docstring'''

    @pytest.mark.asyncio
    async def test_kwargs_work(self):
        """Positional and keyword arguments are forwarded on every attempt."""
        async def mytest(mack, *args, **kwargs):
            mack()
            assert args == ('test', 'Foo')
            assert kwargs == {'test': 'Foo', 'baz': 'bam'}
            return True

        retryable = utils.async_retry(8, 0, raven=None)(mytest)
        merk = mock.Mock(side_effect=[Exception(''), 5])

        fut = retryable(merk, 'test', 'Foo', test='Foo', baz='bam')

        assert await fut
        assert merk.call_count == 2

    @pytest.mark.asyncio
    async def test_all_retry(self):
        """Two un-awaited invocations both exhaust their retries independently."""
        mock_func = mock.Mock(side_effect=Exception())
        retryable = utils.async_retry(8, 0, raven=None)(mock_func)

        retryable()
        retryable()

        await asyncio.sleep(.1)

        assert mock_func.call_count == 18
class TestContentDisposition:
    """Tests for filename sanitisation / Content-Disposition helpers in ``utils``."""

    @pytest.mark.parametrize("filename,expected", [
        ('meow.txt', 'meow.txt'),
        ('résumé.txt', 'resume.txt'),
        (' ¿.surprise', ' .surprise'),
        ('a "file"', 'a \\"file\\"'),
        ('yes\\no', 'yes\\\\no'),
        ('ctrl\x09ch\x08ar', 'ctrl_ch_ar'),
    ])
    def test_strip_for_disposition(self, filename, expected):
        # ASCII-fold accents, escape quotes/backslashes, replace control chars.
        disposition = utils.strip_for_disposition(filename)
        assert disposition == expected

    @pytest.mark.parametrize("filename,expected", [
        (None, 'attachment'),
        ('foo.txt', "attachment; filename=\"foo.txt\"; filename*=UTF-8''foo.txt"),
        (' ¿.surprise', "attachment; filename=\" .surprise\"; filename*=UTF-8''%20%C2%BF.surprise"),
    ])
    def test_content_disposition(self, filename, expected):
        # RFC 6266 header: plain filename plus RFC 5987 UTF-8 fallback.
        disposition = utils.make_disposition(filename)
        assert disposition == expected

    @pytest.mark.parametrize("filename,expected", [
        ('foo.txt', 'foo.txt'),
        ('résumé.docx', 'r%C3%A9sum%C3%A9.docx'),
        ('oh no/why+stop.txt', 'oh%20no/why%2Bstop.txt')
    ])
    def test_disposition_encoding(self, filename, expected):
        # Percent-encoding for the filename* parameter.
        encoded = utils.encode_for_disposition(filename)
        assert encoded == expected
| StarcoderdataPython |
129613 | <reponame>maxtaylordavies/BigGAN-PyTorch<filename>datasets.py
''' Datasets
This file contains definitions for our CIFAR, ImageFolder, and HDF5 datasets
'''
import os
import os.path
import sys
from PIL import Image
import numpy as np
from tqdm import tqdm, trange
import h5py as h5
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.datasets.utils import download_url, check_integrity
import torch.utils.data as data
from torch.utils.data import DataLoader
# Lowercase image-file extensions this module recognises.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']


def is_image_file(filename):
    """Checks if a file is an image.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    # str.endswith accepts a tuple of suffixes -- one C-level call instead of
    # a Python-level any() over the extension list.
    return filename.lower().endswith(tuple(IMG_EXTENSIONS))
def find_classes(dir):
    """Return (sorted class names, name -> index map) for the subdirectories of *dir*."""
    entries = os.listdir(dir)
    classes = sorted(e for e in entries if os.path.isdir(os.path.join(dir, e)))
    class_to_idx = {name: i for i, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(dir, class_to_idx):
    """Walk *dir* and collect a (path, class_index) pair for every image file.

    Class subdirectories, walk roots and filenames are all visited in sorted
    order so the resulting index is deterministic.
    """
    dir = os.path.expanduser(dir)
    images = []
    for target in tqdm(sorted(os.listdir(dir))):
        class_dir = os.path.join(dir, target)
        if not os.path.isdir(class_dir):
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            for fname in sorted(fnames):
                if is_image_file(fname):
                    images.append((os.path.join(root, fname), class_to_idx[target]))
    return images
def pil_loader(path):
    """Load *path* as an RGB PIL image.

    Opened through an explicit file object to avoid ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as f:
        return Image.open(f).convert('RGB')
def accimage_loader(path):
    """Load *path* with accimage, falling back to PIL on decode failure."""
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem; PIL handles more formats.
        return pil_loader(path)
def default_loader(path):
    """Dispatch to the image loader matching torchvision's configured backend."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
class SWET(data.Dataset):
    """ImageFolder-style dataset rooted at *root* with a cached directory index.

    The (path, class_index) list is computed once by ``make_dataset`` and
    cached in ``index_filename`` (``np.savez``) so later runs skip the walk.
    NOTE(review): same logic as ``ImageFolder`` below except for the default
    ``index_filename`` -- consider deduplicating.
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, load_in_mem=False,
                 index_filename='swet_imgs.npz', **kwargs):
        classes, class_to_idx = find_classes(root)
        # Load pre-computed image directory walk
        if os.path.exists(index_filename):
            print('Loading pre-saved Index file %s...' % index_filename)
            imgs = np.load(index_filename)['imgs']
        # If first time, walk the folder directory and save the
        # results to a pre-computed file.
        else:
            print('Generating Index file %s...' % index_filename)
            imgs = make_dataset(root, class_to_idx)
            np.savez_compressed(index_filename, **{'imgs': imgs})
        if len(imgs) == 0:
            raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                                "Supported image extensions are: " + ",".join(
                IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        self.load_in_mem = load_in_mem

        # Optionally pre-decode every image; the transform is applied once here,
        # so __getitem__ skips it for the in-memory path.
        if self.load_in_mem:
            print('Loading all images into memory...')
            self.data, self.labels = [], []
            for index in tqdm(range(len(self.imgs))):
                path, target = imgs[index][0], imgs[index][1]
                self.data.append(self.transform(self.loader(path)))
                self.labels.append(target)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        if self.load_in_mem:
            img = self.data[index]
            target = self.labels[index]
        else:
            path, target = self.imgs[index]
            img = self.loader(str(path))
            if self.transform is not None:
                img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # print(img.size(), target)
        return img, int(target)

    def __len__(self):
        return len(self.imgs)

    def __repr__(self):
        # Human-readable summary: size, root, and configured transforms.
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
class SWET_HDF5(data.Dataset):
    """SWET dataset served from a single HDF5 file with ``imgs`` and ``labels``.

    Images are returned as float tensors scaled to [-1, 1]; the stored
    ``transform`` is NOT applied in ``__getitem__`` (see the commented-out
    call there).
    """

    def __init__(self, root, transform=None, target_transform=None,
                 load_in_mem=False, train=True, download=False, validate_seed=0,
                 val_split=0, **kwargs):  # last four are dummies

        self.root = root
        self.num_imgs = len(h5.File(root, 'r')['labels'])

        # self.transform = transform
        self.target_transform = target_transform

        # Set the transform here
        self.transform = transform

        # load the entire dataset into memory?
        self.load_in_mem = load_in_mem

        # If loading into memory, do so now
        if self.load_in_mem:
            print('Loading %s into memory...' % root)
            with h5.File(root, 'r') as f:
                self.data = f['imgs'][:]
                self.labels = f['labels'][:]

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        # If loaded the entire dataset in RAM, get image from memory
        if self.load_in_mem:
            img = self.data[index]
            target = self.labels[index]

        # Else load it from disk
        # NOTE(review): reopening the HDF5 file per item is simple but may be
        # slow with many DataLoader workers -- confirm acceptable.
        else:
            with h5.File(self.root, 'r') as f:
                img = f['imgs'][index]
                target = f['labels'][index]

        # if self.transform is not None:
        #     img = self.transform(img)
        # Apply my own transform: uint8 [0, 255] -> float [-1, 1].
        img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, int(target)

    def __len__(self):
        return self.num_imgs
        # return len(self.f['imgs'])
class ImageFolder(data.Dataset):
    """A generic data loader where the images are arranged in this way: ::

        root/dogball/xxx.png
        root/dogball/xxy.png
        root/dogball/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png

    The (path, class_index) index is cached in ``index_filename`` so repeat
    runs skip the directory walk.

    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that  takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.

     Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader, load_in_mem=False,
                 index_filename='imagenet_imgs.npz', **kwargs):
        classes, class_to_idx = find_classes(root)
        # Load pre-computed image directory walk
        if os.path.exists(index_filename):
            print('Loading pre-saved Index file %s...' % index_filename)
            imgs = np.load(index_filename)['imgs']
        # If first time, walk the folder directory and save the
        # results to a pre-computed file.
        else:
            print('Generating  Index file %s...' % index_filename)
            imgs = make_dataset(root, class_to_idx)
            np.savez_compressed(index_filename, **{'imgs': imgs})
        if len(imgs) == 0:
            raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                                "Supported image extensions are: " + ",".join(
                IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
        self.load_in_mem = load_in_mem

        # Optionally pre-decode every image; the transform is applied once here,
        # so __getitem__ skips it for the in-memory path.
        if self.load_in_mem:
            print('Loading all images into memory...')
            self.data, self.labels = [], []
            for index in tqdm(range(len(self.imgs))):
                path, target = imgs[index][0], imgs[index][1]
                self.data.append(self.transform(self.loader(path)))
                self.labels.append(target)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        if self.load_in_mem:
            img = self.data[index]
            target = self.labels[index]
        else:
            path, target = self.imgs[index]
            img = self.loader(str(path))
            if self.transform is not None:
                img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # print(img.size(), target)
        return img, int(target)

    def __len__(self):
        return len(self.imgs)

    def __repr__(self):
        # Human-readable summary: size, root, and configured transforms.
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
''' ILSVRC_HDF5: A dataset to support I/O from an HDF5 to avoid
having to load individual images all the time. '''
class ILSVRC_HDF5(data.Dataset):
    """ImageNet served from a single HDF5 file, avoiding per-image file I/O.

    Images are returned as float tensors scaled to [-1, 1]; the stored
    ``transform`` is NOT applied in ``__getitem__`` (see the commented-out
    call there).
    """

    def __init__(self, root, transform=None, target_transform=None,
                 load_in_mem=False, train=True, download=False, validate_seed=0,
                 val_split=0, **kwargs):  # last four are dummies

        self.root = root
        self.num_imgs = len(h5.File(root, 'r')['labels'])

        # self.transform = transform
        self.target_transform = target_transform

        # Set the transform here
        self.transform = transform

        # load the entire dataset into memory?
        self.load_in_mem = load_in_mem

        # If loading into memory, do so now
        if self.load_in_mem:
            print('Loading %s into memory...' % root)
            with h5.File(root, 'r') as f:
                self.data = f['imgs'][:]
                self.labels = f['labels'][:]

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        # If loaded the entire dataset in RAM, get image from memory
        if self.load_in_mem:
            img = self.data[index]
            target = self.labels[index]

        # Else load it from disk
        # NOTE(review): reopening the HDF5 file per item is simple but may be
        # slow with many DataLoader workers -- confirm acceptable.
        else:
            with h5.File(self.root, 'r') as f:
                img = f['imgs'][index]
                target = f['labels'][index]

        # if self.transform is not None:
        #     img = self.transform(img)
        # Apply my own transform: uint8 [0, 255] -> float [-1, 1].
        img = ((torch.from_numpy(img).float() / 255) - 0.5) * 2

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, int(target)

    def __len__(self):
        return self.num_imgs
        # return len(self.f['imgs'])
import pickle
class CIFAR10(dset.CIFAR10):
    """CIFAR-10 with an optional stratified train/validation split.

    ``train`` may be True (training split), False (test split) or the string
    ``'validate'`` (the held-out validation indices). ``val_split`` is the
    fraction of the 50k training images reserved for validation, sampled
    per class with ``validate_seed``.
    """

    def __init__(self, root, train=True,
                 transform=None, target_transform=None,
                 download=True, validate_seed=0,
                 val_split=0, load_in_mem=True, **kwargs):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.val_split = val_split

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        # now load the picked numpy arrays
        self.data = []
        self.labels = []
        for fentry in self.train_list:
            f = fentry[0]
            file = os.path.join(self.root, self.base_folder, f)
            fo = open(file, 'rb')
            if sys.version_info[0] == 2:
                entry = pickle.load(fo)
            else:
                entry = pickle.load(fo, encoding='latin1')
            self.data.append(entry['data'])
            # CIFAR-100 batches use 'fine_labels' instead of 'labels'.
            if 'labels' in entry:
                self.labels += entry['labels']
            else:
                self.labels += entry['fine_labels']
            fo.close()

        self.data = np.concatenate(self.data)
        # Randomly select indices for validation
        if self.val_split > 0:
            # Bucket sample indices by class for a stratified draw.
            label_indices = [[] for _ in range(max(self.labels) + 1)]
            for i, l in enumerate(self.labels):
                label_indices[l] += [i]
            # NOTE(review): np.asarray on a ragged list relies on legacy
            # object-array coercion; newer NumPy versions raise -- confirm
            # the pinned NumPy version.
            label_indices = np.asarray(label_indices)

            # randomly grab 500 elements of each class
            np.random.seed(validate_seed)
            self.val_indices = []
            for l_i in label_indices:
                self.val_indices += list(l_i[np.random.choice(len(l_i),
                                                              int(len(self.data) * val_split) // (max(self.labels) + 1),
                                                              replace=False)])

        if self.train == 'validate':
            self.data = self.data[self.val_indices]
            self.labels = list(np.asarray(self.labels)[self.val_indices])

            self.data = self.data.reshape((int(50e3 * self.val_split), 3, 32, 32))
            self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        elif self.train:
            print(np.shape(self.data))
            if self.val_split > 0:
                # Drop the validation indices from the training split.
                self.data = np.delete(self.data, self.val_indices, axis=0)
                self.labels = list(np.delete(np.asarray(self.labels), self.val_indices, axis=0))

            self.data = self.data.reshape((int(50e3 * (1. - self.val_split)), 3, 32, 32))
            self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        else:
            f = self.test_list[0][0]
            file = os.path.join(self.root, self.base_folder, f)
            fo = open(file, 'rb')
            if sys.version_info[0] == 2:
                entry = pickle.load(fo)
            else:
                entry = pickle.load(fo, encoding='latin1')
            self.data = entry['data']
            if 'labels' in entry:
                self.labels = entry['labels']
            else:
                self.labels = entry['fine_labels']
            fo.close()
            self.data = self.data.reshape((10000, 3, 32, 32))
            self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.labels[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        return len(self.data)
class CIFAR100(CIFAR10):
    """CIFAR-100 variant: same loading logic as CIFAR10, different archive
    metadata (folder, URL, md5 checksums) and 'fine_labels' in the batches.
    """

    base_folder = 'cifar-100-python'
    url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]

    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
| StarcoderdataPython |
1673360 | <reponame>enosteteo/Introducao-a-Programacao-P1<filename>4. Estrutura de Repeticao While/Lista 02/programa 02.py
# Read 25 integers from stdin and report how many are even and non-negative.
even_non_negative_count = 0
for _ in range(25):
    numero = int(input())
    if numero >= 0 and numero % 2 == 0:
        even_non_negative_count += 1
print(even_non_negative_count)
| StarcoderdataPython |
3288827 | from .Address import Address
from .Authorization import Authorization
from .Avatar import Avatar
from .BankAccount import BankAccount
from .Contract import Contract
from .DigitalAssetAddress import DigitalAssetAddress
from .EmailAddress import EmailAddress
from .Error import Error
from .JoinOrganizationInvitation import JoinOrganizationInvitation
from .KeyStoreKey import KeyStoreKey
from .Notification import Notification
from .PhoneNumberValidation import PhoneNumberValidation
from .Phonenumber import Phonenumber
from .PublicKey import PublicKey
from .RegistryEntry import RegistryEntry
from .See import See
from .SeeView import SeeView
from .TOTPSecret import TOTPSecret
from .TwoFAMethods import TwoFAMethods
from .User import User
from .UserAPIKey import UserAPIKey
from .UserOrganizations import UserOrganizations
from .api_response import APIResponse
from .unhandled_api_error import UnhandledAPIError
from .unmarshall_error import UnmarshallError
from .userview import userview
from Jumpscale import j
class UsersService:
def __init__(self, client):
pass
self.client = client
def GetAvatarImage(self, hash, headers=None, query_params=None, content_type="application/json"):
"""
Get the avatar file associated with this id
It is method for GET /users/avatar/img/{hash}
"""
uri = self.client.base_url + "/users/avatar/img/" + hash
return self.client.get(uri, None, headers, query_params, content_type)
def DeleteUserAddress(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes an address
It is method for DELETE /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserAddressByLabel(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the details of an address.
It is method for GET /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Address(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateUserAddress(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the label and/or value of an existing address.
It is method for PUT /users/{username}/addresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/addresses/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def GetUserAddresses(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List of all of the user his addresses.
It is method for GET /users/{username}/addresses
"""
uri = self.client.base_url + "/users/" + username + "/addresses"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Address(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RegisterNewUserAddress(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Register a new address
It is method for POST /users/{username}/addresses
"""
uri = self.client.base_url + "/users/" + username + "/addresses"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Address(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAPIkey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes an API key
It is method for DELETE /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetAPIkey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get an API key by label
It is method for GET /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=UserAPIKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAPIkey(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Updates the label for the API key
It is method for PUT /users/{username}/apikeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/apikeys/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def ListAPIKeys(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists the API keys
It is method for GET /users/{username}/apikeys
"""
uri = self.client.base_url + "/users/" + username + "/apikeys"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(UserAPIKey(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddApiKey(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Adds an APIKey to the user
It is method for POST /users/{username}/apikeys
"""
uri = self.client.base_url + "/users/" + username + "/apikeys"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=UserAPIKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAuthorization(
self, grantedTo, username, headers=None, query_params=None, content_type="application/json"
):
"""
Remove the authorization for an organization, the granted organization will no longer have access the user's information.
It is method for DELETE /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
return self.client.delete(uri, None, headers, query_params, content_type)
def GetAuthorization(self, grantedTo, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the authorization for a specific organization.
It is method for GET /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Authorization(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAuthorization(
self, data, grantedTo, username, headers=None, query_params=None, content_type="application/json"
):
"""
Modify which information an organization is able to see.
It is method for PUT /users/{username}/authorizations/{grantedTo}
"""
uri = self.client.base_url + "/users/" + username + "/authorizations/" + grantedTo
return self.client.put(uri, data, headers, query_params, content_type)
def GetAllAuthorizations(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the list of authorizations.
It is method for GET /users/{username}/authorizations
"""
uri = self.client.base_url + "/users/" + username + "/authorizations"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Authorization(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateAvatarFromImage(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Create a new avatar with the specified label from a provided image file
It is method for POST /users/{username}/avatar/img/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/img/" + label
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateAvatarFile(
self, data, newlabel, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the avatar and possibly the avatar file stored on itsyou.online
It is method for PUT /users/{username}/avatar/{label}/to/{newlabel}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label + "/to/" + newlabel
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteAvatar(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Delete the avatar with the specified label
It is method for DELETE /users/{username}/avatar/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def UpdateAvatarLink(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Update the avatar and possibly the link to the avatar
It is method for PUT /users/{username}/avatar/{label}
"""
uri = self.client.base_url + "/users/" + username + "/avatar/" + label
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetAvatars(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List all avatars for the user
It is method for GET /users/{username}/avatar
"""
uri = self.client.base_url + "/users/" + username + "/avatar"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateAvatarFromLink(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Create a new avatar with the specified label from a link
It is method for POST /users/{username}/avatar
"""
uri = self.client.base_url + "/users/" + username + "/avatar"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Avatar(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteUserBankAccount(self, username, label, headers=None, query_params=None, content_type="application/json"):
"""
Delete a BankAccount
It is method for DELETE /users/{username}/banks/{label}
"""
uri = self.client.base_url + "/users/" + username + "/banks/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserBankAccountByLabel(
self, username, label, headers=None, query_params=None, content_type="application/json"
):
"""
Get the details of a bank account
It is method for GET /users/{username}/banks/{label}
"""
uri = self.client.base_url + "/users/" + username + "/banks/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=BankAccount(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateUserBankAccount(
self, data, username, label, headers=None, query_params=None, content_type="application/json"
):
"""
Update an existing bankaccount and label.
It is method for PUT /users/{username}/banks/{label}
"""
uri = self.client.base_url + "/users/" + username + "/banks/" + label
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=BankAccount(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetUserBankAccounts(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List of the user his bank accounts.
It is method for GET /users/{username}/banks
"""
uri = self.client.base_url + "/users/" + username + "/banks"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(BankAccount(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateUserBankAccount(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Create new bank account
It is method for POST /users/{username}/banks
"""
uri = self.client.base_url + "/users/" + username + "/banks"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=BankAccount(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetUserContracts(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the contracts where the user is 1 of the parties. Order descending by date.
It is method for GET /users/{username}/contracts
"""
uri = self.client.base_url + "/users/" + username + "/contracts"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Contract(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateUserContract(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Create a new contract.
It is method for POST /users/{username}/contracts
"""
uri = self.client.base_url + "/users/" + username + "/contracts"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Contract(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteDigitalAssetAddress(
self, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Removes an address
It is method for DELETE /users/{username}/digitalwallet/{label}
"""
uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetDigitalAssetAddressByLabel(
self, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Get the details of a digital wallet address.
It is method for GET /users/{username}/digitalwallet/{label}
"""
uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=DigitalAssetAddress(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateDigitalAssetAddress(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the label and/or value of an existing address.
It is method for PUT /users/{username}/digitalwallet/{label}
"""
uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def GetDigitalWallet(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List all of the user his digital wallets.
It is method for GET /users/{username}/digitalwallet
"""
uri = self.client.base_url + "/users/" + username + "/digitalwallet"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(DigitalAssetAddress(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RegisterNewDigitalAssetAddress(
self, data, username, headers=None, query_params=None, content_type="application/json"
):
"""
Register a new digital asset address
It is method for POST /users/{username}/digitalwallet
"""
uri = self.client.base_url + "/users/" + username + "/digitalwallet"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=DigitalAssetAddress(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ValidateEmailAddress(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Sends validation email to email address
It is method for POST /users/{username}/emailaddresses/{label}/validate
"""
uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label + "/validate"
return self.client.post(uri, data, headers, query_params, content_type)
def DeleteEmailAddress(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes an email address
It is method for DELETE /users/{username}/emailaddresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def UpdateEmailAddress(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Updates the label and/or value of an email address
It is method for PUT /users/{username}/emailaddresses/{label}
"""
uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def GetEmailAddresses(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a list of the user his email addresses.
It is method for GET /users/{username}/emailaddresses
"""
uri = self.client.base_url + "/users/" + username + "/emailaddresses"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(EmailAddress(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RegisterNewEmailAddress(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Register a new email address
It is method for POST /users/{username}/emailaddresses
"""
uri = self.client.base_url + "/users/" + username + "/emailaddresses"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=EmailAddress(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteFacebookAccount(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Delete the associated facebook account
It is method for DELETE /users/{username}/facebook
"""
uri = self.client.base_url + "/users/" + username + "/facebook"
return self.client.delete(uri, None, headers, query_params, content_type)
def DeleteGithubAccount(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Unlink Github Account
It is method for DELETE /users/{username}/github
"""
uri = self.client.base_url + "/users/" + username + "/github"
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserInformation(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get all of the user his information. This will be limited to the scopes that the user has authorized. See https://gig.gitbooks.io/itsyouonline/content/oauth2/scopes.html and https://gig.gitbooks.io/itsyouonline/content/oauth2/availableScopes.html for more information.
It is method for GET /users/{username}/info
"""
uri = self.client.base_url + "/users/" + username + "/info"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=userview(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetKeyStoreKey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Gets the key written to this users keystore for the given label by the accessing organization
It is method for GET /users/{username}/keystore/{label}
"""
uri = self.client.base_url + "/users/" + username + "/keystore/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=KeyStoreKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetKeyStore(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists all keys written to this users keystore by the accessing organization
It is method for GET /users/{username}/keystore
"""
uri = self.client.base_url + "/users/" + username + "/keystore"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(KeyStoreKey(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def SaveKeyStoreKey(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Saves a new key to this users keystore. The username, globalid and timestamp will be overwritten
It is method for POST /users/{username}/keystore
"""
uri = self.client.base_url + "/users/" + username + "/keystore"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=KeyStoreKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateUserName(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Update the user his firstname and lastname
It is method for PUT /users/{username}/name
"""
uri = self.client.base_url + "/users/" + username + "/name"
return self.client.put(uri, data, headers, query_params, content_type)
def GetNotifications(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the list of notifications, these are pending invitations or approvals or other requests.
It is method for GET /users/{username}/notifications
"""
uri = self.client.base_url + "/users/" + username + "/notifications"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Notification(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def LeaveOrganization(self, globalid, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes the user from an organization
It is method for DELETE /users/{username}/organizations/{globalid}/leave
"""
uri = self.client.base_url + "/users/" + username + "/organizations/" + globalid + "/leave"
resp = self.client.delete(uri, None, headers, query_params, content_type)
try:
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RejectMembership(
self, globalid, role, username, headers=None, query_params=None, content_type="application/json"
):
"""
Reject membership invitation in an organization.
It is method for DELETE /users/{username}/organizations/{globalid}/roles/{role}
"""
uri = self.client.base_url + "/users/" + username + "/organizations/" + globalid + "/roles/" + role
return self.client.delete(uri, None, headers, query_params, content_type)
def AcceptMembership(
self, data, globalid, role, username, headers=None, query_params=None, content_type="application/json"
):
"""
Accept membership in organization
It is method for POST /users/{username}/organizations/{globalid}/roles/{role}
"""
uri = self.client.base_url + "/users/" + username + "/organizations/" + globalid + "/roles/" + role
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=JoinOrganizationInvitation(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetUserOrganizations(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the list organizations a user is owner or member of
It is method for GET /users/{username}/organizations
"""
uri = self.client.base_url + "/users/" + username + "/organizations"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=UserOrganizations(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdatePassword(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Update the user his password
It is method for PUT /users/{username}/password
"""
uri = self.client.base_url + "/users/" + username + "/password"
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ValidatePhonenumber(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Sends a validation text message to the phone number.
It is method for POST /users/{username}/phonenumbers/{label}/validate
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers/" + label + "/validate"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=PhoneNumberValidation(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def VerifyPhoneNumber(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Verifies a phone number
It is method for PUT /users/{username}/phonenumbers/{label}/validate
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers/" + label + "/validate"
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteUserPhonenumber(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes a phonenumber
It is method for DELETE /users/{username}/phonenumbers/{label}
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserPhonenumberByLabel(
self, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Get the details of a phone number.
It is method for GET /users/{username}/phonenumbers/{label}
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=Phonenumber(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateUserPhonenumber(
self, data, label, username, headers=None, query_params=None, content_type="application/json"
):
"""
Update the label and/or value of an existing phonenumber.
It is method for PUT /users/{username}/phonenumbers/{label}
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers/" + label
return self.client.put(uri, data, headers, query_params, content_type)
def GetUserPhoneNumbers(self, username, headers=None, query_params=None, content_type="application/json"):
"""
List of all of the user his phone numbers.
It is method for GET /users/{username}/phonenumbers
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(Phonenumber(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RegisterNewUserPhonenumber(
self, data, username, headers=None, query_params=None, content_type="application/json"
):
"""
Register a new phonenumber
It is method for POST /users/{username}/phonenumbers
"""
uri = self.client.base_url + "/users/" + username + "/phonenumbers"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=Phonenumber(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeletePublicKey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Delete a public key
It is method for DELETE /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
return self.client.delete(uri, None, headers, query_params, content_type)
def GetPublicKey(self, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a public key
It is method for GET /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdatePublicKey(self, data, label, username, headers=None, query_params=None, content_type="application/json"):
"""
Upates the label and/or key of an existing public key
It is method for PUT /users/{username}/publickeys/{label}
"""
uri = self.client.base_url + "/users/" + username + "/publickeys/" + label
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ListPublicKeys(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists all public keys
It is method for GET /users/{username}/publickeys
"""
uri = self.client.base_url + "/users/" + username + "/publickeys"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(PublicKey(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddPublicKey(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Add a public key
It is method for POST /users/{username}/publickeys
"""
uri = self.client.base_url + "/users/" + username + "/publickeys"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=PublicKey(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def DeleteUserRegistryEntry(self, key, username, headers=None, query_params=None, content_type="application/json"):
"""
Removes a RegistryEntry from the user's registry
It is method for DELETE /users/{username}/registry/{key}
"""
uri = self.client.base_url + "/users/" + username + "/registry/" + key
return self.client.delete(uri, None, headers, query_params, content_type)
def GetUserRegistryEntry(self, key, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a RegistryEntry from the user's registry.
It is method for GET /users/{username}/registry/{key}
"""
uri = self.client.base_url + "/users/" + username + "/registry/" + key
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=RegistryEntry(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def ListUserRegistry(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Lists the Registry entries
It is method for GET /users/{username}/registry
"""
uri = self.client.base_url + "/users/" + username + "/registry"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(RegistryEntry(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def AddUserRegistryEntry(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Adds a RegistryEntry to the user's registry, if the key is already used, it is overwritten.
It is method for POST /users/{username}/registry
"""
uri = self.client.base_url + "/users/" + username + "/registry"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=RegistryEntry(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def SignSeeObject(
self,
data,
version,
uniqueid,
globalid,
username,
headers=None,
query_params=None,
content_type="application/json",
):
"""
Sign a see object
It is method for PUT /users/{username}/see/{uniqueid}/{globalid}/sign/{version}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid + "/sign/" + version
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetSeeObject(
self, uniqueid, globalid, username, headers=None, query_params=None, content_type="application/json"
):
"""
Get a see object
It is method for GET /users/{username}/see/{uniqueid}/{globalid}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=See(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def UpdateSeeObject(
self, data, uniqueid, globalid, username, headers=None, query_params=None, content_type="application/json"
):
"""
Updates a see object
It is method for PUT /users/{username}/see/{uniqueid}/{globalid}
"""
uri = self.client.base_url + "/users/" + username + "/see/" + uniqueid + "/" + globalid
resp = self.client.put(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetSeeObjects(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a list of all see objects.
It is method for GET /users/{username}/see
"""
uri = self.client.base_url + "/users/" + username + "/see"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
resps = []
for elem in resp.json():
resps.append(SeeView(elem))
return APIResponse(data=resps, response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateSeeObject(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Create new see object
It is method for POST /users/{username}/see
"""
uri = self.client.base_url + "/users/" + username + "/see"
resp = self.client.post(uri, data, headers, query_params, content_type)
try:
if resp.status_code == 201:
return APIResponse(data=SeeView(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def RemoveTOTP(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Disable TOTP two-factor authentication.
It is method for DELETE /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
return self.client.delete(uri, None, headers, query_params, content_type)
def GetTOTPSecret(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get a TOTP secret and issuer that can be used for setting up two-factor authentication.
It is method for GET /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=TOTPSecret(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def SetupTOTP(self, data, username, headers=None, query_params=None, content_type="application/json"):
"""
Enable two-factor authentication using TOTP.
It is method for POST /users/{username}/totp
"""
uri = self.client.base_url + "/users/" + username + "/totp"
return self.client.post(uri, data, headers, query_params, content_type)
def GetTwoFAMethods(self, username, headers=None, query_params=None, content_type="application/json"):
"""
Get the possible two-factor authentication methods"
It is method for GET /users/{username}/twofamethods
"""
uri = self.client.base_url + "/users/" + username + "/twofamethods"
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=TwoFAMethods(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def GetUser(self, username, headers=None, query_params=None, content_type="application/json"):
"""
It is method for GET /users/{username}
"""
uri = self.client.base_url + "/users/" + username
resp = self.client.get(uri, None, headers, query_params, content_type)
try:
if resp.status_code == 200:
return APIResponse(data=User(resp.json()), response=resp)
message = "unknown status code={}".format(resp.status_code)
raise UnhandledAPIError(response=resp, code=resp.status_code, message=message)
except ValueError as msg:
raise UnmarshallError(resp, msg)
except UnhandledAPIError as uae:
raise uae
except Exception as e:
raise UnmarshallError(resp, e.message)
def CreateUser(self, data, headers=None, query_params=None, content_type="application/json"):
"""
Create a new user
It is method for POST /users
"""
uri = self.client.base_url + "/users"
return self.client.post(uri, data, headers, query_params, content_type)
| StarcoderdataPython |
1676570 | <reponame>vbilyi/prometheus_toolbox<gh_stars>0
from .measures import *
| StarcoderdataPython |
1622402 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
import odoo.tests
@odoo.tests.common.tagged('post_install', '-at_install', 'website_snippets')
class TestSnippets(odoo.tests.HttpCase):
    # Browser-based integration tests for website snippets; each test runs a
    # JS tour registered on the client side.
    def test_01_empty_parents_autoremove(self):
        # Runs the "snippet_empty_parent_autoremove" tour with the website
        # editor enabled, as the admin user.
        self.start_tour("/?enable_editor=1", "snippet_empty_parent_autoremove", login='admin')
| StarcoderdataPython |
3395685 | <reponame>c-cube/mc2
#!/usr/bin/python
import subprocess, sys
filename: str = sys.argv[1]

# Step 1: ask z3 for ground truth; only continue when it proves "unsat".
z3_proc = subprocess.run([b'z3', filename], capture_output=True)
if z3_proc.stdout != b'unsat\n':
    sys.exit(0)

# Step 2: on an unsat instance, mc2 claiming "Sat" (without erroring) is a
# soundness bug -- report it and exit non-zero.
mc2_out = subprocess.run([b'./mc2.exe', filename], capture_output=True).stdout
if mc2_out.startswith(b'Sat') and b'Error' not in mc2_out:
    print('ohno', file=sys.stderr)
    sys.exit(1)
| StarcoderdataPython |
3385161 | <gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
import os
import random
import copy
from tensorflow.python.util import nest
from config import *
import os
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0,os.path.join(dir_path, "tensorflow_compact_bilinear_pooling"))
class ResidualWrapper(tf.contrib.rnn.RNNCell):
    """RNNCell wrapper that adds a residual (skip) connection: the wrapped
    cell's inputs are added element-wise to its outputs."""
    def __init__(self, cell):
        """Constructs a `ResidualWrapper` for `cell`.
        Args:
        cell: An instance of `RNNCell`. Its outputs must have the same
            structure and shapes as its inputs for the residual add to work.
        """
        self._cell = cell
    @property
    def state_size(self):
        # Delegates to the wrapped cell; the wrapper itself carries no state.
        return self._cell.state_size
    @property
    def output_size(self):
        # Same as the wrapped cell's output size (residual add preserves shape).
        return self._cell.output_size
    def __call__(self, inputs, state, scope=None):
        """Run the cell and add its inputs to its outputs.
        Args:
        inputs: cell inputs.
        state: cell state.
        scope: optional cell scope.
        Returns:
        Tuple of cell outputs and new state.
        Raises:
        TypeError: If cell inputs and outputs have different structure (type).
        ValueError: If cell inputs and outputs have different structure (value).
        """
        outputs, new_state = self._cell(inputs, state, scope=scope)
        # The residual add only makes sense when inputs and outputs share the
        # same nested structure; fail early otherwise.
        nest.assert_same_structure(inputs, outputs)
        # Ensure shapes match before the element-wise addition.
        def assert_shape_match(inp, out):
            inp.get_shape().assert_is_compatible_with(out.get_shape())
        nest.map_structure(assert_shape_match, inputs, outputs)
        res_outputs = nest.map_structure(
            lambda inp, out: inp + out, inputs, outputs)
        return (res_outputs, new_state)
def add_mc_samples(data, mc_samples):
    """Attach Monte-Carlo generated captions to each image's discriminator
    caption dict (stored under the key 'mc_samples') and return ``data``."""
    dis_captions = data['captions']['dis']
    for name in data['file_names']:
        dis_captions[name]['mc_samples'] = mc_samples[name]['gen']
    return data
def data_loader(data_path=None, data_type='_full', use_mc_samples=False):
    """
    Data format (compatible with Show Attend and Tell):
    each data file is a dict with the following keys:
        'file_names'
        'image_idxs'
        'captions': a dict with keys 'gen' (generator) and 'dis' (discriminator)
        'features': a dict with keys 'gen' and 'dis' (loaded on demand)
        'word_to_idx': a dict with word-to-idx mapping

    Returns [data_train, data_val, data_test, word_embedding].
    """
    def _load(rel_path):
        # np.load on a pickled dict yields a 0-d object array; .item() unwraps it.
        return np.load(os.path.join(data_path, rel_path)).item()

    data_train = _load("data_train_full.npy")
    data_val = _load("data_val_full.npy")
    data_test = _load("data_test_full.npy")

    if use_mc_samples:
        # Optionally merge the dumped Monte-Carlo samples into each split.
        data_train = add_mc_samples(data_train, _load('dumped_train.npy'))
        data_val = add_mc_samples(data_val, _load('dumped_val.npy'))
        data_test = add_mc_samples(data_test, _load('dumped_test.npy'))

    # Discriminator image features (ResNet-152), selected by data_type suffix.
    data_train['features']['dis'] = _load('resnet152/feature_dis_train%s.npy' % (data_type))
    data_val['features']['dis'] = _load('resnet152/feature_dis_val%s.npy' % (data_type))
    data_test['features']['dis'] = _load('resnet152/feature_dis_test%s.npy' % (data_type))

    word_embedding = np.load(
        os.path.join(data_path, 'word_embedding_%s.npy' % (str(Config().embedding_size)))
    )
    return [data_train, data_val, data_test, word_embedding]
class Discriminator(object):
    """Caption discriminator (TF1 graph).

    Scores (image, context caption, candidate caption) pairs with a 2-way
    softmax.  The placeholder ``x`` stacks context captions first and
    candidate captions second along the batch axis; they are separated by
    ``tf.split(..., num_input, axis=0)`` after the LSTM.

    Placeholders:
        x: int32 [batch, num_steps] word ids.
        y_: float32 [batch/num_input, 2] one-hot labels.
        img_feat: float32 [batch, dim_feat] image features.
        lr: scalar learning rate.
    """
    def __init__(self, word_embedding, word_to_idx=None, use_glove=True,
        is_training=True, dim_feat=2048, config=Config(), num_input=2):
        # NOTE(review): ``config=Config()`` is evaluated once at function
        # definition time and then shared -- confirm this is intended.
        self.x = tf.placeholder(tf.int32, [None, config.num_steps])
        self.y_ = tf.placeholder(tf.float32, [None, 2])
        self.img_feat = tf.placeholder(tf.float32, [None, dim_feat])
        self.lr = tf.placeholder(tf.float32)
        self._eos = word_to_idx['<eos>']
        # 1.0 at every <eos> position; later used to select the LSTM output(s)
        # at end-of-sequence.  NOTE(review): if a caption contains several
        # <eos> tokens the masked outputs are summed -- confirm padding format.
        mask = tf.to_float(tf.equal(self.x, self._eos))
        num_steps = config.num_steps
        hidden_size = config.hidden_size
        vocab_size = config.vocab_size
        embedding_size = config.embedding_size
        num_input = config.num_input
        use_img_feat = config.use_img_feat
        use_lstm = config.use_lstm
        combine_typ = config.combine_typ
        cls_hidden = config.cls_hidden
        use_residual = config.use_residual
        # Project the image feature down to the LSTM hidden size.
        img_feat = tf.layers.dense(inputs=self.img_feat, units=hidden_size, activation=None)
        # Cell factory: plain LSTM, optionally wrapped with a residual
        # connection, and with output dropout during training.
        if use_residual:
            def lstm_cell():
                return ResidualWrapper(tf.contrib.rnn.BasicLSTMCell(
                    hidden_size, forget_bias=1.0, state_is_tuple=True))
        else:
            def lstm_cell():
                return tf.contrib.rnn.BasicLSTMCell(
                    hidden_size, forget_bias=1.0, state_is_tuple=True)
        attn_cell = lstm_cell
        if is_training and config.dropout_prob < 1:
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(
                    lstm_cell(), output_keep_prob=config.dropout_prob)
        cell = tf.contrib.rnn.MultiRNNCell(
            [attn_cell() for _ in xrange(config.num_layers)], state_is_tuple=True)
        # Word embedding: either initialized from pre-trained vectors (GloVe)
        # or learned from a uniform random initialization.
        if use_glove:
            embedding = tf.get_variable(
                "embedding", dtype=tf.float32, initializer=tf.constant(word_embedding))
        else:
            embedding = tf.get_variable(
                "embedding", [vocab_size, embedding_size],
                initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0))
        inputs = tf.nn.embedding_lookup(embedding, self.x)
        if use_img_feat == 'concat_bf_lstm':
            raise Exception("use_img_feat=concat_bf_lstm not supported")
            # Unreachable (after the raise); kept for reference only.
            img_reshape = tf.reshape(img_feat, [-1, 1, dim_feat])
            img_tiled = tf.tile(img_reshape, [1, num_steps, 1])
            inputs = tf.concat([inputs, img_tiled], 2)
        if is_training and config.dropout_prob < 1:
            inputs = tf.nn.dropout(inputs, config.dropout_prob)
        if use_lstm:
            # Unrolled LSTM: collect per-timestep outputs in a TensorArray.
            ta_d_outputs = tf.TensorArray(
                dtype=tf.float32, size=num_steps,
                dynamic_size=False, infer_shape=True)
            state = cell.zero_state(tf.shape(inputs)[0], tf.float32)
            with tf.variable_scope("RNN"):
                for time_step in xrange(num_steps):
                    if time_step > 0:
                        tf.get_variable_scope().reuse_variables()
                    (output, state) = cell(inputs[:, time_step, :], state)
                    ta_d_outputs = ta_d_outputs.write(time_step, output)
            # batch_size x seq_length x hidden_size
            ta_d_outputs = tf.transpose(
                ta_d_outputs.stack(), perm=[1, 0, 2])
            # apply the mask: keep only the output(s) at <eos> positions and
            # sum over time to get one vector per sequence.
            mask = tf.expand_dims(mask, -1)
            mask = tf.tile(mask, tf.stack([1, 1, hidden_size]))
            masked_out = ta_d_outputs * mask
            output = tf.reduce_sum(masked_out, axis=1)
            output_context, output_candidate = tf.split(
                output, num_or_size_splits=num_input, axis=0)
        else:
            # Bag-of-embeddings baseline: flatten the embedded sequence.
            inputs = tf.reshape(inputs, [-1, num_steps * embedding_size])
            output_context, output_candidate = tf.split(
                inputs, num_or_size_splits=num_input, axis=0)
        print("-"*80)
        # Optionally inject the image feature into the context representation.
        if use_img_feat == 'concat_af_lstm':
            print("Image feature concatenate after the contextfeature from LSTM")
            imgf_1, imgf_2 = tf.split(img_feat, num_or_size_splits=num_input, axis=0)
            output_context = tf.concat([imgf_1, output_context], axis=1)
        elif use_img_feat == 'only_img':
            print("Image Feature Replacing the Context Feature from LSTM")
            imgf_1, imgf_2 = tf.split(img_feat, num_or_size_splits=num_input, axis=0)
            output_context = imgf_1
        else:
            print("Not using image feature")
        print("-"*80)
        # Combining candidate information with context information
        print("-"*80)
        if combine_typ == 'concat':
            print("Directly concatenate context and candidate feature.")
            output = tf.concat([output_context, output_candidate], axis=1)
        elif combine_typ == 'bilinpool': # compact bilinear
            from compact_bilinear_pooling import compact_bilinear_pooling_layer as compact_bilinear_pooling
            print("Use compact bilinear pooling between candidate/context features.")
            out_dim = 8192
            output_context = tf.expand_dims(tf.expand_dims(output_context, 1), 1)
            output_candidate = tf.expand_dims(tf.expand_dims(output_candidate, 1), 1)
            output = compact_bilinear_pooling(output_context, output_candidate, out_dim)
            output = tf.reshape(output, [-1, out_dim]) # make static time shape
        else:
            print("Use only the candidate feature.")
            output = output_candidate
        print("-"*80)
        # Classifier head: cls_hidden ReLU layers (512 units) + 2-way logits.
        for _ in range(cls_hidden):
            output = tf.layers.dense(inputs=output, units=512, activation=tf.nn.relu)
            if is_training and config.dropout_prob < 1:
                output = tf.nn.dropout(output, config.dropout_prob)
        y = tf.layers.dense(inputs=output, units=2, activation=None)
        score = tf.nn.softmax(y, dim=-1, name=None)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=y))
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(self.y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self._logits = y
        self._score = score
        self._loss = loss
        self._accuracy = accuracy
        if not is_training:
            return
        # Adam with global-norm gradient clipping; lr is fed at run time.
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), config.grad_clip)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())
def train(sess, model, data, gen_model, epoch, dim_feat=2048, config=Config(), verbose=True):
    """Run one training epoch of the discriminator.

    For every sampled image two (context, candidate) pairs are built:
    (real0, real1) labelled "human" and (real0, fake) labelled "fake",
    where the fake caption is drawn from generator output, MC samples, or a
    pathological transformation of a human caption (random caption /
    random-word replacement / word permutation), depending on the config.

    NOTE(review): ``config=Config()`` is a call in a default argument -- it
    is evaluated once at definition time and shared across calls; confirm
    this is intended.  ``range(...)`` results are shuffled/extended in
    place, so this code requires Python 2 (consistent with ``xrange``).

    Returns (loss, accuracy) of the last minibatch.
    """
    start_time = time.time()
    # construct two pairs for each image: (real0, real1), (real0, fake)
    batch_size = int(config.batch_size / 2)
    num_steps = config.num_steps
    num_input = config.num_input
    filename = data['file_names']
    fetches = {
        "loss": model._loss,
        "accuracy": model._accuracy,
        "train_op": model._train_op
    }
    # One pass over every (image, generator) combination; indices are taken
    # modulo len(filename) below to map back to images.
    if len(gen_model) == 0:
        idx = range(len(filename))
    else:
        idx = range(len(filename)*len(gen_model))
    random.shuffle(idx)
    epoch_size = len(idx) // batch_size
    if batch_size * epoch_size < len(idx):
        epoch_size += 1
        idx.extend(idx[:batch_size * epoch_size - len(idx)])
    print(epoch_size)
    # Build the menu of negative-sample sources enabled by the config:
    # 0 = generator captions, 1 = MC samples, 2 = pathological transforms.
    negative_samples_idx = []
    pathological_transf_idx = []
    if len(gen_model) > 0:
        negative_samples_idx.append(0)
    if config.use_random_human or config.use_random_word or config.use_word_permutation:
        negative_samples_idx.append(2)
        if config.use_random_human:
            pathological_transf_idx.append(0)
        if config.use_random_word:
            pathological_transf_idx.append(1)
        if config.use_word_permutation:
            pathological_transf_idx.append(2)
    if config.use_mc_samples:
        negative_samples_idx.append(1)
    print("Negative Samples : %s"%negative_samples_idx)
    print("Pathlogical Samples : %s"%pathological_transf_idx)
    for i in xrange(epoch_size):
        if i == epoch_size - 1:
            idx_batch = idx[batch_size*i:]
        else:
            idx_batch = idx[batch_size*i:batch_size*(i+1)]
        # x stacks [contexts ; candidates] along axis 0 (split by the model).
        x = np.zeros((len(idx_batch)*num_input*2, num_steps), dtype=np.int32)
        y_ = np.zeros((len(idx_batch)*2, 2), dtype=np.float32)
        img = np.zeros((len(idx_batch)*num_input*2, dim_feat))
        idx_batch = [ int(tmp_idx_b % len(filename)) for tmp_idx_b in idx_batch ]
        for j in xrange(len(idx_batch)):
            curr_img = copy.deepcopy(data['features']['dis'][filename[idx_batch[j]]])
            real_cap = copy.deepcopy(data['captions']['dis'][filename[idx_batch[j]]]['human'])
            real_idx = range(len(real_cap))
            random.shuffle(real_idx)
            # 1st pair: (real0, real1) -> label column 0 ("human")
            x[j*2, :] = real_cap[real_idx[0]]
            img[j*2,:] = curr_img
            x[j*2+len(idx_batch)*num_input, :] = real_cap[real_idx[1]]
            img[j*2+len(idx_batch)*num_input, :] = curr_img
            y_[j*2, 0] = 1.0
            # 2nd pair: (real0, fake), fake is sampled from (gen, random_human, random_word)
            # -> label column 1 ("fake")
            x[j*2+1, :] = real_cap[real_idx[0]]
            img[j*2+1,:] = curr_img
            y_[j*2+1, 1] = 1.0
            rand_ind = np.random.choice(negative_samples_idx)
            if rand_ind == 0: # Use machine generated captions
                if type(gen_model) == list:
                    model_idx = range(len(gen_model))
                    random.shuffle(model_idx)
                    chosen_model = gen_model[model_idx[0]]
                else:
                    chosen_model = gen_model
                gen_cap = copy.deepcopy(
                    data['captions']['dis'][filename[idx_batch[j]]][chosen_model])
                if len(gen_cap.shape) == 2:
                    # Several generated captions are stored: pick one at random.
                    gen_idx = range(gen_cap.shape[0])
                    random.shuffle(gen_idx)
                    x[j*2+1+len(idx_batch)*num_input, :] = gen_cap[gen_idx[0], :] # gen_idx[0]
                else:
                    x[j*2+1+len(idx_batch)*num_input, :] = gen_cap
            elif rand_ind == 1: # MC samples
                mc_cap = copy.deepcopy(
                    data['captions']['dis'][filename[idx_batch[j]]]['mc_samples'])
                mc_idx = range(len(mc_cap))
                random.shuffle(mc_idx)
                mc_cap = mc_cap[mc_idx[0]]
                x[j*2+1+len(idx_batch)*num_input, :] = mc_cap
            elif rand_ind == 2:
                # Pathological transform of a human caption.
                rand_ind_2 = np.random.choice(pathological_transf_idx)
                if rand_ind_2 == 0: # Random human caption (from a different image)
                    rand_j = np.random.randint(0,len(filename))
                    while rand_j == idx_batch[j]:
                        rand_j = np.random.randint(0,len(filename))
                    fake_cap = copy.deepcopy(data['captions']['dis'][filename[rand_j]]['human'])
                    fake_idx = range(len(fake_cap))
                    random.shuffle(fake_idx)
                    x[j*2+1+len(idx_batch)*num_input, :] = fake_cap[fake_idx[0]]
                elif rand_ind_2 == 1: # random word replacement of human caption
                    human_cap = copy.deepcopy(
                        data['captions']['dis'][filename[idx_batch[j]]]['human'])
                    human_idx = range(len(human_cap))
                    random.shuffle(human_idx)
                    human_cap = human_cap[human_idx[0]]
                    # Replace a random subset of words before <eos> with
                    # random vocabulary ids (ids 0-3 are reserved tokens).
                    if model._eos in list(human_cap):
                        end_position = list(human_cap).index(model._eos)
                    else:
                        end_position = len(human_cap) - 1
                    n_position = np.random.randint(min(2, end_position - 1), end_position)
                    rand_position = np.random.choice(end_position, size=(n_position,), replace=False)
                    rand_word = np.random.randint(config.vocab_size-4, size=(n_position,)) + 4
                    human_cap[rand_position] = rand_word
                    x[j*2+1+len(idx_batch)*num_input, :] = human_cap
                elif rand_ind_2 == 2: # random permutation of human captions
                    human_cap = copy.deepcopy(
                        data['captions']['dis'][filename[idx_batch[j]]]['human'])
                    human_idx = range(len(human_cap))
                    random.shuffle(human_idx)
                    human_cap = human_cap[human_idx[0]]
                    if model._eos in list(human_cap):
                        end_position = list(human_cap).index(model._eos)
                    else:
                        end_position = len(human_cap) - 1
                    # Shuffle a random subset of word positions; retry once if
                    # the permutation happens to be the identity.
                    n_position = np.random.randint(min(2, end_position - 1), end_position)
                    rand_position = list(np.random.choice(end_position, size=(n_position,), replace=False))
                    rand_position_permutation = list(np.random.permutation(rand_position))
                    if rand_position_permutation == rand_position:
                        rand_position_permutation = list(np.random.permutation(rand_position))
                    human_cap[rand_position] = human_cap[rand_position_permutation]
                    x[j*2+1+len(idx_batch)*num_input, :] = human_cap
                else:
                    raise Exception("random number out of bound")
            else:
                raise Exception("random number out of bound")
            img[j*2+1+len(idx_batch)*num_input,:] = curr_img
        # feed_dict = {model.x: x, model.y_: y_, model.img_feat: img, model.lr : epoch_lr}
        # Exponentially decayed learning rate based on the epoch number.
        effective_lr = config.learning_rate * config.learning_rate_decay ** epoch
        feed_dict = {model.x: x, model.y_: y_, model.img_feat: img, model.lr : effective_lr}
        vals = sess.run(fetches, feed_dict)
        loss = vals["loss"]
        accuracy = vals["accuracy"]
        if verbose and (i % (epoch_size // 10) == 10 or i == epoch_size - 1):
            print("%d / %d loss: %.4f accuracy: %.3f speed: %.3f wps" %
                (i + 1, epoch_size, loss, accuracy,
                i * 1.0 * batch_size * num_steps / (time.time() - start_time)))
    return loss, accuracy
def inference(sess, model, data, gen_model, dim_feat=2048, config=Config()):
    """Evaluate the discriminator on (real, candidate) pairs.

    ``gen_model`` selects the candidate source: 'human' (second human
    caption, labelled human), 'random_human' (caption from another image),
    'random_word' (uniform random word ids), or the name of a caption
    generator stored in the data dict.  All non-'human' candidates are
    labelled fake.

    NOTE(review): ``config=Config()`` default is evaluated once at
    definition time; and ``range``/``shuffle`` usage implies Python 2.

    Returns (acc, logits, scores) as numpy arrays stacked per batch.
    """
    num_steps = config.num_steps
    num_input = config.num_input
    batch_size = config.batch_size
    if 'file_names' in data:
        filename = data['file_names']
    else:
        filename = data['image_ids']
    acc = []
    logits = []
    scores = []
    idx = range(len(filename))
    # Pad the index list so the last batch has exactly batch_size entries.
    epoch_size = len(idx) // batch_size
    if batch_size * epoch_size < len(idx):
        epoch_size += 1
        idx.extend(idx[:batch_size * epoch_size - len(idx)])
    for i in xrange(epoch_size):
        if i == epoch_size - 1:
            idx_batch = idx[batch_size*i:]
        else:
            idx_batch = idx[batch_size*i:batch_size*(i+1)]
        # x stacks [contexts ; candidates] along axis 0 (split by the model).
        x = np.zeros((len(idx_batch)*num_input, num_steps), dtype=np.int32)
        y_ = np.zeros((len(idx_batch), 2), dtype=np.float32)
        y_[:, 1] = 1.0
        img = np.zeros((len(idx_batch)*num_input, dim_feat), dtype=np.float32)
        for j in xrange(len(idx_batch)):
            img_feat = copy.deepcopy(data['features']['dis'][filename[idx_batch[j]]])
            real_cap = copy.deepcopy(data['captions']['dis'][filename[idx_batch[j]]]['human'])
            real_idx = range(len(real_cap))
            random.shuffle(real_idx)
            # Context: one randomly chosen human caption.
            x[j, :] = real_cap[real_idx[0]]
            img[j,:] = img_feat
            if gen_model == 'human':
                # Candidate is a second human caption -> label "human".
                x[j+len(idx_batch), :] = real_cap[real_idx[1]]
                y_[j, 0] = 1.
                y_[j, 1] = 0.
            elif gen_model == 'random_human':
                # Candidate is a human caption from a different image.
                rand_j = random.randint(0,len(filename)-1)
                while rand_j == idx_batch[j]:
                    rand_j = random.randint(0,len(filename)-1)
                fake_cap = copy.deepcopy(data['captions']['dis'][filename[rand_j]]['human'])
                fake_idx = range(len(fake_cap))
                random.shuffle(fake_idx)
                x[j+len(idx_batch), :] = fake_cap[fake_idx[0]]
            elif gen_model == 'random_word':
                # Candidate is noise: uniform word ids (0-3 are reserved).
                x[j+len(idx_batch), :] = np.random.randint(
                    config.vocab_size-4, size=(num_steps,)) + 4
            else:
                # Candidate comes from the named caption generator.
                x[j+len(idx_batch), :] = copy.deepcopy(
                    data['captions']['dis'][filename[idx_batch[j]]][gen_model])
            img[j+len(idx_batch),:] = img_feat
        acc_batch, logits_batch, scores_batch = sess.run([
            model._accuracy, model._logits, model._score],
            {model.x: x, model.y_: y_, model.img_feat:img})
        acc.append(acc_batch)
        logits.append(logits_batch)
        scores.append(scores_batch)
    print('%s Average Score: %.3f Acc: %.3f' \
        % (gen_model, np.mean(np.array(scores)[:,:,0]), np.mean(np.array(acc))))
    return np.array(acc), np.array(logits), np.array(scores)
| StarcoderdataPython |
3270417 | <reponame>hbasria/py-registry
import inspect
from dictutils import OrderedAttrDict
class Registry(OrderedAttrDict):
    """Ordered, attribute-accessible mapping used to register named objects.

    Objects (functions, classes, arbitrary data) are stored under a name that
    is either given explicitly or deduced from the object itself.
    """

    def register_decorator(self, **kwargs):
        """Return a decorator that registers the decorated object.

        The optional ``name`` keyword overrides the name deduced from the
        decorated object.
        """
        name = kwargs.get("name")

        def decorator(decorated):
            self.register_func(data=decorated, name=name)
            return decorated

        return decorator

    def register(self, data=None, name=None, **kwargs):
        """Register ``data`` directly, or act as a decorator factory.

        When called with ``data=None`` (e.g. ``@registry.register(name="x")``)
        a decorator is returned; otherwise ``data`` is registered immediately
        and returned unchanged so the call is transparent.
        """
        if data is None:
            return self.register_decorator(data=data, name=name, **kwargs)
        self.register_func(data=data, name=name, **kwargs)
        return data

    def get_object_name(self, data):
        """
        Return a name from an element (object, class, function...).

        Raises ValueError when no name can be deduced.
        """
        # Check classes first: classes are callable, and the previous
        # fallback (`data.__class__.__name__`) would have returned the
        # metaclass name ("type") instead of the class's own name.
        if inspect.isclass(data):
            return data.__name__
        if callable(data):
            # Plain functions/lambdas carry __name__, but callable
            # *instances* usually do not; use getattr so those fall through
            # to the explicit ValueError instead of an AttributeError.
            name = getattr(data, "__name__", None)
            if name is not None:
                return name
        raise ValueError(
            "Cannot deduce name from given object ({}). Please use registry.register() with a 'name' argument.".format(
                data
            )
        )

    def validate(self, data):
        """
        Called before registering a new value into the registry.
        Override this method if you want to restrict what type of data can
        be registered.
        """
        return True

    def register_func(self, data, name=None, **kwargs):
        """
        Register arbitrary data into the registry under ``name`` (deduced
        from ``data`` when omitted).
        """
        if self.validate(data):
            name = name or self.get_object_name(data)
            self[name] = data
        else:
            raise ValueError(
                "{0} (type: {0.__class__}) is not a valid value for {1} registry".format(
                    data, self.__class__
                )
            )
| StarcoderdataPython |
1672915 | from glob import glob
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    """Print and return per-layer activations of a Keras model.

    model: a built Keras model (must expose ``.input`` and ``.layers``).
    model_inputs: a single input array, or a list of arrays for a
        multi-input model -- assumes shapes match the model; TODO confirm.
    print_shape_only: when True, print each activation's shape instead of
        the full array.
    layer_name: restrict output to the layer with this name; None selects
        every layer.

    Returns the list of activation arrays, one per selected layer.
    """
    import keras.backend as K
    print('----- activations -----')
    activations = []
    inp = model.input
    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False
    # One symbolic output per layer (or only the named layer when given).
    outputs = [layer.output for layer in model.layers if
               layer.name == layer_name or layer_name is None]  # all layer outputs
    # Each function maps (inputs + learning-phase flag) -> one layer's output.
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(1.)
    else:
        list_inputs = [model_inputs, 1.]
    # NOTE(review): the learning-phase flag appended above is 1, which in
    # Keras conventionally means *training* mode (dropout/BN active), not
    # test mode as the original comment claimed -- verify intent.
    # layer_outputs = [func([model_inputs, 1.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
if __name__ == '__main__':
    # Collect checkpoints and load the most recent one (natural sort puts
    # the highest epoch number last).
    checkpoints = glob('checkpoints/*.h5')
    # pip3 install natsort
    from natsort import natsorted
    from keras.models import load_model

    if len(checkpoints) > 0:
        checkpoints = natsorted(checkpoints)
    assert len(checkpoints) != 0, 'No checkpoints found.'
    checkpoint_file = checkpoints[-1]
    print('Loading [{}]'.format(checkpoint_file))
    model = load_model(checkpoint_file)
    # Fix: the loss name was 'mse ' (trailing space), which Keras rejects
    # as an unknown loss identifier.
    model.compile(optimizer='adam',
                  loss='mse',
                  metrics=['accuracy'])
    print(model.summary())
    # NOTE(review): x_test is never defined in this module, so running this
    # block raises NameError -- the data-loading code was presumably dropped;
    # restore it before use.
    get_activations(model, x_test[0:1], print_shape_only=True)  # with just one sample.
    get_activations(model, x_test[0:200], print_shape_only=True)  # with 200 samples.
1654905 | #!/usr/bin/env python
"""
This example shows how to create shipments. The variables populated below
represents the minimum required values. You will need to fill all of these, or
risk seeing a SchemaValidationError exception thrown.
Near the bottom of the module, you'll see some different ways to handle the
label data that is returned with the reply.
"""
import logging
import binascii
from example_config import CONFIG_OBJ
from fedex.services.ship_service import FedexProcessShipmentRequest
# NOTE(review): this example uses Python 2 print statements throughout; it
# will not run under Python 3 without converting them to print() calls.

# Set this to the INFO level to see the response from Fedex printed in stdout.
logging.basicConfig(level=logging.INFO)

# This is the object that will be handling our tracking request.
# We're using the FedexConfig object from example_config.py in this dir.
shipment = FedexProcessShipmentRequest(CONFIG_OBJ)

# This is very generalized, top-level information.
# REGULAR_PICKUP, REQUEST_COURIER, DROP_BOX, BUSINESS_SERVICE_CENTER or STATION
shipment.RequestedShipment.DropoffType = 'REGULAR_PICKUP'

# See page 355 in WS_ShipService.pdf for a full list. Here are the common ones:
# STANDARD_OVERNIGHT, PRIORITY_OVERNIGHT, FEDEX_GROUND, FEDEX_EXPRESS_SAVER
shipment.RequestedShipment.ServiceType = 'PRIORITY_OVERNIGHT'

# What kind of package this will be shipped in.
# FEDEX_BOX, FEDEX_PAK, FEDEX_TUBE, YOUR_PACKAGING
shipment.RequestedShipment.PackagingType = 'FEDEX_PAK'

# Shipper contact info.
shipment.RequestedShipment.Shipper.Contact.PersonName = '<NAME>'
shipment.RequestedShipment.Shipper.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.Shipper.Contact.PhoneNumber = '9012638716'

# Shipper address.
shipment.RequestedShipment.Shipper.Address.StreetLines = ['Address Line 1']
shipment.RequestedShipment.Shipper.Address.City = 'Herndon'
shipment.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'VA'
shipment.RequestedShipment.Shipper.Address.PostalCode = '20171'
shipment.RequestedShipment.Shipper.Address.CountryCode = 'US'
shipment.RequestedShipment.Shipper.Address.Residential = True

# Recipient contact info.
shipment.RequestedShipment.Recipient.Contact.PersonName = '<NAME>'
shipment.RequestedShipment.Recipient.Contact.CompanyName = 'Recipient Company'
shipment.RequestedShipment.Recipient.Contact.PhoneNumber = '9012637906'

# Recipient address
shipment.RequestedShipment.Recipient.Address.StreetLines = ['Address Line 1']
shipment.RequestedShipment.Recipient.Address.City = 'Herndon'
shipment.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'VA'
shipment.RequestedShipment.Recipient.Address.PostalCode = '20171'
shipment.RequestedShipment.Recipient.Address.CountryCode = 'US'
# This is needed to ensure an accurate rate quote with the response.
shipment.RequestedShipment.Recipient.Address.Residential = True
shipment.RequestedShipment.EdtRequestType = 'NONE'
shipment.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.AccountNumber = CONFIG_OBJ.account_number

# Who pays for the shipment?
# RECIPIENT, SENDER or THIRD_PARTY
shipment.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'

# Specifies the label type to be returned.
# LABEL_DATA_ONLY or COMMON2D
shipment.RequestedShipment.LabelSpecification.LabelFormatType = 'COMMON2D'

# Specifies which format the label file will be sent to you in.
# DPL, EPL2, PDF, PNG, ZPLII
shipment.RequestedShipment.LabelSpecification.ImageType = 'PNG'

# To use doctab stocks, you must change ImageType above to one of the
# label printer formats (ZPLII, EPL2, DPL).
# See documentation for paper types, there quite a few.
shipment.RequestedShipment.LabelSpecification.LabelStockType = 'PAPER_4X6'

# This indicates if the top or bottom of the label comes out of the
# printer first.
# BOTTOM_EDGE_OF_TEXT_FIRST or TOP_EDGE_OF_TEXT_FIRST
shipment.RequestedShipment.LabelSpecification.LabelPrintingOrientation = 'BOTTOM_EDGE_OF_TEXT_FIRST'

# Build a one-pound package and attach it to the shipment.
package1_weight = shipment.create_wsdl_object_of_type('Weight')
# Weight, in pounds.
package1_weight.Value = 1.0
package1_weight.Units = "LB"
package1 = shipment.create_wsdl_object_of_type('RequestedPackageLineItem')
package1.PhysicalPackaging = 'BOX'
package1.Weight = package1_weight

# Un-comment this to see the other variables you may set on a package.
#print package1

# This adds the RequestedPackageLineItem WSDL object to the shipment. It
# increments the package count and total weight of the shipment for you.
shipment.add_package(package1)

# If you'd like to see some documentation on the ship service WSDL, un-comment
# this line. (Spammy).
#print shipment.client

# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
#print shipment.RequestedShipment

# If you want to make sure that all of your entered details are valid, you
# can call this and parse it just like you would via send_request(). If
# shipment.response.HighestSeverity == "SUCCESS", your shipment is valid.
#shipment.send_validation_request()

# Fires off the request, sets the 'response' attribute on the object.
shipment.send_request()

# This will show the reply to your shipment being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the Fedex reply.
print shipment.response

# Here is the overall end result of the query.
print "HighestSeverity:", shipment.response.HighestSeverity
# Getting the tracking number from the new shipment.
print "Tracking #:", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].TrackingIds[0].TrackingNumber
# Net shipping costs.
print "Net Shipping Cost (US$):", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].PackageRating.PackageRateDetails[0].NetCharge.Amount

# Get the label image in ASCII format from the reply. Note the list indices
# we're using. You'll need to adjust or iterate through these if your shipment
# has multiple packages.
ascii_label_data = shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].Label.Parts[0].Image
# Convert the ASCII data to binary.
label_binary_data = binascii.a2b_base64(ascii_label_data)

"""
This is an example of how to dump a label to a PNG file.
"""
# This will be the file we write the label out to.
png_file = open('example_shipment_label.png', 'wb')
png_file.write(label_binary_data)
png_file.close()

"""
This is an example of how to print the label to a serial printer. This will not
work for all label printers, consult your printer's documentation for more
details on what formats it can accept.
"""
# Pipe the binary directly to the label printer. Works under Linux
# without requiring PySerial. This WILL NOT work on other platforms.
#label_printer = open("/dev/ttyS0", "w")
#label_printer.write(label_binary_data)
#label_printer.close()

"""
This is a potential cross-platform solution using pySerial. This has not been
tested in a long time and may or may not work. For Windows, Mac, and other
platforms, you may want to go this route.
"""
#import serial
#label_printer = serial.Serial(0)
#print "SELECTED SERIAL PORT: "+ label_printer.portstr
#label_printer.write(label_binary_data)
#label_printer.close()
54268 | from .video_utils import VideoClips
from .utils import list_dir
from .folder import make_dataset
from .vision import VisionDataset
class KineticsVideo(VisionDataset):
    """Kinetics-style video dataset: one class per subdirectory of ``root``,
    containing .avi files, split into fixed-length clips.

    Each item is a ``(video, audio, label)`` tuple for one clip.
    """

    def __init__(self, root, frames_per_clip, step_between_clips=1):
        super(KineticsVideo, self).__init__(root)
        # Class names are the sorted subdirectory names; their sort order
        # defines the integer label of each class.
        class_names = sorted(list_dir(root))
        name_to_index = {name: index for index, name in enumerate(class_names)}
        self.samples = make_dataset(self.root, name_to_index, ('avi',), is_valid_file=None)
        self.classes = class_names
        # Index every video into overlapping clips of frames_per_clip frames.
        video_paths = [sample[0] for sample in self.samples]
        self.video_clips = VideoClips(video_paths, frames_per_clip, step_between_clips)

    def __len__(self):
        # Length is the number of clips, not the number of videos.
        return self.video_clips.num_clips()

    def __getitem__(self, idx):
        video, audio, info, video_index = self.video_clips.get_clip(idx)
        return video, audio, self.samples[video_index][1]
| StarcoderdataPython |
3264243 | #%% Packages and functions
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from lib.functions import *
import seaborn as sns; sns.set()
from datetime import datetime
#%%##################################################################################################
# input parameters
article_name='article_signal.csv'
sub_dict=['diseases','genes'] # sub_dict=['diseases','function','drugs','genes']
weight_sent='mean'#'median'#
word_Th=0.2
#%% -------------------------- GET THE EMBEDDINGS ----------------------------
vectors=Load_doc_embeddibgs(article_name)
#%% Loop over different word thresholds; for each, rank sentences and compare
# the SVD ranking against two fixed manual rankings via Spearman correlation.
comp_rank=[]
Th_val=[]
for word_Th in np.arange(0.6,0.7,0.1):
    List_ranks=[]
    # print('Word Threshold = ',word_Th)
    # Split the text into sentences
    list_of_sentences, list_of_sentences_full, vect_sents, vect_sents_full= Split_Doc_into_sentences(vectors,word_Th,weight_sent)
    #%% ---------------------- STATISTICAL PRE-PROCESSING -------------------------
    topic_mean, topic_std, topic_dot= Statistical_ranking_scores(vect_sents, sub_dict)
    ### ---------------------- SVD-based PRE-RANKING -------------------------
    sentences_SVD_Ranking=SVD_ranking_scores(vect_sents, sub_dict, list_of_sentences_full,list_of_sentences )
    sentences_SVD_Ranking=sentences_SVD_Ranking.sort_values(by='Rank DiDr', ascending=False)
    # SAVE THE RANKS
    #%%
    dateTimeObj = datetime.now();timestampStr = dateTimeObj.strftime("%Y-%b-%d");
    # top sentences to be ranked manually: Top +/-4 (4 best and 4 worst)
    from scipy.stats import pearsonr
    from scipy.stats import spearmanr
    top_sent=4
    Top_sentences_man= pd.concat([sentences_SVD_Ranking[0:top_sent], sentences_SVD_Ranking[-top_sent:]])
    #% Save ranks
    # Top_sentences_man.to_csv('./Outputs/manual_rank/'+article_name[:-4]+'-'+timestampStr+'_sentences_ranking_'+weight_sent+'_Th'+str(word_Th)+'.csv')
    # sentences_SVD_Ranking.to_csv('./Outputs/SVD_ranks/'+article_name[:-4]+'-'+timestampStr+'_sentences_ranking_'+weight_sent+'_Th'+str(word_Th)+'.csv')
    #%% ---------------------- RANKING VALIDATION -------------------------
    #%% Manual ranking CBRC
    #filename_ranks='2019-07-11-Manual_Validatoin_CHR_MHB.csv'
    #corr_ranks=Ranking_validation(filename_ranks)
    # Get ranks
    #List_IDs=[1, 4, 12, 14, 19 , 21, 27, 28]
    Top_sentences_man=Top_sentences_man.sort_index()
    List_IDs=list(Top_sentences_man.index.values)
    Available_IDs=list(set(List_IDs).intersection(sentences_SVD_Ranking.index.values.astype(int)))
    #Top_sentecences = pd.DataFrame(columns=sentences_SVD_Ranking.columns)
    #for i in Available_IDs:
    #    Top_sentecences.loc[i] =sentences_SVD_Ranking.loc[i]
    # NOTE(review): the two manual rankings below are hard-coded and assume
    # Top_sentences_man has exactly 8 rows -- confirm before changing top_sent.
    rank_CHR=[6,4,3,5,8,1,7,2]; Top_sentences_man['Christophe']=rank_CHR
    rank_MHB=[6,4,3,5,8,1,7,2]; Top_sentences_man['Magbubah']=rank_MHB
    Trail_rank=len(List_IDs)-len(Available_IDs)+1
    # NOTE(review): `input` shadows the Python builtin of the same name.
    input=Top_sentences_man['Rank DiDr'].values
    # Convert the SVD scores into 1-based ranks (argsort of argsort).
    indices = list(range(len(input)))
    indices.sort(key=lambda x: input[x])
    rank_SVD_up = [0] * len(indices)
    for i, x in enumerate(indices):
        rank_SVD_up[x] = i+1
    rank_SVD=list( rank_SVD_up) + [ rr for rr in range(Trail_rank-1,0,-1)]
    Top_sentences_man['SVD-DiDr']=rank_SVD
    # Pairwise Spearman correlations between the three rankings.
    Scorr_CM, Sp_value_CM = spearmanr(rank_CHR, rank_MHB); List_ranks.append('(Christophe, Magbubah)')
    Scorr_CS, Sp_value_CS = spearmanr(rank_CHR, rank_SVD); List_ranks.append('(Christophe, SVD-DiDr)')
    Scorr_SM, Sp_value_SM = spearmanr(rank_SVD, rank_MHB); List_ranks.append('(Magbubah, SVD-DiDr)')
    corr_ranks=[ Scorr_CM, Scorr_CS, Scorr_SM ]
    comp_rank.append(corr_ranks)
    Th_val.append(word_Th)
    print(rank_SVD, '--', word_Th, '--SM ', Scorr_SM)
# One column of correlations per threshold value.
comp_rank=np.asanyarray(comp_rank)
comp_rank=comp_rank.T
comp_rank = pd.DataFrame(comp_rank,List_ranks,columns=Th_val)
comp_rank.to_csv('./Outputs/Comparison/TOP_sentences_ranking_'+weight_sent+'_Th'+str(word_Th)+'.csv')
#%% ---------------------- PCA-based PRE-RANKING -------------------------
#sentences_PCA_Ranking=PCA_ranking_scores(vect_sents, sub_dict, list_of_sentences_full,list_of_sentences )
#%% Test Script #################################################################################################
#word='dfdf'
# df = df.reindex(index=a.index)
| StarcoderdataPython |
23450 | <reponame>RiboswitchClassifier/RiboswitchClassification<gh_stars>1-10
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.neural_network import MLPClassifier
import pandas as pd
import csv
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import StandardScaler
import numpy as np
# Train an MLP on the riboswitch feature table, report accuracy, then run a
# grid search over activation/solver/regularization/learning-rate settings.
data = pd.read_csv('processed_datasets/final_32classes.csv')
# Separate out the x_data and y_data.
x_data = data.loc[:, data.columns != "Type"]
x_data = x_data.loc[:,x_data.columns != "Sequence"]
y_data = data.loc[:, "Type"]
random_state = 100
# NOTE(review): test_size=0.7 leaves only 30% of the data for training --
# presumably intentional for this experiment, but worth confirming.
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.7, random_state=100,stratify=y_data)
# Standardize features using statistics from the training split only.
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
mlp = MLPClassifier()
mlp.fit(x_train, y_train)
y_pred_train = mlp.predict(x_train)
y_pred_test = mlp.predict(x_test)
print("classifier", mlp)
print ("Accuracy on Train Set")
print (mlp.score(x_train, y_train))
print ("MLP Classifier")
print ("Accuracy on Test Set")
print (mlp.score(x_test, y_test))
print ("Report")
print (classification_report(y_test,mlp.predict(x_test)))
# Hyper-parameter grid for the 10-fold cross-validated search below.
param_grid = {
    'activation': ['tanh', 'relu'],
    'solver': ['sgd', 'adam'],
    'alpha': [0.0001,0.01, 0.05,0.1,1.0],
    'learning_rate': ['constant','adaptive'],
}
#,2000
#,70
grid_search = GridSearchCV(mlp, param_grid=param_grid,n_jobs=-1,cv=10)
grid_search.fit(x_train,y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)
| StarcoderdataPython |
def accumulate():
    """Placeholder with no behavior yet; intentionally returns None."""
    return None
| StarcoderdataPython |
3257790 | import logging
import pickle
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.cluster import DBSCAN
from config import OUTPUT_FILE, OUTPUT_LABEL_FOLDERS
from src.utils import read_image
logging.info("Loading encodings")
# Load the pickled face records; each entry is a dict with "encoding",
# "image_path" and "loc" (top, right, bottom, left) -- TODO confirm schema
# against the encoder that wrote OUTPUT_FILE.
# Fix: the original `pickle.loads(open(...).read())` leaked the file handle.
with open(OUTPUT_FILE, "rb") as handle:
    data = pickle.loads(handle.read())
df = pd.DataFrame(data)
df.drop(columns=["encoding"], inplace=True)
data = np.array(data)
encodings = [d["encoding"] for d in data]

logging.info("Clustering")
# DBSCAN assigns label -1 to noise (faces not matching any cluster).
clt = DBSCAN(metric="euclidean", n_jobs=-1)
clt.fit(encodings)
label_ids = np.unique(clt.labels_)
n_unique_faces = len(np.where(label_ids > -1)[0])
logging.info(f"Found {n_unique_faces} unique faces")

# One output folder per cluster label (including -1 for noise).
df["label"] = clt.labels_
for label_id in label_ids:
    Path(f"{OUTPUT_LABEL_FOLDERS}/{label_id}").mkdir(parents=True, exist_ok=True)

# Crop each detected face, resize to 96x96 and write it into its cluster folder.
groups = df.groupby("label")
progress_bar = tqdm(total=len(df))
for label_id, group in groups:
    for j, row in group.iterrows():
        image = read_image(str(row["image_path"]))
        (top, right, bottom, left) = row["loc"]
        face = image[top:bottom, left:right]
        face = cv2.resize(face, (96, 96))
        file_path = f"{OUTPUT_LABEL_FOLDERS}/{label_id}/{j}.jpg"
        cv2.imwrite(filename=file_path, img=face)
        progress_bar.update()
# Fix: close the progress bar so its final state is flushed.
progress_bar.close()
| StarcoderdataPython |
171207 | from pyabc import ABCSMC, Distribution
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
import scipy.stats as st
import numpy as np
from datetime import datetime, timedelta
set_acc_rate = 0.2
pop_size = 10
def model(x):
    """Some model"""
    noise = np.random.randn()
    return {"par": x["par"] + noise}
def dist(x, y):
    """Some distance"""
    # Absolute difference of the "par" entries; symmetric in x and y.
    return abs(y["par"] - x["par"])
def test_stop_acceptance_rate_too_low(db_path):
    """Test the acceptance rate condition.

    Runs ABC-SMC until the per-generation acceptance rate drops below
    set_acc_rate, then checks that the run stopped exactly there.
    """
    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, pop_size)
    abc.new(db_path, {"par": .5})
    history = abc.run(-1, 8, min_acceptance_rate=set_acc_rate)
    df = history.get_all_populations()
    df["acceptance_rate"] = df["particles"] / df["samples"]
    # Last generation violated the threshold; the one before did not
    # (unless it was the calibration iteration, t == -1).
    assert df["acceptance_rate"].iloc[-1] < set_acc_rate
    assert df["acceptance_rate"].iloc[-2] >= set_acc_rate \
        or df["t"].iloc[-2] == -1  # calibration iteration
def test_stop_early(db_path):
    """Test early stopping inside a generation."""
    # Samplers configured to stop mid-generation once the budget is hit.
    mc_sampler = MulticoreEvalParallelSampler(check_max_eval=True)
    sc_sampler = SingleCoreSampler(check_max_eval=True)
    for sampler in [mc_sampler, sc_sampler]:
        abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist,
                     pop_size, sampler=sampler)
        abc.new(db_path, {"par": .5})
        history = abc.run(
            max_nr_populations=8, min_acceptance_rate=set_acc_rate)
        df = history.get_all_populations()
        # offset with n_procs as more processes can have run at termination
        n_procs = sampler.n_procs if hasattr(sampler, 'n_procs') else 1
        df["corrected_acceptance_rate"] = \
            df["particles"] / (df["samples"] - (n_procs-1))
        assert df["corrected_acceptance_rate"].iloc[-1] >= set_acc_rate
def test_total_nr_simulations(db_path):
    """Test the total number of samples condition.

    The run may overshoot the budget within the final generation, but must
    not have exceeded it before that generation started.
    """
    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, pop_size)
    abc.new(db_path, {"par": .5})
    max_total_nr_sim = 142
    history = abc.run(-1, 100, max_total_nr_simulations=max_total_nr_sim)
    assert history.total_nr_simulations >= max_total_nr_sim
    # Directly check on the history
    df = history.get_all_populations()
    # Make sure budget is not exceeded yet in previous iteration
    assert sum(df['samples'][:-1]) < max_total_nr_sim
    # Just to make sure .total_nr_simulations does what it's supposed to
    assert sum(df['samples']) == history.total_nr_simulations
def test_max_walltime(db_path):
    """Test the maximum walltime condition.

    With a 500 ms budget the run must terminate after exceeding it, well
    before reaching the 100-generation cap.
    """
    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, pop_size)
    abc.new(db_path, {"par": .5})
    init_walltime = datetime.now()
    max_walltime = timedelta(milliseconds=500)
    history = abc.run(-1, 100, max_walltime=max_walltime)
    assert datetime.now() - init_walltime > max_walltime
    assert history.n_populations < 100
| StarcoderdataPython |
161070 | <gh_stars>0
# Implementation of classic arcade game Pong
import simplegui
import random
# initialize globals - pos and vel encode vertical info for paddles
WIDTH = 600            # canvas width, pixels
HEIGHT = 400           # canvas height, pixels
BALL_RADIUS = 15
PAD_WIDTH = 8
PAD_HEIGHT = 80
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
LEFT = False           # serve directions consumed by spawn_ball()
RIGHT = True
ball_pos = [WIDTH / 2, HEIGHT / 2]   # ball centre [x, y]
ball_vel=[0,0]                       # ball velocity, pixels per frame
# Vertical position of the TOP edge of each paddle.
paddle1_pos, paddle2_pos = (HEIGHT - PAD_HEIGHT)/2, (HEIGHT - PAD_HEIGHT)/2
paddle1_vel=0
paddle2_vel=0
score1=0               # left player's score
score2=0               # right player's score
started=False          # True once the splash screen has been clicked
class ImageInfo:
    """Metadata for a sprite image: centre, size, collision radius,
    lifespan (frames) and whether the image is an animation strip."""

    def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
        self.center = center
        self.size = size
        self.radius = radius
        # A falsy lifespan (None or 0) means the sprite lives forever.
        self.lifespan = lifespan if lifespan else float('inf')
        self.animated = animated

    def get_center(self):
        return self.center

    def get_size(self):
        return self.size

    def get_radius(self):
        return self.radius

    def get_lifespan(self):
        return self.lifespan

    def get_animated(self):
        return self.animated
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("https://i.imgur.com/npmQ5Rb.png")
#ball_vel=[0,0]
# initialize ball_pos and ball_vel for new bal in middle of table
# if direction is RIGHT, the ball's velocity is upper right, else upper left
def spawn_ball(direction):
    """Re-centre the ball and launch it upward toward `direction` (LEFT/RIGHT)."""
    global ball_pos, ball_vel # these are vectors stored as lists
    ball_pos=[WIDTH / 2, HEIGHT / 2]
    # Horizontal speed 2..4 px/frame toward the chosen side; vertical
    # speed 1..3 px/frame, always upward (negative y).
    if direction == RIGHT:
        ball_vel[0]= random.randrange(120, 240) / 60
        ball_vel[1]= -random.randrange(60,180) / 60
    if direction == LEFT:
        ball_vel[0]= -random.randrange(120, 240) / 60
        ball_vel[1]= -random.randrange(60,180) / 60
# define event handlers
def new_game():
    """Serve the ball in a random direction to start a rally.

    NOTE(review): the global declarations below are unused here -- nothing
    is reset in this function; score/ball resetting lives in restart().
    Presumably template leftovers; confirm intent.
    """
    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers
    global score1, score2 # these are ints
    spawn_ball(random.choice([RIGHT,LEFT]))
def draw(canvas):
    """Per-frame handler: move the ball and paddles, resolve collisions and
    scoring, and render everything (plus the splash screen before start)."""
    global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel,started
    # draw mid line and gutters
    canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, "White")
    canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, "White")
    canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, "White")
    # update ball
    ball_pos[0] += ball_vel[0]
    ball_pos[1] += ball_vel[1]
    # draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "white","White")
    # update paddle's vertical position, keep paddle on the screen
    if 0 <= (paddle1_pos + paddle1_vel) <= HEIGHT - PAD_HEIGHT:
        paddle1_pos += paddle1_vel
    if 0 <= (paddle2_pos + paddle2_vel) <= HEIGHT - PAD_HEIGHT:
        paddle2_pos += paddle2_vel
    # draw paddles
    canvas.draw_line([PAD_WIDTH / 2, paddle1_pos],[PAD_WIDTH / 2, paddle1_pos + PAD_HEIGHT], PAD_WIDTH, "White")
    canvas.draw_line([WIDTH-PAD_WIDTH / 2, paddle2_pos],[WIDTH-PAD_WIDTH / 2, paddle2_pos + PAD_HEIGHT], PAD_WIDTH, "White")
    # bounce the ball off the top and bottom walls
    if ball_pos[1] <= BALL_RADIUS:
        ball_vel[1] = - ball_vel[1]
    if ball_pos[1] >= HEIGHT - BALL_RADIUS:
        ball_vel[1]= -ball_vel[1]
    # determine whether paddle and ball collide
    # NOTE(review): sounds are loaded from the network on every bounce/score;
    # loading them once at module level would avoid per-hit latency.
    if ball_pos[0] <= BALL_RADIUS + PAD_WIDTH :
        if paddle1_pos <= ball_pos[1] <= (paddle1_pos + PAD_HEIGHT):
            # hit the left paddle: reflect and speed up by 10%
            sound=simplegui.load_sound('https://freesound.org/people/NoiseCollector/sounds/4391/download/4391__noisecollector__pongblipf-5.wav')
            sound.play()
            ball_vel[0]= - 1.1 * ball_vel[0]
        else:
            # missed: right player scores, re-serve toward the right
            spawn_ball(RIGHT)
            score2+=1
            sound=simplegui.load_sound('https://freesound.org/people/leviclaassen/sounds/107789/download/107789__leviclaassen__hit-002.wav')
            sound.play()
    if ball_pos[0] >= WIDTH -BALL_RADIUS - PAD_WIDTH :
        if paddle2_pos <= ball_pos[1] <= (paddle2_pos + PAD_HEIGHT):
            sound=simplegui.load_sound('https://freesound.org/people/NoiseCollector/sounds/4391/download/4391__noisecollector__pongblipf-5.wav')
            sound.play()
            ball_vel[0]= - 1.1 * ball_vel[0]
        else:
            spawn_ball(LEFT)
            score1+=1
            sound=simplegui.load_sound('https://freesound.org/people/leviclaassen/sounds/107789/download/107789__leviclaassen__hit-002.wav')
            sound.play()
    #if score1==2:
    #message="Player1 Wins!"
    #canvas.draw_text(message,(100,200),30,"White")
    #elif score2==5:
    #message="Player2 Wins!"
    #canvas.draw_text(message,(400,200),30,"White")
    # draw scores
    canvas.draw_text(str(score1),(200,20),20,"White")
    canvas.draw_text(str(score2),(400,20),20,"White")
    # show the splash screen until the player clicks it
    if not started:
        canvas.draw_image(splash_image, splash_info.get_center(),
                          splash_info.get_size(), [WIDTH / 2, HEIGHT / 2],
                          splash_info.get_size())
def keydown(key):
    """Key-press handler: w/s move the left paddle, up/down the right."""
    acc=5  # paddle speed, pixels per frame
    global paddle1_vel, paddle2_vel
    if key==simplegui.KEY_MAP["w"]:
        paddle1_vel = -acc
    elif key==simplegui.KEY_MAP["s"]:
        paddle1_vel = acc
    elif key==simplegui.KEY_MAP["down"]:
        paddle2_vel = acc
    elif key==simplegui.KEY_MAP["up"]:
        paddle2_vel = -acc
def keyup(key):
    """Key-release handler: stop the paddle controlled by the released key.

    w/s control the left paddle; up/down arrows control the right paddle.
    """
    global paddle1_vel, paddle2_vel
    # The original version had four identical branches and an unused local
    # `acc`; collapse to one membership test per paddle.
    if key in (simplegui.KEY_MAP["w"], simplegui.KEY_MAP["s"]):
        paddle1_vel = 0
    elif key in (simplegui.KEY_MAP["up"], simplegui.KEY_MAP["down"]):
        paddle2_vel = 0
def click(pos):
    """Mouse handler: clicking inside the splash image starts the game.

    NOTE(review): Max_rock, score and lives are declared global but never
    defined anywhere in this file (leftovers from another game's template).
    """
    global started,Max_rock,score,lives
    center = [WIDTH / 2, HEIGHT / 2]
    size = splash_info.get_size()
    # True when the click lands inside the splash image rectangle.
    inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
    inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
    if (not started) and inwidth and inheight:
        started = True
        new_game()
def restart():
    """Button handler: reset scores and the ball, then show the splash again."""
    global score1,score2,started,ball_vel,ball_pos
    # NOTE(review): new_game() serves a ball even though started is set to
    # False below; the serve is effectively overwritten by the resets.
    new_game()
    score1=0
    score2=0
    ball_vel=[0,0]
    ball_pos = [WIDTH / 2, HEIGHT / 2]
    started=False
# create frame
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
# register event handlers for drawing, keyboard, mouse and the restart button
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.set_mouseclick_handler(click)
frame.add_button("Restart", restart, 100)
# start frame
frame.start()
| StarcoderdataPython |
1647175 | <filename>research/DailyPriceInfo/trend_analysis_utils.py
trend_type = ['three_day_up', '5_perc_up', '3_perc_up', '1_perc_up',\
'three_day_down', '5_perc_down', '3_perc_down', '1_perc_down', \
'1_perc_var', 'NA']
def get_trend_type(close_price):
    """Classify the short-term trend from the four most recent closing
    prices (newest first).

    Returns one of the labels in `trend_type`: a three-day monotone move,
    a banded percentage change over the window, '1_perc_var' for moves
    under 1%, or 'NA' when fewer than four prices are given.
    """
    if len(close_price) < 4:
        return 'NA'
    p0, p1, p2, p3 = close_price[0], close_price[1], close_price[2], close_price[3]
    # Three consecutive strictly monotone days take precedence over the
    # percentage bands.
    if p0 > p1 > p2 > p3:
        return 'three_day_up'
    if p0 < p1 < p2 < p3:
        return 'three_day_down'
    # Percentage change of the newest close relative to the oldest one.
    var = 100 * (p0 - p3) / p3
    for threshold, label in ((5.0, '5_perc_up'), (3.0, '3_perc_up'), (1.0, '1_perc_up')):
        if var >= threshold:
            return label
    for threshold, label in ((-5.0, '5_perc_down'), (-3.0, '3_perc_down'), (-1.0, '1_perc_down')):
        if var <= threshold:
            return label
    return '1_perc_var'
if __name__ == '__main__':
    # Smoke test: each row is a close-price series (newest first) chosen to
    # exercise one of the labels the classifier can emit.
    sample_series = [
        [105, 103, 101, 100],
        [103, 102, 101, 100],
        [101, 99, 99, 100],
        [94, 99, 99, 100],
        [96, 99, 99, 100],
        [98, 99, 99, 100],
        [99.5, 99.0, 99.0, 100.0],
        [100.5, 99.0, 99.0, 100.0],
        [110, 100, 90],
        [105, 99, 101, 100],
        [103.5, 99, 101, 100],
    ]
    for series in sample_series:
        print(get_trend_type(series))
| StarcoderdataPython |
2342 | import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator
# Train/evaluate an SLI-Rec sequential recommender for the WSDM 2022 task:
# build vocabularies, train with early stopping, reload the best checkpoint,
# then score the intermediate and final test sets.
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))

yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'

RANDOM_SEED = SEED  # Set None for non-deterministic result

# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path))  ## the path where I enter the cmd

# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')

train_num_ngs = 9 # number of negative instances with a positive instance for training
valid_num_ngs = 9 # number of negative instances with a positive instance for validation
test_num_ngs = 9 # number of negative instances with a positive instance for testing

# Build user/item/category vocabularies from the train and valid splits.
_create_vocab(
    [train_file, valid_file],
    user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.

hparams = prepare_hparams(yaml_file,
                          # user_dropout=False,
                          embed_l2=0.,
                          layer_l2=0.,
                          enable_BN=True,  ##-- True
                          learning_rate=0.001,  # set to 0.01 if batch normalization is disable else 0.001
                          epochs=100000,
                          EARLY_STOP=40000,
                          batch_size=400,
                          show_step=5000,
                          MODEL_DIR=os.path.join(data_path, "model/"),
                          SUMMARIES_DIR=os.path.join(data_path, "summary/"),
                          user_vocab=user_vocab,
                          item_vocab=item_vocab,
                          cate_vocab=cate_vocab,
                          need_sample=False,
                          train_num_ngs=train_num_ngs,  # provides the number of negative instances for each positive instance for loss computation.
                          loss='log_loss',  # 'log_loss', 'softmax'
                          max_seq_length=50,
                          cont_feat_len=85,
                          use_cont_feat=False,
                          init_item_emb=False,
                          shuffle=True
                          )
print(hparams.values)

input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))

with Timer() as train_time:
    model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')
print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))
### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc') ##-- quick test

# Reload the best checkpoint saved during training before evaluating.
model.load_model(os.path.join(data_path, "model", 'best_model'))

res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)

# Write prediction scores for the intermediate and final test sets.
model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)

# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')

# Historical validation-AUC results kept for reference:
## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937

print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920

## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881
1677200 | <reponame>nathanael-fijalkow/DeepSynth
import logging
import time
import random
import csv
import matplotlib.pyplot as plt
import numpy as np
from math import log10
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL
from program import Program, Function, Variable, BasicPrimitive, New
from cfg import CFG
from pcfg import PCFG
from dsl import DSL
from program_as_list import reconstruct_from_compressed
from Algorithms.heap_search import heap_search
from Algorithms.heap_search_naive import heap_search_naive
from Algorithms.a_star import a_star
from Algorithms.threshold_search import threshold_search
from Algorithms.dfs import dfs
from Algorithms.bfs import bfs
from Algorithms.sort_and_add import sort_and_add
from Algorithms.sqrt_sampling import sqrt_sampling, sqrt_sampling_with_sbsur
from DSL.deepcoder import semantics,primitive_types
# Map verbosity flag -> logging level; verbosity=1 enables debug output.
logging_levels = {0: logging.INFO, 1: logging.DEBUG}
verbosity = 0
logging.basicConfig(format='%(message)s', level=logging_levels[verbosity])

# Seed both RNGs so the random PCFGs and sampling-based algorithms are reproducible.
seed = 100
random.seed(seed)
np.random.seed(seed)

# Build the DeepCoder DSL and a random PCFG for programs of type [int] -> [int].
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT),List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request, max_program_depth = 4)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()

# these colors come from a graphical design webpage
# but I think that they actually look worse
# they are disabled here
six_colors = [None]*6#["#003f5c","#444e86","#955196","#dd5182","#ff6e54","#ffa600"]
seven_colors = [None]*7#["#003f5c","#374c80","#7a5195","#bc5090","#ef5675","#ff764a","#ffa600"]

# (algorithm, display name, extra keyword arguments) for each enumerator benchmarked.
list_algorithms = [
    (bfs, 'BFS', {'beam_width' : 5e5}),
    (dfs, 'DFS', {}),
    (sort_and_add, 'Sort&Add', {}),
    # (sqrt_sampling_with_sbsur, 'SQRT+SBS', {}),
    (threshold_search, 'Threshold', {'initial_threshold' : 1e-4, 'scale_factor' : 5e3}),
    (sqrt_sampling, 'SQRT', {}),
    (heap_search, 'Heap Search', {}),
    (a_star, 'A*', {}),
]
# Set of algorithms where we need to reconstruct the programs
# (they yield compressed representations rather than Program objects).
reconstruct = {dfs, bfs, threshold_search, a_star,
               sort_and_add, sqrt_sampling_with_sbsur}
# Set of randomised algorithms
randomised = {sqrt_sampling, sqrt_sampling_with_sbsur}
def run_algorithm(pcfg, algo_index):
    '''
    Run the algorithm until timeout, and for each program record probability and time of output.

    Parameters:
        pcfg: the PCFG to enumerate programs from.
        algo_index: index into the module-level ``list_algorithms`` table.

    Returns a list of (search_time, probability, cumulative_probability) rows,
    one per *distinct* program output after the first one.

    Uses the module-level globals ``list_algorithms``, ``reconstruct`` and ``timeout``.
    '''
    algorithm, name_algo, param = list_algorithms[algo_index]
    result = []
    search_time = 0
    gen = algorithm(pcfg, **param)

    # To remove the cost of initialisation: consume the first program outside
    # the timed loop.  Its probability still counts towards the cumulative total,
    # but no row is recorded for it.
    program = next(gen)
    if algorithm in reconstruct:
        target_type = pcfg.start[0]
        program = reconstruct_from_compressed(program, target_type)
    probability = pcfg.probability_program(pcfg.start, program)
    nb_programs = 1
    cumulative_probability = probability
    seen = set()
    seen.add(program.hash)

    while (search_time < timeout):
        # Accumulate only the time spent inside next(gen); bookkeeping below is untimed.
        search_time -= time.perf_counter()
        try:
            program = next(gen)
        except StopIteration:
            search_time += time.perf_counter()
            logging.debug(
                "Output the last program after {}".format(nb_programs))
            break  # no next program
        search_time += time.perf_counter()

        # Reconstruction if needed
        if algorithm in reconstruct:
            target_type = pcfg.start[0]
            program = reconstruct_from_compressed(program, target_type)

        # Only record each distinct program once (sampling algorithms may repeat).
        if program.hash not in seen:
            seen.add(program.hash)
            probability = pcfg.probability_program(pcfg.start, program)
            cumulative_probability += probability
            nb_programs += 1
            row = search_time, probability, cumulative_probability
            result.append(row)
    return result
def create_dataset():
    """Run every algorithm ``number_samples`` times on fresh random PCFGs and
    write mean/std cumulative-probability curves (vs time and vs program count)
    to CSV files under ``results_syntactic/``.

    Reads the module-level globals: list_algorithms, deepcoder_CFG,
    number_samples, number_timepoints, number_countpoints, max_number_programs,
    timeout.
    """
    logging.info('Create dataset')
    number_algorithms = len(list_algorithms)
    # Log-spaced time grid and linear program-count grid onto which each run is interpolated.
    timepoints = np.logspace(start = -3, stop = log10(timeout), num = number_timepoints)
    countpoints = np.linspace(start = 0, stop = max_number_programs, num = number_countpoints)
    r_time = np.zeros((number_samples, number_algorithms, number_timepoints))
    r_program = np.zeros((number_samples, number_algorithms, number_countpoints))
    for i in range(number_samples):
        # Fresh random PCFG per sample so results average over grammars.
        deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
        for algo_index in range(number_algorithms):
            algorithm, name_algo, param = list_algorithms[algo_index]
            logging.info('start run number {}: {}'.format(i+1, name_algo))
            res = run_algorithm(pcfg = deepcoder_PCFG, algo_index = algo_index)
            # Interpolate the (time, cumulative probability) curve onto the common grids.
            r_time[i][algo_index] = np.interp(timepoints,
                                              [search_time for search_time,_,_ in res],
                                              [cumulative_probability for _,_,cumulative_probability in res])
            r_program[i][algo_index] = np.interp(countpoints,
                                                 range(len(res)),
                                                 [cumulative_probability for _,_,cumulative_probability in res])
            logging.info('finished run number {}'.format(i+1))
    result_time_mean = np.mean(r_time, axis=0)
    result_time_std = np.std(r_time, axis=0)
    result_program_mean = np.mean(r_program, axis=0)
    result_program_std = np.std(r_program, axis=0)
    # One pair of CSV files per algorithm, named with the algorithm and the timeout.
    for algo_index in range(number_algorithms):
        algorithm, name_algo, param = list_algorithms[algo_index]
        with open('results_syntactic/cumulative_probability_vs_time_{}_{}.csv'.format(name_algo, timeout), 'w', encoding='UTF8', newline='') as f:
            writer = csv.writer(f)
            header = ['search time', 'mean cumulative probability', 'standard deviation']
            writer.writerow(header)
            for x,t in enumerate(timepoints):
                writer.writerow((t, result_time_mean[algo_index][x], result_time_std[algo_index][x]))
        with open('results_syntactic/cumulative_probability_vs_number_programs_{}_{}.csv'.format(name_algo, timeout), 'w', encoding='UTF8', newline='') as f:
            writer = csv.writer(f)
            header = ['number of programs', 'mean cumulative probability', 'standard deviation']
            writer.writerow(header)
            for x in range(number_countpoints):
                writer.writerow((x, result_program_mean[algo_index][x], result_program_std[algo_index][x]))
# Plot cumulative probability VS time
def plot_cumulative_probability_vs_time():
    """Read the per-algorithm CSVs written by create_dataset and plot mean
    cumulative probability vs search time (log x-axis) with a +/-0.5*std band.

    Saves the figure under ``results_syntactic/`` and clears the current figure.
    """
    logging.info('Plot cumulative probability VS time')
    plt.style.use('seaborn-colorblind')
    for algo_index in range(len(list_algorithms)):
        algorithm, name_algo, param = list_algorithms[algo_index]
        # timepoints = np.arange(start = 0, stop = number_timepoints)
        # Must match the grid used in create_dataset.
        timepoints = np.logspace(start = -3, stop = log10(timeout), num = number_timepoints)
        logging.info('retrieve run: {}'.format(name_algo))
        with open('results_syntactic/cumulative_probability_vs_time_{}_{}.csv'.format(name_algo, timeout), 'r', encoding='UTF8', newline='') as f:
            reader = csv.reader(f)
            result_mean = np.zeros(number_timepoints)
            result_std = np.zeros(number_timepoints)
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip the header row
                result_mean[i-1] = row[1]
                result_std[i-1] = row[2]
        logging.info('retrieved')
        result_top = result_mean + .5 * result_std
        result_low = result_mean - .5 * result_std
        sc = plt.scatter(timepoints, result_mean, label = name_algo, s = 5)
        # Reuse the scatter colour for the matching uncertainty band.
        color = sc.get_facecolors()[0].tolist()
        plt.fill_between(timepoints, result_top, result_low, facecolor = color, alpha=0.2)
    plt.legend()
    plt.xlim((1e-3,timeout))
    plt.xlabel('time (in seconds)')
    plt.xscale('log')
    plt.ylim((0,1))
    plt.ylabel('cumulative probability')
    plt.savefig("results_syntactic/cumulative_probability_vs_time_%s.png" % seed,
                dpi=500,
                bbox_inches='tight')
    plt.clf()
# Plot cumulative probability VS number of programs
def plot_cumulative_probability_vs_number_programs():
    """Read the per-algorithm CSVs written by create_dataset and plot mean
    cumulative probability vs number of enumerated programs with a
    +/-0.5*std band.  A* is skipped because it enumerates the same programs
    in the same order as heap search.
    """
    logging.info('Plot cumulative probability VS number of programs')
    countpoints = np.linspace(start = 0, stop = max_number_programs, num = number_countpoints)
    for algo_index in range(len(list_algorithms)):
        algorithm, name_algo, param = list_algorithms[algo_index]
        # heap search and A* are the same here
        if name_algo == 'A*':
            continue
        logging.info('retrieve run: {}'.format(name_algo))
        with open('results_syntactic/cumulative_probability_vs_number_programs_{}_{}.csv'.format(name_algo, timeout), 'r', encoding='UTF8', newline='') as f:
            reader = csv.reader(f)
            result_mean = np.zeros(number_countpoints)
            result_std = np.zeros(number_countpoints)
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip the header row
                result_mean[i-1] = row[1]
                result_std[i-1] = row[2]
        logging.info('retrieved')
        result_top = result_mean + .5 * result_std
        result_low = result_mean - .5 * result_std
        sc = plt.scatter(countpoints,result_mean,label = name_algo, s = 5)
        # Reuse the scatter colour for the matching uncertainty band.
        color = sc.get_facecolors()[0].tolist()
        plt.fill_between(countpoints, result_top, result_low, facecolor = color, alpha=0.2)
    plt.ticklabel_format(axis='x', style='sci', scilimits=(3,5))
    plt.xlabel('number of programs')
    plt.xlim((0,max_number_programs))
    plt.ylabel('cumulative probability')
    plt.ylim((0,1))
    plt.legend(loc = 'lower right')
    plt.savefig("results_syntactic/cumulative_probability_vs_number_programs_%s.png" % seed,
                dpi=500,
                bbox_inches='tight')
    plt.clf()
# Experiment configuration: number of random PCFGs sampled, grid resolutions,
# the per-run timeout in seconds, and the x-axis cap for the program-count plot.
number_samples = 50
number_timepoints = 1_000
timeout = 1
number_countpoints = 1_000
max_number_programs = 2e5

# Run the full experiment, then render both plots from the CSVs it wrote.
create_dataset()
plot_cumulative_probability_vs_time()
plot_cumulative_probability_vs_number_programs()
| StarcoderdataPython |
59576 | <reponame>WadeBarnes/von-bc-registries-audit
#!/usr/bin/python
import os
import psycopg2
import datetime
import time
import json
import decimal
import requests
import csv
from config import get_connection, get_db_sql, get_sql_record_count, CORP_TYPES_IN_SCOPE, corp_num_with_prefix, bare_corp_num
# Tuning constants for the audit run.
# NOTE(review): QUERY_LIMIT / REPORT_COUNT / ERROR_THRESHOLD_COUNT are not
# referenced in this chunk — presumably used elsewhere in the module; confirm.
QUERY_LIMIT = '200000'
REPORT_COUNT = 10000
ERROR_THRESHOLD_COUNT = 5

# value for PROD is "https://orgbook.gov.bc.ca/api/v3"
ORGBOOK_API_URL = os.environ.get('ORGBOOK_API_URL', 'http://localhost:8081/api/v3')

# OrgBook API path fragments for topic lookup and search.
TOPIC_QUERY = "/topic/registration.registries.ca/"
TOPIC_NAME_SEARCH = "/search/topic?inactive=false&latest=true&revoked=false&name="
TOPIC_ID_SEARCH = "/search/topic?inactive=false&latest=true&revoked=false&topic_id="
def get_bc_reg_corps():
    """
    Reads all corps and corp types from the BC Reg database and writes to a csv file.

    Two queries are merged per corp: sql1 fetches the base record plus current
    corporate names; sql2 adds jurisdiction, state and class attributes.
    Corps with state 'HWT' are excluded, and only types in CORP_TYPES_IN_SCOPE
    are kept.  The merged records are written to export/bc_reg_corps.csv and
    the function returns get_bc_reg_corps_csv() (corp_types, corp_names,
    corp_infos dicts keyed by prefixed corp number).
    """
    # run this query against BC Reg database:
    sql1 = """
    select corp.corp_num, corp.corp_typ_cd, corp.recognition_dts, corp.bn_9,
    corp_name.corp_nme, corp_name_as.corp_nme corp_nme_as
    from bc_registries.corporation corp
    left join bc_registries.corp_name corp_name
    on corp_name.corp_num = corp.corp_num
    and corp_name.end_event_id is null
    and corp_name.corp_name_typ_cd in ('CO','NB')
    left join bc_registries.corp_name corp_name_as
    on corp_name_as.corp_num = corp.corp_num
    and corp_name_as.end_event_id is null
    and corp_name_as.corp_name_typ_cd in ('AS')
    where corp.corp_num not in (
    select corp_num from bc_registries.corp_state where state_typ_cd = 'HWT');
    """
    sql2 = """
    select corp.corp_num, corp.corp_typ_cd, corp.recognition_dts, corp.bn_9,
    jurisdiction.can_jur_typ_cd, jurisdiction.xpro_typ_cd, jurisdiction.othr_juris_desc,
    corp_state.state_typ_cd, corp_op_state.op_state_typ_cd, corp_type.corp_class
    from bc_registries.corporation corp
    left join bc_registries.corp_type
    on corp_type.corp_typ_cd = corp.corp_typ_cd
    left join bc_registries.jurisdiction
    on jurisdiction.corp_num = corp.corp_num
    and jurisdiction.end_event_id is null
    left join bc_registries.corp_state
    on corp_state.corp_num = corp.corp_num
    and corp_state.end_event_id is null
    left join bc_registries.corp_op_state
    on corp_op_state.state_typ_cd = corp_state.state_typ_cd
    where corp.corp_num not in (
    select corp_num from bc_registries.corp_state where state_typ_cd = 'HWT');
    """

    bc_reg_corps = {}
    # NOTE(review): bc_reg_corp_types / bc_reg_corp_names / bc_reg_count are
    # populated below but never used (the return value comes from the CSV
    # re-read) — candidates for removal; confirm before deleting.
    bc_reg_corp_types = {}
    bc_reg_corp_names = {}
    bc_reg_count = 0
    with open('export/bc_reg_corps.csv', mode='w') as corp_file:
        fieldnames = ["corp_num", "corp_type", "corp_name", "recognition_dts", "bn_9", "can_jur_typ_cd", "xpro_typ_cd", "othr_juris_desc", "state_typ_cd", "op_state_typ_cd", "corp_class"]
        corp_writer = csv.DictWriter(corp_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        corp_writer.writeheader()

        print("Get corp stats from BC Registries DB", datetime.datetime.now())
        start_time = time.perf_counter()
        processed_count = 0
        # First pass: base record + names for each in-scope corp.
        bc_reg_recs = get_db_sql("bc_registries", sql1)
        for bc_reg_rec in bc_reg_recs:
            if bc_reg_rec['corp_typ_cd'] in CORP_TYPES_IN_SCOPE:
                bc_reg_count = bc_reg_count + 1
                full_corp_num = corp_num_with_prefix(bc_reg_rec['corp_typ_cd'], bc_reg_rec['corp_num'])
                # Prefer the assumed name ('AS') over the corporate name when present.
                corp_name = bc_reg_rec['corp_nme_as'] if (bc_reg_rec['corp_nme_as'] and 0 < len(bc_reg_rec['corp_nme_as'])) else bc_reg_rec['corp_nme']
                bc_reg_corp = {
                    "corp_num": full_corp_num,
                    "corp_type": bc_reg_rec['corp_typ_cd'],
                    "corp_name": corp_name,
                    "recognition_dts": bc_reg_rec['recognition_dts'],
                    "bn_9": bc_reg_rec['bn_9'],
                    "can_jur_typ_cd": "",
                    "xpro_typ_cd": "",
                    "othr_juris_desc": "",
                    "state_typ_cd": "",
                    "op_state_typ_cd": "",
                    "corp_class": "",
                }
                bc_reg_corps[full_corp_num] = bc_reg_corp
                bc_reg_corp_types[bc_reg_corp["corp_num"]] = bc_reg_corp["corp_type"]
                bc_reg_corp_names[bc_reg_corp["corp_num"]] = bc_reg_corp["corp_name"]

        # Second pass: merge jurisdiction / state / class attributes into each corp.
        bc_reg_recs_2 = get_db_sql("bc_registries", sql2)
        for bc_reg_rec in bc_reg_recs_2:
            if bc_reg_rec['corp_typ_cd'] in CORP_TYPES_IN_SCOPE:
                full_corp_num = corp_num_with_prefix(bc_reg_rec['corp_typ_cd'], bc_reg_rec['corp_num'])
                if full_corp_num in bc_reg_corps:
                    bc_reg_corp = bc_reg_corps[full_corp_num]
                else:
                    # Corp seen by sql2 only: create a stub with no name.
                    bc_reg_corp = {
                        "corp_num": full_corp_num,
                        "corp_type": bc_reg_rec['corp_typ_cd'],
                        "corp_name": "",
                        "recognition_dts": bc_reg_rec['recognition_dts'],
                        "bn_9": bc_reg_rec['bn_9'],
                    }
                bc_reg_corp["can_jur_typ_cd"] = bc_reg_rec['can_jur_typ_cd']
                bc_reg_corp["xpro_typ_cd"] = bc_reg_rec['xpro_typ_cd']
                bc_reg_corp["othr_juris_desc"] = bc_reg_rec['othr_juris_desc']
                bc_reg_corp["state_typ_cd"] = bc_reg_rec['state_typ_cd']
                bc_reg_corp["op_state_typ_cd"] = bc_reg_rec['op_state_typ_cd']
                bc_reg_corp["corp_class"] = bc_reg_rec['corp_class']
                bc_reg_corps[full_corp_num] = bc_reg_corp

        # Write the merged records out; the caller reads them back via the CSV helper.
        for full_corp_num in bc_reg_corps:
            bc_reg_corp = bc_reg_corps[full_corp_num]
            corp_writer.writerow(bc_reg_corp)

    return get_bc_reg_corps_csv()
def get_bc_reg_corps_csv():
    """
    Load the previously exported BC Registries corps from
    export/bc_reg_corps.csv into lookup dictionaries.

    Returns a (corp_types, corp_names, corp_infos) tuple, each keyed by the
    prefixed corp number.
    """
    corp_types = {}
    corp_names = {}
    corp_infos = {}
    # Columns copied into the per-corp info dict (same order as the export).
    info_fields = (
        "corp_num", "corp_type", "corp_name", "recognition_dts", "bn_9",
        "can_jur_typ_cd", "xpro_typ_cd", "othr_juris_desc",
        "state_typ_cd", "op_state_typ_cd", "corp_class",
    )
    with open('export/bc_reg_corps.csv', mode='r') as corp_file:
        for row in csv.DictReader(corp_file):
            corp_num = row["corp_num"]
            corp_types[corp_num] = row["corp_type"]
            corp_names[corp_num] = row["corp_name"]
            corp_infos[corp_num] = {field: row[field] for field in info_fields}
    return (corp_types, corp_names, corp_infos)
def get_orgbook_all_corps():
    """
    Reads all companies from the orgbook database.

    Looks up the credential-type ids for the registration and business-number
    credentials, joins each topic to its latest credentials/attributes, writes
    the result to export/orgbook_search_corps.csv, and returns
    get_orgbook_all_corps_csv() (corp_types, corp_names, corp_infos dicts
    keyed by source id).
    """
    conn = None
    try:
        conn = get_connection('org_book')
    except (Exception) as error:
        print(error)
        raise

    # get all the corps from orgbook
    print("Get corp stats from OrgBook DB", datetime.datetime.now())
    orgbook_corp_types = {}
    orgbook_corp_names = {}
    orgbook_corp_infos = {}
    with open('export/orgbook_search_corps.csv', mode='w') as corp_file:
        fieldnames = ["corp_num", "corp_type", "registration_date", "corp_name", "home_jurisdiction", "entity_status", "bus_num"]
        corp_writer = csv.DictWriter(corp_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        corp_writer.writeheader()

        # Resolve the two credential-type ids used to filter the main query below.
        sql4_a = "select id from credential_type where description = 'registration.registries.ca'"
        sql4_b = "select id from credential_type where description = 'business_number.registries.ca'"
        corp_typ_id = None
        bus_num_id = None
        try:
            cur = conn.cursor()
            cur.execute(sql4_a)
            for row in cur:
                corp_typ_id = row[0]
            cur.close()
            cur = conn.cursor()
            cur.execute(sql4_b)
            for row in cur:
                bus_num_id = row[0]
            cur.close()
        except (Exception) as error:
            print(error)
            raise

        # One row per topic with its latest registration attributes and business number.
        # NOTE(review): the ids are interpolated via str() rather than bound
        # parameters; safe here only because they come from the database itself.
        sql4 = """
        select topic.source_id, attribute.value entity_type, attr_reg_dt.value registration_date,
        name.text entity_name, name_as.text entity_name_assumed,
        attr_juris.value home_jurisdiction, attr_status.value entity_status,
        attr_bus_num.value bus_num
        from topic
        left join credential as cred_corp_typ on cred_corp_typ.topic_id = topic.id and cred_corp_typ.latest = true and cred_corp_typ.credential_type_id = """ + str(corp_typ_id) + """
        left join attribute on attribute.credential_id = cred_corp_typ.id and attribute.type = 'entity_type'
        left join attribute attr_reg_dt on attr_reg_dt.credential_id = cred_corp_typ.id and attr_reg_dt.type = 'registration_date'
        left join attribute attr_juris on attr_juris.credential_id = cred_corp_typ.id and attr_juris.type = 'home_jurisdiction'
        left join attribute attr_status on attr_status.credential_id = cred_corp_typ.id and attr_status.type = 'entity_status'
        left join name on name.credential_id = cred_corp_typ.id and name.type = 'entity_name'
        left join name name_as on name_as.credential_id = cred_corp_typ.id and name_as.type = 'entity_name_assumed'
        left join credential as cred_bus_num on cred_bus_num.topic_id = topic.id and cred_bus_num.latest = true and cred_bus_num.credential_type_id = """ + str(bus_num_id) + """
        left join attribute as attr_bus_num on attr_bus_num.credential_id = cred_bus_num.id and attr_bus_num.type = 'business_number'
        """
        try:
            cur = conn.cursor()
            cur.execute(sql4)
            for row in cur:
                orgbook_corp_types[row[0]] = row[1]
                # Prefer the assumed name over the entity name when present.
                corp_name = row[4] if (row[4] and 0 < len(row[4])) else row[3]
                orgbook_corp_names[row[0]] = corp_name
                write_corp = {
                    "corp_num": row[0],
                    "corp_type": row[1],
                    "registration_date": row[2],
                    "corp_name":corp_name,
                    "home_jurisdiction": row[5],
                    "entity_status": row[6],
                    "bus_num": row[7],
                }
                corp_writer.writerow(write_corp)
                orgbook_corp_infos[row[0]] = write_corp
            cur.close()
        except (Exception) as error:
            print(error)
            raise

    return get_orgbook_all_corps_csv()
def get_orgbook_all_corps_csv():
    """
    Load the previously exported OrgBook corps from
    export/orgbook_search_corps.csv into lookup dictionaries.

    Returns a (corp_types, corp_names, corp_infos) tuple keyed by corp number;
    corp_infos holds the full CSV row for each corp.
    """
    corp_types = {}
    corp_names = {}
    corp_infos = {}
    with open('export/orgbook_search_corps.csv', mode='r') as corp_file:
        for row in csv.DictReader(corp_file):
            key = row["corp_num"]
            corp_types[key] = row["corp_type"]
            corp_names[key] = row["corp_name"]
            corp_infos[key] = row
    return (corp_types, corp_names, corp_infos)
def get_event_proc_future_corps():
    """
    Reads from the event processor database and writes to a csv file:
    - corps queued for future processing (we don't check if these are in orgbook or not)

    Writes export/event_future_corps.csv and returns the dict produced by
    get_event_proc_future_corps_csv() (corp_num -> corp_num).
    """
    corps = []
    future_corps = {}
    # Unprocessed queue entries have a null process_date.
    sql1 = """SELECT corp_num FROM event_by_corp_filing WHERE process_date is null;"""
    corp_recs = get_db_sql("event_processor", sql1)
    if 0 < len(corp_recs):
        for corp_rec in corp_recs:
            corps.append({'corp_num': corp_rec['corp_num']})

    with open('export/event_future_corps.csv', mode='w') as corp_file:
        fieldnames = ["corp_num"]
        corp_writer = csv.DictWriter(corp_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        corp_writer.writeheader()
        for corp in corps:
            corp_writer.writerow(corp)
            future_corps[corp["corp_num"]] = corp["corp_num"]

    return get_event_proc_future_corps_csv()
def get_event_proc_future_corps_csv():
    """
    Corps that are still in the event processor queue waiting to be processed
    (won't be in orgbook yet).

    Returns a dict mapping corp_num -> corp_num, read from
    export/event_future_corps.csv.
    """
    with open('export/event_future_corps.csv', mode='r') as corp_file:
        future_corps = {row["corp_num"]: row["corp_num"] for row in csv.DictReader(corp_file)}
    return future_corps
def get_event_proc_audit_corps():
    """
    Reads from the event processor database and writes to a csv file:
    - all corps in the event processor audit log

    Writes export/event_audit_corps.csv and returns the list of
    {'corp_num', 'corp_type'} dicts.
    """
    audit_corps = []
    sql3 = """SELECT corp_num, corp_type FROM CORP_AUDIT_LOG;"""
    corp_audit_recs = get_db_sql("event_processor", sql3)
    if 0 < len(corp_audit_recs):
        for corp_rec in corp_audit_recs:
            audit_corps.append({'corp_num': corp_rec['corp_num'], 'corp_type': corp_rec['corp_type']})

    with open('export/event_audit_corps.csv', mode='w') as corp_file:
        fieldnames = ["corp_num", "corp_type"]
        corp_writer = csv.DictWriter(corp_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        corp_writer.writeheader()
        for corp in audit_corps:
            corp_writer.writerow(corp)

    return audit_corps
def get_agent_wallet_ids():
    """
    Reads from the exported list of wallet id's.

    Returns a dict mapping wallet_id -> wallet_id, read from
    export/export-wallet-cred-ids.txt (CSV with a header row).
    """
    with open('export/export-wallet-cred-ids.txt', mode='r') as corp_file:
        return {row["wallet_id"]: row["wallet_id"] for row in csv.DictReader(corp_file)}
def append_agent_wallet_ids(agent_ids):
    """
    Appends agent credential ids to our local cache.

    Each entry in *agent_ids* must have a 'credential_id' key; a
    (type, wallet_id) row is appended to export/export-wallet-cred-ids.txt.
    """
    fieldnames = ["type", "wallet_id"]
    with open('export/export-wallet-cred-ids.txt', mode='a') as corp_file:
        writer = csv.DictWriter(corp_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for agent_id in agent_ids:
            writer.writerow({"type": "Indy::Credential", "wallet_id": agent_id["credential_id"]})
| StarcoderdataPython |
75983 | <gh_stars>10-100
#!/usr/bin/env python
import os
import sys
import argparse
import re
import imctools.io.mcdparser as mcdparser
import imctools.io.txtparser as txtparser
import imctools.io.ometiffparser as omeparser
import imctools.io.mcdxmlparser as meta
############################################
############################################
## PARSE ARGUMENTS
############################################
############################################
# Command-line interface: positional input file plus the metadata CSV.
Description = 'Split nf-core/imcyto input data by full/ilastik stack.'
Epilog = """Example usage: python run_imctools.py <MCD/TXT/TIFF> <METADATA_FILE>"""

argParser = argparse.ArgumentParser(description=Description, epilog=Epilog)
argParser.add_argument('INPUT_FILE', help="Input files with extension '.mcd', '.txt', or '.tiff'.")
argParser.add_argument('METADATA_FILE', help="Metadata file containing 3 columns i.e. metal,full_stack,ilastik_stack. See pipeline usage docs for file format information.")
args = argParser.parse_args()
############################################
############################################
## PARSE & VALIDATE INPUTS
############################################
############################################
## READ AND VALIDATE METADATA FILE
ERROR_STR = 'ERROR: Please check metadata file'
HEADER = ['metal', 'full_stack', 'ilastik_stack']

## metalDict maps upper-cased metal name -> [include_in_full_stack, include_in_ilastik_stack]
metalDict = {}
with open(args.METADATA_FILE, 'r') as fin:
    header = fin.readline().strip().split(',')
    if header != HEADER:
        print("{} header: {} != {}".format(ERROR_STR,','.join(header),','.join(HEADER)))
        sys.exit(1)

    for line in fin:
        lspl = line.strip().split(',')

        ## CHECK THREE COLUMNS IN LINE
        ## (length is checked BEFORE unpacking; previously a malformed line
        ##  raised a ValueError on `metal,fstack,istack = lspl` and never
        ##  reached this friendly error message)
        if len(lspl) != len(HEADER):
            print("{}: Invalid number of columns - should be 3!\nLine: '{}'".format(ERROR_STR,line.strip()))
            sys.exit(1)
        metal,fstack,istack = lspl

        ## CHECK VALID INCLUDE/EXCLUDE CODES
        if fstack not in ['0','1'] or istack not in ['0','1']:
            print("{}: Invalid column code - should be 0 or 1!\nLine: '{}'".format(ERROR_STR,line.strip()))
            sys.exit(1)

        ## CREATE DICTIONARY (first occurrence of a metal wins)
        metal = metal.upper()
        if metal not in metalDict:
            metalDict[metal] = [bool(int(x)) for x in [fstack,istack]]
## OUTPUT FILE LINKING ROI IDS TO ROI LABELS (IMAGE DESCRIPTION)
roi_map = open(os.path.basename(args.INPUT_FILE)+'_ROI_map.csv', "w")

## USE DIFFERENT PARSERS CORRESPONDING TO THE INPUT FILE FORMAT
# Extract the lower-cased extension of the input file.
file_type = re.sub(".*\.([^.]+)$", '\\1', args.INPUT_FILE.lower())

## CONVERT INPUT_FILE TO TIFF AND WRITE RELEVANT TIFF IMAGES
if file_type == "mcd":
    # MCD files can contain multiple acquisitions (ROIs).
    parser = mcdparser.McdParser(args.INPUT_FILE)
    acids = parser.acquisition_ids
else:
    if file_type == "txt":
        parser = txtparser.TxtParser(args.INPUT_FILE)
    elif file_type == "tiff" or file_type == "tif":
        parser = omeparser.OmetiffParser(args.INPUT_FILE)
    else:
        print("{}: Invalid input file type - should be txt, tiff, or mcd!".format(file_type))
        sys.exit(1)
    # THERE IS ONLY ONE ACQUISITION - ROI FOLDER NAMED ACCORDING TO INPUT FILENAME
    # NOTE(review): the pattern strips '.txt'/'.tiff' but not '.tif', and the
    # dots are unescaped so they match any character — confirm intended.
    acids = [ re.sub('.txt|.tiff', '', os.path.basename(parser.filename).lower().replace(" ", "_")) ]

for roi_number in acids:
    if file_type == "mcd":
        imc_ac = parser.get_imc_acquisition(roi_number)
        acmeta = parser.meta.get_object(meta.ACQUISITION, roi_number)
        roi_label = parser.get_acquisition_description(roi_number)
        roi_map.write("roi_%s,%s,%s,%s" % (roi_number, roi_label, acmeta.properties['StartTimeStamp'], acmeta.properties['EndTimeStamp']) + "\n")
    else:
        imc_ac = parser.get_imc_acquisition()
        # NO INFORMATION ON IMAGE ACQUISITION TIME FOR TXT AND TIFF FILE FORMATS
        roi_map.write("roi_%s,,," % (roi_number) + "\n")

    # i is the column index into metalDict values: 0 = full_stack, 1 = ilastik_stack.
    for i,j in enumerate(HEADER[1:]):
        ## WRITE TO APPROPRIATE DIRECTORY
        dirname = "roi_%s/%s" % (roi_number, j)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # SELECT THE METALS FOR THE CORRESPNDING STACK (i) TO CREATE OME TIFF STACK
        label_indices = [ idx for idx in range(0, len(imc_ac.channel_labels)) if len([ entry for entry in metalDict if imc_ac.channel_labels[idx].upper().startswith(entry) and metalDict[entry][i]]) > 0 ]
        metal_stack = [ imc_ac.channel_metals[idx] for idx in label_indices ]
        if len(metal_stack) > 0:
            img = imc_ac.get_image_writer(filename=os.path.join("roi_%s" % (roi_number), "%s.ome.tiff" % j), metals=metal_stack)
            img.save_image(mode='ome', compression=0, dtype=None, bigtiff=True)
        else:
            print("None of the metals exists in metasheet file for {}".format(j))
            sys.exit(1)

        # Additionally write one single-channel TIFF per selected metal.
        for l, m in zip(imc_ac.channel_labels, imc_ac.channel_metals):
            filename = "%s.tiff" % (l)
            # MATCH METAL LABEL TO METADATA METAL COLUMN
            metal_label = l.split('_')[0].upper()
            metal = [ entry for entry in metalDict if metal_label.upper().startswith(entry) and metalDict[entry][i] ]
            if len(metal) == 1:
                if metalDict[metal[0]][i]:
                    img = imc_ac.get_image_writer(filename=os.path.join(dirname,filename), metals=[m])
                    img.save_image(mode='ome', compression=0, dtype=None, bigtiff=False)
            elif len(metal) > 1:
                print("{} metal has multiple matches found".format(metal_label))
            elif len([ entry for entry in metalDict if metal_label.upper().startswith(entry)]) == 0:
                print("{} metal does not exist in metasheet file".format(metal_label))

roi_map.close()
| StarcoderdataPython |
3375015 | # -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import ast
import re
import os.path
import tempfile
import shutil
import hashlib
import stat
from prestring.python import PythonModule
from functools import partial
from collections import namedtuple
from io import StringIO
from kamo.expr import (
WithContextExprVistor,
collect_variable_name
)
# Unique sentinel object (identity-compared "no value" marker).
marker = object()

"""
{module} :: {statement}+
{statement} :: {doctag} | {comment} | {pythoncode} | {if} | {for} | {deftag} | {text}
{doctag} :: '<%doc>' {text} '<%/doc>'
{comment} :: '##' {text}
{pythoncode} :: '<%' {text} '%>'
{if} :: '%if' {expr} ':' {text} ['%elif' {text} ':' {text}]* ['%else' {text} ':' {text}]? '%endif'
{for} :: '%for' {expr} 'in' {expr} ':' {text} %endfor
{deftag} :: '<%def' {defname} '>' {text} '</%def>'
{expr} :: {text - {newline}} | '(' {text} ')'
{newline} :: '\n'
{text} :: [{expr} '\n']+
"""
class Intern(object):
    """A named sentinel token; instances are compared by identity."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        template = '<Intern {!r}>'
        return template.format(self.name)
class Line(object):
    """Simple wrapper holding one line of template text.

    NOTE(review): not referenced elsewhere in this chunk — possibly dead code;
    confirm before removing.
    """
    def __init__(self, body):
        self.body = body
# Singleton lexer tokens; the parser dispatches on these by identity (`is`).
begin_doc = Intern("<%doc>")
end_doc = Intern("</%doc>")
begin_def = Intern("<%def>")
end_def = Intern("</%def>")
comment = Intern("##")
begin_code = Intern("<%")
# NOTE(review): the lexer below emits `end_doc` (not `end_code`) for "%>",
# and Parser.parse_code loops until `end_doc` — `end_code` appears unused.
end_code = Intern("%>")
begin_if = Intern("%if")
begin_else = Intern("%else")
begin_elif = Intern("%elif")
end_if = Intern("%endif")
begin_for = Intern("%for")
end_for = Intern("%endfor")
class Scanner(re.Scanner):
    """re.Scanner subclass that accumulates lexed tokens into ``self.body``.

    Calling the instance with a template string scans it line by line and
    returns the accumulated token list (Intern markers and plain strings).
    """

    def __init__(self, *args, **kwargs):
        super(Scanner, self).__init__(*args, **kwargs)
        self.body = []

    def append(self, x):
        # Collect a single token emitted by a lexicon rule.
        self.body.append(x)

    def extend(self, x):
        # Collect several tokens at once.
        self.body.extend(x)

    def __call__(self, body):
        lines = body.split("\n")
        for line in lines:
            self.scan(line)
        return self.body
# Lexer: a Scanner pre-loaded with the template lexicon.  It flattens a
# template into a stream of Intern markers and plain strings, scanning one
# line at a time (rules are tried in order; `.+` is the catch-all for text).
Lexer = partial(Scanner, [
    ('\s*<%doc>(.+)(?=</%doc>)', lambda s, x: s.extend([begin_doc, s.match.group(1)])),
    ("\s*<%(!?)\s*(.+)\s*(?=%>)", lambda s, x: s.extend([begin_code, s.match.group(1), s.match.group(2)])),
    ('\s*<%doc>', lambda s, x: s.append(begin_doc)),
    ('\s*</%doc>', lambda s, x: s.append(end_doc)),
    ('\s*<%def\s*name="([^>]+)"\s*>', lambda s, x: s.extend([begin_def, s.match.group(1)])),
    ('\s*</%def>', lambda s, x: s.append(end_def)),
    ('\s*## (.*)', lambda s, x: s.extend((comment, s.match.group(1)))),
    ("\s*<%(!?)", lambda s, x: s.extend([begin_code, s.match.group(1)])),
    # NOTE(review): "%>" emits end_doc (not end_code); Parser.parse_code
    # terminates on end_doc, so the two must stay in sync.
    ("\s*%>", lambda s, x: s.append(end_doc)),
    ("\s*%\s*if", lambda s, x: s.append(begin_if)),
    ("\s*%\s*elif", lambda s, x: s.append(begin_elif)),
    ("\s*%\s*else", lambda s, x: s.append(begin_else)),
    ("\s*%\s*endif", lambda s, x: s.append(end_if)),
    ("\s*%\s*for", lambda s, x: s.append(begin_for)),
    ("\s*%\s*endfor", lambda s, x: s.append(end_for)),
    (".+", lambda s, x: s.append(x))
])
# Parse-tree node types produced by Parser.
Doc = namedtuple("Doc", "body multiline")           # <%doc>...</%doc> block or ## comment
Code = namedtuple("Code", "body ast declared is_module_level")  # <% ... %> python code
Def = namedtuple("Def", "body name args declared")  # <%def name="f(args)">...</%def>
Text = namedtuple("Text", "body")                   # literal text with ${...} substitutions
Expr = namedtuple("Expr", "body ast decorators declared")  # a single python expression
If = namedtuple("If", "keyword expr body") # xxx: include if, elif, else
For = namedtuple("For", "keyword expr src body")    # %for expr in src: ... %endfor
Optimized = namedtuple("Optimized", "tokens")
class Parser(object):
def __init__(self):
self.stack = [[]]
self.frame = self.stack[-1]
self.depth = 0
self.i = 0
@property
def body(self):
return self.stack[0]
def push_frame(self):
# [[x, y, <pos>]] -> [[x, y, [<pos>]]]
frame = []
self.frame.append(frame)
self.depth += 1
self.frame = frame
def pop_frame(self):
frame = self.stack
for i in range(self.depth):
frame = frame[-1]
self.depth -= 1
self.frame = frame
def parse_expr(self, expr, decorators=None, is_declared=False): # hmm.
ast_node = ast.parse(expr).body[0]
if is_declared:
declared = collect_variable_name(ast_node)
else:
declared = set()
return Expr(expr,
ast_node,
decorators=decorators or [],
declared=declared)
def __call__(self, tokens):
self.i = 0
n = len(tokens)
while n > self.i:
self.parse_statement(tokens)
return self.body
def parse_statement(self, tokens):
t = tokens[self.i]
if t is begin_doc:
self.parse_doc(tokens)
elif t is comment:
self.parse_comment(tokens)
elif t is begin_code:
self.parse_code(tokens)
elif t is begin_if:
self.parse_if(tokens)
elif t is begin_elif:
self.parse_elif(tokens)
elif t is begin_else:
self.parse_else(tokens)
elif t is end_if:
self.parse_end_if(tokens)
elif t is begin_for:
self.parse_for(tokens)
elif t is end_for:
self.parse_end_for(tokens)
elif t is begin_def:
self.parse_def(tokens)
else:
self.parse_text(tokens)
def parse_doc(self, tokens):
self.i += 1 # skip
body = []
while tokens[self.i] is not end_doc:
body.append(tokens[self.i])
self.i += 1
self.i += 1 # skip
self.frame.append(Doc(body, multiline=True))
def parse_comment(self, tokens):
self.i += 1 # skip
self.frame.append(Doc([tokens[self.i]], multiline=False))
self.i += 1
def parse_code(self, tokens):
self.i += 1 # skip
is_module_level = bool(tokens[self.i])
self.i += 1 # skip
body = []
while tokens[self.i] is not end_doc:
body.append(tokens[self.i])
self.i += 1
self.i += 1 # skip
body = "\n".join(body)
ast_node = ast.parse(body)
declared = collect_variable_name(ast_node)
self.frame.append(Code(body,
ast_node,
declared=declared,
is_module_level=is_module_level))
def parse_def(self, tokens):
self.i += 1 # skip
body = []
arguments = tokens[self.i]
name = arguments.split("(", 1)[0]
args = [e.strip() for e in arguments[len(name) + 1:-1].split(",")]
self.i += 1
while tokens[self.i] is not end_def:
body.append(tokens[self.i])
self.i += 1
self.i += 1 # skip
parsedbody = []
for token, is_emitting_var in split_with(self.emit_var_rx, "\n".join(body)):
if is_emitting_var:
token = token[2:-1] # ${foo} -> foo
token_with_filter = [e.strip(" ") for e in token.split("|")] # foo|bar|boo -> [foo, bar, boo]
token = token_with_filter[0]
token = self.parse_expr(token, token_with_filter[1:])
parsedbody.append((token, is_emitting_var))
self.frame.append(Def([Text(parsedbody)], name, args, declared=set([name])))
def parse_if(self, tokens):
self.i += 1 # skip
self.frame.append(("if", self.parse_expr(tokens[self.i].strip(": ")))) # hmm.
self.i += 1
self.push_frame()
self.parse_statement(tokens)
def _create_if_block(self, tokens):
# create if-block, elif-block, else-block
self.pop_frame()
body = self.frame.pop()
keyword, cond = self.frame.pop()
self.frame.append(If(keyword, cond, body))
def parse_elif(self, tokens):
self._create_if_block(tokens)
self.i += 1 # skip
self.frame.append(("elif", self.parse_expr(tokens[self.i].strip(": ")))) # hmm.
self.i += 1
self.push_frame()
self.parse_statement(tokens)
def parse_else(self, tokens):
self._create_if_block(tokens)
self.i += 1 # skip
self.frame.append(("else", None)) # hmm.
self.i += 1
self.push_frame()
self.parse_statement(tokens)
    def parse_end_if(self, tokens):
        """Close the final arm of an if/elif/else chain and consume end_if."""
        self._create_if_block(tokens)
        self.i += 1
    def parse_for(self, tokens):
        """Parse ``for <target> in <iterable>:`` into a pending for-frame."""
        self.i += 1  # skip the begin_for marker
        # for expr in expr:
        # rsplit on the LAST " in " so an " in " inside the target/iterable
        # expression does not break the split
        expr, src = [e.strip(" ") for e in tokens[self.i].rsplit(" in ", 1)]
        expr = self.parse_expr(expr.strip(" "), is_declared=True)
        src = self.parse_expr(src.rstrip(": "))
        self.frame.append(("for", expr, src))
        self.i += 1
        self.push_frame()
        self.parse_statement(tokens)
def parse_end_for(self, tokens):
# create for-block
self.pop_frame()
body = self.frame.pop()
keyword, expr, src = self.frame.pop()
self.frame.append(For(keyword, expr, src, body))
self.i += 1 # skip
emit_var_rx = re.compile("\${[^}]+}") # 雑
def parse_text(self, tokens):
body = []
for token, is_emitting_var in split_with(self.emit_var_rx, tokens[self.i]):
if is_emitting_var:
token = token[2:-1] # ${foo} -> foo
token_with_filter = [e.strip(" ") for e in token.split("|")] # foo|bar|boo -> [foo, bar, boo]
token = token_with_filter[0]
token = self.parse_expr(token, token_with_filter[1:])
body.append((token, is_emitting_var))
self.frame.append(Text(body))
self.i += 1
def split_with(rx, sentence):
    """Split *sentence* by regex *rx* into (chunk, matched) pairs.

    Matched spans are tagged True, the literal text between them False;
    empty chunks are never emitted.
    """
    pieces = []
    rest = sentence
    while rest:
        hit = rx.search(rest)
        if hit is None:
            pieces.append((rest, False))
            break
        if hit.start() > 0:
            pieces.append((rest[:hit.start()], False))
        pieces.append((hit.group(0), True))
        rest = rest[hit.end():]
    return pieces
class _DeclaredStore(object):
    """Stack of name-sets tracking which identifiers each scope declared."""

    def __init__(self):
        self.stack = [set()]

    def __contains__(self, k):
        # a name counts as declared if any enclosing scope declared it
        for frame in self.stack:
            if k in frame:
                return True
        return False

    def push_frame(self, s):
        self.stack.append(s)

    def pop_frame(self):
        self.stack.pop()
class Optimizer(object):
    """Merges adjacent Text tokens (and inserts newline separators) so the
    compiler can emit fewer, larger write() calls. Recurses into If/For/Def
    bodies; other node types pass through untouched."""

    def optimize(self, tokens, text, result):
        """Fold *tokens* into *result*, accumulating literal text in *text*."""
        last_is_text = False
        for t in tokens:
            if isinstance(t, Text):
                # alternate runs of literal text / emitted expressions;
                # flush the accumulator whenever the run kind flips
                emitting_status = False
                for pair in t.body:
                    if pair[1] == emitting_status:  # same kind: keep accumulating
                        text.body.append(pair)
                    else:
                        emitting_status = not emitting_status
                        self.compact(text)
                        result.append(text)
                        text = Text([pair])
                if text.body[-1][1] is False:
                    # literal tail: a newline can be merged into the text
                    text.body.append(("\n", False))
                else:
                    self.compact(text)
                    result.append(text)
                    text = Text([("\n", False)])
                last_is_text = True
            else:
                if last_is_text:
                    # flush accumulated text before a structural node
                    self.compact(text)
                    result.append(text)
                    text = Text([("", False)])
                last_is_text = False
                if isinstance(t, If):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(If(t.keyword, t.expr, body))
                elif isinstance(t, For):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(For(t.keyword, t.expr, t.src, body))
                elif isinstance(t, Def):
                    body = []
                    self.optimize(t.body, Text([("", False)]), body)
                    result.append(Def(body, t.name, t.args, t.declared))
                else:
                    result.append(t)
        if last_is_text:
            # flush whatever text is still pending at end of input
            self.compact(text)
            result.append(text)

    def compact(self, text):
        """Collapse a run of literal chunks into one string; drop an empty head."""
        if text.body[0][1] is False:  # literal text run
            body = "".join(pair[0] for pair in text.body)
            text.body.clear()
            text.body.append((body, False))
        if text.body[0][0] == "":
            text.body.pop(0)

    def __call__(self, tokens):
        r = []
        self.optimize(tokens, Text([("", False)]), r)
        self.body = Optimized(r)
        return self.body
class Compiler(object):
    """Walks the parsed template AST and emits Python source (via a
    PythonModule builder) defining a ``render(io, **c)`` function."""

    def __init__(self, m=None, default="''", getter="c[{!r}]", default_decorators=["str"]):
        self.depth = 0
        self.m = m or PythonModule()
        self.variables = None
        self.toplevel = None
        self.default = default
        # format template used to read a context variable, e.g. c['name']
        self.getter = getter
        self.declaredstore = _DeclaredStore()
        # wrappers applied to every emitted expression (str() by default)
        self.default_decorators = default_decorators
        self.optimized = False

    def __call__(self, tokens, name="render", args="io, **c"):
        """
        from: ${x}
        create:
            def render(io, **context):
                context["x"]
        """
        if isinstance(tokens, Optimized):
            tokens = tokens.tokens
            self.optimized = True
        self.toplevel = self.m.submodule()
        with self.m.def_(name, args):
            self.variables = self.m.submodule()
            self.variables.stmt("write = io.write")
            # self.variables.stmt("get = c.get")
            # self.variables.stmt("M = object()")
            for t in tokens:
                self.visit(t)
        self.optimized = False
        return self.m

    def visit(self, t):
        # dispatch on node class name: Text -> visit_text, If -> visit_if, ...
        method = getattr(self, "visit_{}".format(t.__class__.__name__.lower()))
        method(t)

    def visit_text(self, node):
        for token, is_visit_var in node.body:
            if is_visit_var:
                self.m.stmt("write({})".format(self.calc_expr(token, emit=True)))
            else:
                self.m.stmt("write({!r})".format(token))
        if not self.optimized:
            # unoptimized token streams carry no explicit newlines
            self.m.stmt("write('\\n')")

    def visit_doc(self, doc):
        if doc.multiline:
            self.m.stmt("########################################")
        for line in doc.body:
            self.m.stmt("# {}".format(line))
        if doc.multiline:
            self.m.stmt("########################################")
        self.m.sep()

    def visit_code(self, code):
        # module-level code goes above render(); inline code stays in place
        m = self.toplevel if code.is_module_level else self.m
        for line in code.body.split("\n"):  # xxx:
            m.stmt(line)
        self.declaredstore.stack[-1].update(code.declared)
        m.sep()

    def visit_def(self, node):
        self.declaredstore.stack[-1].update(node.declared)
        with self.m.def_(node.name, *node.args):
            try:
                self.declaredstore.push_frame(set(node.args))
                for text in node.body:
                    self.visit_text(text)
                self.m.return_("''")
            finally:
                self.declaredstore.pop_frame()

    def calc_expr(self, expr, emit=False):
        """Render *expr* to Python source; wrap in filters when emitting."""
        io = StringIO()
        v = WithContextExprVistor(io, self.declaredstore, getter=self.getter)
        v.visit(expr.ast)
        result = io.getvalue()
        if emit:
            if expr.decorators:
                for f in expr.decorators:
                    result = "{}({})".format(f, result)
            for f in self.default_decorators:
                result = "{}({})".format(f, result)
        return result

    def visit_if(self, node):
        if node.expr is None:  # the else arm carries no condition
            self.m.stmt("{}:".format(node.keyword))
        else:
            self.m.stmt("{} {}:".format(node.keyword, self.calc_expr(node.expr)))
        with self.m.scope():
            self._visit_children(node.body)

    def visit_for(self, node):
        self.m.stmt("{} {} in {}:".format(node.keyword, node.expr.body, self.calc_expr(node.src)))
        # loop targets are declared for the duration of the loop body
        self.declaredstore.push_frame(node.expr.declared)
        try:
            with self.m.scope():
                self._visit_children(node.body)
        finally:
            self.declaredstore.pop_frame()

    def _visit_children(self, node):
        if isinstance(node, list):
            for c in node:
                self._visit_children(c)
        else:
            self.visit(node)
class TemplateNotFound(Exception):
    """Raised when no template with the requested name exists in the search path."""
class Template(object):
    """A compilable template loaded either from a string or from a file.

    The source is lexed/parsed/compiled into a Python module whose
    ``render(io, **context)`` function writes output to a file-like object.
    """

    def __init__(self, source=None, module_id=None, path=None, tmpdir=None,
                 manager=None, optimize=True, nocache=False):
        # from file: path is not None; from string: source is not None
        self._source = source
        self.module_id = module_id or hashlib.md5(source.encode("utf-8")).hexdigest()
        self.tmpdir = tmpdir
        self.path = path
        self.manager = manager or get_default_manager()
        self.optimize = optimize
        self.nocache = nocache

    @property
    def source(self):
        """Template text; read lazily from ``path`` when not given directly."""
        if self._source is not None:
            return self._source
        with open(self.path) as rf:
            self._source = rf.read()
        return self._source

    def render(self, **kwargs):
        """Render with *kwargs* as the template context and return a string."""
        io = StringIO()
        self.get_render_function()(io, **kwargs)
        return io.getvalue()

    @property
    def code(self):
        """Path of the generated Python module (useful for debugging)."""
        import inspect
        return inspect.getsourcefile(self.get_render_module())

    def get_render_module(self):
        """Return the compiled module, via the manager's cache unless nocache."""
        module = None
        if not self.nocache:
            module = self.manager.load_module(self.module_id, self.path)
        if module is not None:
            return module
        module = self.compile()
        return module

    def get_render_function(self):
        return self.get_render_module().render

    def _compile(self):
        """Run the lex -> parse [-> optimize] -> compile pipeline."""
        lexer = Lexer()
        parser = Parser()
        compiler = Compiler()
        source = self.source
        if self.optimize:
            optimizer = Optimizer()
            return compiler(optimizer(parser(lexer(source))), name="render")
        else:
            return compiler(parser(lexer(source)), name="render")

    def compile(self):
        """Compile to a module file and import it.

        Fix: honor ``self.tmpdir`` so the generated module lands where the
        manager's ``load_module`` looks for it; the original passed
        ``tmpdir=None``, which silently broke caching whenever a non-default
        tmpdir was configured.
        """
        code = str(self._compile())
        return _compile(self.module_id, code, tmpdir=self.tmpdir)
class TemplateManager(object):
    """Looks up, caches and compiles templates from a list of directories."""

    def __init__(self, directories=["."], optimize=True,
                 template_factory=None,
                 tmpdir=tempfile.gettempdir()):
        # NOTE: the mutable/eager defaults (list literal, gettempdir()) are
        # evaluated once at definition time; both are only read here, so the
        # sharing is harmless.
        self.directories = directories
        self.template_cache = {}
        self.module_cache = {}  # compiled-module cache keyed by module_id
        self.optimize = optimize
        self.tmpdir = tmpdir
        self.template_factory = template_factory or Template

    def lookup(self, filename):
        """Return the template for *filename*, searching the directories.

        Raises TemplateNotFound when no directory contains the file.
        """
        if filename in self.template_cache:
            return self.template_cache[filename]
        for d in self.directories:
            path = os.path.join(d, filename)
            if os.path.exists(path):
                return self.load_template(filename, path)
        raise TemplateNotFound(filename)

    def load_template(self, filename, path):
        """Create, cache and return a file-backed template."""
        template = self.template_factory(None,
                                         module_id=filename,
                                         path=path,
                                         tmpdir=self.tmpdir,
                                         manager=self,
                                         optimize=self.optimize)
        self.template_cache[filename] = template
        return template

    def create_template(self, s):
        """Create, cache and return a string-backed template.

        Fix: the original returned the factory result immediately, leaving
        its caching lines as dead code after ``return``; the template is now
        actually stored in the cache under its module_id.
        """
        template = self.template_factory(s,
                                         module_id=None,
                                         path=None,
                                         tmpdir=self.tmpdir,
                                         manager=self,
                                         optimize=self.optimize)
        self.template_cache[template.module_id] = template
        return template

    def load_module(self, module_id, path):
        """Return the compiled module for *module_id*, or None if stale/absent."""
        if module_id in self.module_cache:
            logger.info("cached: module_id=%s", module_id)
            return self.module_cache[module_id]
        try:
            module_path = os.path.join(self.tmpdir, module_id)
            if path is not None and os.path.exists(path) and os.path.exists(module_path):
                file_mtime = os.stat(path)[stat.ST_MTIME]
                if file_mtime >= os.stat(module_path)[stat.ST_MTIME]:
                    # source changed after compilation -> force a recompile
                    logger.info("cache is obsoluted: module_id=%s (mtime=%s)", module_id, file_mtime)
                    return None
            module = load_module(module_id, module_path)
            self.module_cache[module_id] = module
            return module
        except FileNotFoundError:
            return None
# Process-wide manager used when a Template is created without one.
default_manager = TemplateManager([])


def get_default_manager():
    """Return the process-wide TemplateManager."""
    # reading a module global needs no 'global' declaration
    return default_manager


def set_default_manager(manager):
    """Replace the process-wide TemplateManager."""
    global default_manager
    default_manager = manager
def load_module(module_id, path):
    """Import the Python file at *path* under the module name *module_id*."""
    from importlib.machinery import SourceFileLoader
    return SourceFileLoader(module_id, path).load_module()
def _compile(module_id, code, tmpdir=None):
    """Write *code* to <tmpdir>/<module_id> and import it as a module.

    Fix: the file descriptor returned by ``tempfile.mkstemp`` was never
    closed, leaking one descriptor per compilation; it is now closed in a
    ``finally`` so the leak is plugged even if the write fails.
    """
    tmpdir = tmpdir or tempfile.gettempdir()
    logger.debug("compiled code:\n%s", code)
    fd, path = tempfile.mkstemp()
    try:
        os.write(fd, code.encode("utf-8"))
    finally:
        os.close(fd)  # mkstemp descriptors are not closed automatically
    dst = os.path.join(tmpdir, module_id)
    logger.info("generated module file: %s", dst)
    shutil.move(path, dst)
    return load_module(module_id, dst)
if __name__ == "__main__":
    # Demo: run the full pipeline (lex -> parse -> optimize -> compile) over
    # the bundled sample template, print the generated module, and render it.
    from kamo._sample import template
    print("========================================")
    print("input")
    print("========================================")
    print(template)
    print("========================================")
    print("compiled")
    print("========================================")
    lexer = Lexer()
    lexer(template)
    parser = Parser()
    parser(lexer.body)
    optimizer = Optimizer()
    optimizer(parser.body)
    compiler = Compiler()
    compiler(optimizer.body)
    # NOTE(review): "{:3< }" parses as fill='3', align='<' with no width, so
    # it does not left-pad the line number as presumably intended ("{:<3}")
    # -- confirm before changing.
    for i, line in enumerate(str(compiler.m).split("\n")):
        print("{:3< }: {}".format(i, line))
    env = {}
    exec(str(compiler.m), env)
    import sys
    print("========================================")
    print("output")
    print("========================================")
    env["render"](sys.stdout, x=10, xs=["foo", "bar", "boo"], hello="hello ", boo="(o_0)")
| StarcoderdataPython |
30739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import concurrent.futures
import logging
import tripleohelper.baremetal
from tripleohelper import ovb_bmc
import tripleohelper.provisioners.openstack.provisioner as os_provisioner
from tripleohelper.provisioners.openstack import utils as os_utils
import tripleohelper.server as server
LOG = logging.getLogger('tripleohelper')
class Baremetal(server.Server):
    """A baremetal node, emulated by an OpenStack instance (OVB style)."""

    def __init__(self, nova_api, neutron, keypair, key_filename, security_groups, name):
        # NOTE(review): both the explicit base __init__ and super().__init__
        # are called with different arguments -- looks redundant; confirm
        # which one the base class actually needs.
        server.Server.__init__(self, None, via_ip='192.0.2.240', key_filename=key_filename)
        self.nova_api = nova_api
        self.neutron = neutron
        self.mac = None                 # MAC of the provision port, set by deploy()
        self._os_instance = None        # backing Nova server object
        self._provision_port_id = None  # Neutron port id on the provision net
        self._keypair = keypair
        self._key_filename = key_filename
        self._security_groups = security_groups
        self.name = name
        self.flavor = None
        self.status = None
        super(Baremetal, self).__init__(hostname=None)

    def deploy(self, image_name, ip, flavor='m1.small'):
        """Create the node.

        This method should only be called by the BaremetalFactory.
        """
        # dedicated Neutron port with a fixed IP on the provision network
        body_value = {
            "port": {
                "admin_state_up": True,
                "name": self.name + '_provision',
                "network_id": os_utils.get_network_id(self.nova_api, 'provision_bob'),
                'fixed_ips': [{'ip_address': ip}]}}
        response = self.neutron.create_port(body=body_value)
        self._provision_port_id = response['port']['id']
        self.mac = response['port']['mac_address']
        image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)
        flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)
        # TODO(Gonéri): We don't need keypair for the BM nodes
        keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)
        # Ensure with get DHCP lease on the provision network first
        nics = [{'port-id': self._provision_port_id}]
        self._os_instance = os_provisioner.build_openstack_instance(
            self.nova_api,
            self.name,
            image_id_to_boot_from,
            flavor_id,
            keypair_id,
            nics)
        if not self._os_instance:
            LOG.error("deployment has failed")
            raise Exception()
        os_provisioner.add_provision_security_group(self.nova_api)
        os_utils.add_security_groups(self._os_instance, ['provision'])
        os_utils.add_security_groups(self._os_instance, self._security_groups)
        LOG.info("add security groups '%s'" % self._security_groups)
        LOG.info("instance '%s' ready to use" % self.name)
        # the instance should be off for Ironic
        self._os_instance.stop()

    def admin_state_up(self, state):
        """Turns up/down the network connection."""
        self.neutron.update_port(self._provision_port_id, {'port': {'admin_state_up': state}})

    def pxe_netboot(self, filename):
        """Specify which file ipxe should load during the netboot."""
        # DHCP options point the node at the undercloud's HTTP/TFTP server
        new_port = {
            'extra_dhcp_opts': [
                {'opt_name': 'bootfile-name', 'opt_value': 'http://192.0.2.240:8088/' + filename, 'ip_version': 4, },
                {'opt_name': 'tftp-server', 'opt_value': '192.0.2.240', 'ip_version': '4'},
                {'opt_name': 'server-ip-address', 'opt_value': '192.0.2.240', 'ip_version': '4'}
            ]
        }
        self.neutron.update_port(self._provision_port_id, {'port': new_port})

    def refresh_nova_instance(self):
        """Re-fetch the backing Nova server to get fresh status fields."""
        self._os_instance = self.nova_api.servers.get(self._os_instance.id)

    def shutdown(self):
        """Stop the backing instance if it is currently running."""
        self.refresh_nova_instance()
        if self._os_instance.status == 'ACTIVE':
            self._os_instance.stop()

    def refresh_status(self, undercloud):
        """Resync port/hostname state and push root SSH access via heat-admin."""
        self.refresh_nova_instance()
        ports = self.neutron.list_ports(name='%s_provision' % self.name)
        self.hostname = ports['ports'][0]['fixed_ips'][0]['ip_address']
        self.via_ip = undercloud.hostname
        self._provision_port_id = ports['ports'][0]['id']
        if self._os_instance.status == 'SHUTOFF':
            return
        command = """cat .ssh/authorized_keys | ssh -o UserKnownHostsFile=/dev/null -o PasswordAuthentication=no -o stricthostkeychecking=no heat-admin@{node_ip} 'sudo bash -c "cat >> ~root/.ssh/authorized_keys"'"""
        # The VM may be blocked because of ipxe
        undercloud.run(command.format(node_ip=self.hostname), user='stack', success_status=(0, 255,))
class BaremetalFactory(tripleohelper.baremetal.BaremetalFactory):
    """Creates and tracks the pool of emulated baremetal nodes plus their BMC."""

    def __init__(self, nova_api, neutron, keypair, key_filename, security_groups,
                 os_params={}):
        # os_params is read-only here (forwarded to create_bmc), so the
        # mutable default is harmless.
        self.instackenv = []     # instackenv.json entries, one per node
        self.nova_api = nova_api
        self.neutron = neutron
        self._idx = 100          # next provision-net IP suffix (192.0.2.<idx>)
        self._keypair = keypair
        self._key_filename = key_filename
        self._security_groups = security_groups
        self.nodes = []
        if os_params:
            self.bmc = self.create_bmc(**os_params)

    def initialize(self, size=2):
        """Populate the node poll.

        :param size: the number of node to create.
        """
        # The IP should be in this range, this is the default DHCP range used by the introspection.
        # inspection_iprange = 192.0.2.100,192.0.2.120
        for i in range(0, size):
            self.nodes.append(
                Baremetal(
                    self.nova_api,
                    self.neutron,
                    self._keypair,
                    self._key_filename,
                    self._security_groups,
                    name='baremetal_%d' % i))
        # deploy the nodes in parallel; each gets its own provision IP
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            for bm_node in self.nodes:
                future = executor.submit(
                    bm_node.deploy,
                    'ipxe.usb',
                    '192.0.2.%d' % self._idx,
                    flavor='m1.large')
                self._idx += 1
                bm_node._future = future
        for bm_node in self.nodes:
            bm_node._future.result()  # re-raise any deploy failure
            pm_addr = self.bmc.register_host(bm_node.name)
            self.instackenv.append({
                "pm_type": "pxe_ipmitool",
                "mac": [bm_node.mac],
                # TODO(Gonéri): We should get these informations from the baremetal node's flavor
                "cpu": "4",
                "memory": "8196",
                "disk": "80",
                "arch": "x86_64",
                "pm_user": "admin",
                "pm_password": "password",
                "pm_addr": pm_addr
            })
        self.bmc.ssh_pool.stop_all()

    def reload_environment(self, undercloud):
        """Rebuild the node list from already-existing Nova servers."""
        servers = {}
        for s in self.nova_api.servers.list():
            if s.name.startswith('baremetal_'):
                servers[s.name] = s
        for name, s in sorted(servers.items()):
            node = Baremetal(
                self.nova_api,
                self.neutron,
                keypair=self._keypair,
                key_filename=self._key_filename,
                security_groups=self._security_groups,
                name=s.name)
            node._os_instance = s
            self.nodes.append(node)
        # pair each node with its instackenv entry (same ordering)
        i = iter(self.nodes)
        for instack_node in self.load_instackenv_content(undercloud):
            node = next(i)
            node.mac = instack_node['mac'][0]
            node.refresh_status(undercloud)
        # restore the flavor
        undercloud.add_environment_file(user='stack', filename='stackrc')
        command = """ironic node-list --fields properties|sed -n 's/.*profile:\([-_a-z]*\),.*/\\1/p'"""
        flavor_list = undercloud.run(command, user='stack')[0].split()
        if flavor_list:
            i = iter(flavor_list)
            for node in self.nodes:
                node.flavor = next(i)

    def create_bmc(self, os_username, os_password, os_project_id, os_auth_url):
        """Deploy the BMC machine.

        This machine hosts the ipmi servers, each ipmi server is associated
        to a baremetal node and has its own IP.

        Fix: the os_password forward had been replaced by a ``<PASSWORD>``
        placeholder (a redaction artifact), which is a syntax error; the
        parameter is forwarded again.
        """
        bmc = ovb_bmc.OvbBmc(
            nova_api=self.nova_api,
            neutron=self.neutron,
            keypair=self._keypair,
            key_filename=self._key_filename,
            security_groups=self._security_groups,
            image_name='Fedora 23 x86_64',
            ip='192.0.2.254',
            os_username=os_username,
            os_password=os_password,
            os_project_id=os_project_id,
            os_auth_url=os_auth_url)
        return bmc

    def pxe_netboot(self, filename='boot.ipxe'):
        """Configure the OVB underlying Neutron to do a network boot

        :param filename: the name of the IPXE script to boot on. Default
        is boot.ipxe.
        """
        for bm_node in self.nodes:
            bm_node.pxe_netboot(filename)
| StarcoderdataPython |
1679173 | <filename>app/api/track/service/merging.py
from typing import Union, Optional
from tracardi.domain.profile import Profiles, Profile
from tracardi.service.storage.driver import storage
async def merge(profile: Optional[Profile], limit=2000) -> Union[Profiles, None]:
    """Merge duplicate profiles when the flow marked the profile for merging.

    Returns the merged Profiles, or None when there is nothing to do.
    """
    # profile is None for profile-less events; nothing to merge then
    if profile is None:
        return None
    operation = profile.operation
    # merge only when the flow both flagged merging AND produced an update
    if not (operation.needs_merging() and operation.needs_update()):
        return None
    return await profile.merge(storage.driver.profile.load_profiles_to_merge, limit=limit)
| StarcoderdataPython |
3364496 | import komand
from .schema import GetAuthenticationSourceInput, GetAuthenticationSourceOutput
# Custom imports below
from komand_rapid7_insightvm.util import endpoints
from komand_rapid7_insightvm.util.resource_requests import ResourceRequests
class GetAuthenticationSource(komand.Action):
    """Action that retrieves the details of one InsightVM authentication source."""

    def __init__(self):
        # Fix: the original used super(self.__class__, self), which recurses
        # infinitely as soon as this class is subclassed; name the class
        # explicitly instead.
        super(GetAuthenticationSource, self).__init__(
            name='get_authentication_source',
            description='Get the details for an authentication source',
            input=GetAuthenticationSourceInput(),
            output=GetAuthenticationSourceOutput())

    def run(self, params={}):
        """Fetch the authentication source identified by params['id'].

        ``params`` is only read, so the shared mutable default is harmless.
        """
        resource_helper = ResourceRequests(self.connection.session, self.logger)
        endpoint = endpoints.AuthenticationSource.authentication_sources(
            self.connection.console_url, params.get('id'))
        # lazy %-style args: the message is only formatted if the level is enabled
        self.logger.info("Using %s ...", endpoint)
        response = resource_helper.resource_request(endpoint=endpoint)
        return {"authentication_source": response}
| StarcoderdataPython |
17466 | """Exceptions for Renault API."""
class RenaultException(Exception):  # noqa: N818
    """Base exception for Renault API errors."""
class NotAuthenticatedException(RenaultException):  # noqa: N818
    """You are not authenticated, or authentication has expired."""
| StarcoderdataPython |
1718969 | <reponame>mattmurch/furl
# -*- coding: utf-8 -*-
#
# furl - URL manipulation made simple.
#
# <NAME>
# grunseid.com
# <EMAIL>
#
# License: Build Amazing Things (Unlicense)
#
from .compat import string_types
absent = object()
def callable_attr(obj, attr):
    """True when *obj* has attribute *attr* and that attribute is callable."""
    candidate = getattr(obj, attr, None)
    return candidate is not None and callable(candidate)
def is_iterable_but_not_string(v):
    """True for iterables (objects with a callable __iter__) other than strings."""
    if isinstance(v, string_types):
        return False
    return callable_attr(v, '__iter__')
| StarcoderdataPython |
3354353 | <reponame>cwandtj/A2P2
import glob, re, os, operator, itertools, copy, collections
from math import sin, cos, acos,radians
import parser
import our_module
import ast
import sys
# change runjob.sh
#our_module.change_vasp_path()

# truncate the bookkeeping file used by our_module.create_compound_folder_*
open('compound_directories', 'w').close()

# Choose the files to work on depending on the command-line arguments:
#   no args  -> every *.cif / *.vasp in the current directory
#   -l FILE  -> read the file names from a list file
#   -f F...  -> the file names follow directly on the command line
cif_files = []
poscar_files = []
arguments = sys.argv
if len(arguments) == 1:
    cif_files = glob.glob('*.cif')  # all "cif" files in the current directory
    poscar_files = glob.glob('*.vasp')  # all "vasp" files in the current directory
if (len(arguments) == 3) and arguments[1] == '-l':
    f_list = open(arguments[2], 'r')
    lines = f_list.readlines()
    for line in lines:
        line_list = filter(None, line.strip().split())
        for file_name in line_list:
            if file_name[-4:] == '.cif':
                cif_files.append(file_name)
            if file_name[-5:] == '.vasp':
                poscar_files.append(file_name)
if (len(arguments) > 2) and arguments[1] == '-f':
    f_list = arguments[2:]
    for file_name in f_list:
        if file_name[-4:] == '.cif':
            cif_files.append(file_name)
        if file_name[-5:] == '.vasp':
            # BUGFIX: .vasp files were appended to cif_files, which made the
            # converter below treat POSCAR input as CIF; route them to
            # poscar_files as in the -l branch.
            poscar_files.append(file_name)
# we first convert those cif files to postcar/vasp files :) the longest part
#######################################################################################################################
# For each CIF file: read the cell parameters, atom sites and symmetry
# operators, expand the symmetry, deduplicate atoms, and write a POSCAR.
for file in cif_files:
    open(file[:-4]+'.vasp', 'w').close() # a POSCAR file is created
    poscar_file = open(file[:-4]+'.vasp', 'w')
    cif_file = open(file, 'r') # one of the "cif" files is opened
    line = cif_file.readline()
    # variables
    chemical_formula = ''
    chemical_formula_dict = collections.OrderedDict()
    a = 0.0
    b = 0.0
    c = 0.0
    alpha = 0.0
    beta = 0.0
    gamma = 0.0
    matrix = []
    atom_site_count = 0
    number_of_isotops = 0
    nbr_of_atoms = 0
    # NOTE(review): "dict" shadows the builtin for the rest of this loop body
    dict = collections.OrderedDict()
    records = collections.OrderedDict() # to build up chemical formula in case we don't have one
    formulae = []
    keys = []
    while line:
        # read the chemical formula
        if our_module.chemical_formula_extract(line):
            chemical_formula = our_module.chemical_formula_extract(line)
        # read a, b, c, alpha, beta, and gamma
        if our_module.cell_length_angle_extract('cell_length_a', line):
            a = our_module.cell_length_angle_extract('cell_length_a', line)
        if our_module.cell_length_angle_extract('cell_length_b', line):
            b = our_module.cell_length_angle_extract('cell_length_b', line)
        if our_module.cell_length_angle_extract('cell_length_c', line):
            c = our_module.cell_length_angle_extract('cell_length_c', line)
        if our_module.cell_length_angle_extract('cell_angle_alpha' , line):
            alpha = radians(our_module.cell_length_angle_extract('cell_angle_alpha' , line))
        if our_module.cell_length_angle_extract('cell_angle_beta' , line):
            beta = radians(our_module.cell_length_angle_extract('cell_angle_beta' , line))
        if our_module.cell_length_angle_extract('cell_angle_gamma' , line):
            gamma = radians(our_module.cell_length_angle_extract('cell_angle_gamma' , line))
        line = line.strip()
        # _atom_site_ header block: collect the column names, then read the
        # per-atom rows until a line has a different column count
        if ('_atom_site_' == line[:11]):
            key = line.strip().split(' ')[0]
            dict[key] = []
            atom_site_count = 1
            line = cif_file.readline()
            keys.append(key)
            line = line.strip()
            while ('_atom_site_' == line[:11]):
                key = line.strip().split(' ')[0]
                keys.append(key)
                dict[key] = []
                atom_site_count += 1
                line = cif_file.readline().strip()
            line_list = filter(None, line.strip().split(' '))
            while (len(line_list) == atom_site_count):
                for j in range(atom_site_count):
                    dict[keys[j]].append(line_list[j])
                line = cif_file.readline()
                line_list = filter(None, line.strip().split(' '))
                number_of_isotops += 1
        # symmetry operator block: each entry is "x, y, z"-style expressions,
        # optionally quoted and/or prefixed with an index number
        if '_symmetry_equiv_pos_as_xyz' in line:
            tmp = cif_file.readline().strip()
            while (len(tmp) != 0):
                if tmp[0].isdigit():
                    try:
                        tmp = tmp[tmp.index("'")+1:]
                        tmp = tmp[:tmp.index("'")]
                        tmp = [i.strip() for i in tmp.split(',')]
                        if (tmp[0] != 'x') or (tmp[1] != 'y') or (tmp[2] != 'z'):
                            formulae.append(tmp)
                        tmp = cif_file.readline().strip()
                    except ValueError:
                        tmp = tmp.split(' ')[1:]
                        tmp = ' '.join(tmp)
                        tmp = tmp.strip()
                        tmp = [i.strip() for i in tmp.split(',')]
                        if (tmp[0] != 'x') or (tmp[1] != 'y') or (tmp[2] != 'z'):
                            formulae.append(tmp)
                        tmp = cif_file.readline().strip()
                else:
                    try:
                        tmp = tmp[tmp.index("'")+1:]
                        tmp = tmp[:tmp.index("'")]
                        tmp = [i.strip() for i in tmp.split(',')]
                        if (tmp[0] != 'x') or (tmp[1] != 'y') or (tmp[2] != 'z'):
                            # NOTE(review): tmp_list is undefined here -- this
                            # branch raises NameError if ever taken; probably
                            # meant tmp. Confirm before fixing.
                            x_cor, y_cor, z_cor = tmp_list[0], tmp_list[1], tmp_list[2]
                            x, y, z = 1,1,1
                            code_x, code_y, code_z = parser.expr(x_cor).compile(), \
                                parser.expr(y_cor).compile(), parser.expr(z_cor).compile()
                            # NOTE(review): code_x is tested twice; the third
                            # isinstance probably intended code_z.
                            if (isinstance(eval(code_x), (int, long)) and isinstance(eval(code_y), (int, long))
                                    and isinstance(eval(code_x), (int, long))):
                                formulae.append(tmp)
                        tmp = cif_file.readline().strip()
                    except ValueError:
                        if not isinstance(tmp, list):
                            tmp = [i.strip() for i in tmp.split(',')]
                        if len(tmp) == 3:
                            if (tmp[0] != 'x') or (tmp[1] != 'y') or (tmp[2] != 'z'):
                                x_cor, y_cor, z_cor = tmp[0], tmp[1], tmp[2]
                                x, y, z = 1,1,1
                                code_x, code_y, code_z = parser.expr(x_cor).compile(), \
                                    parser.expr(y_cor).compile(), parser.expr(z_cor).compile()
                                # NOTE(review): same duplicated code_x check as above.
                                if (isinstance(eval(code_x), (int, long)) and isinstance(eval(code_y), (int, long))
                                        and isinstance(eval(code_x), (int, long))):
                                    formulae.append(tmp)
                        elif len(tmp) != 3:
                            # not a symmetry entry: rewind so the outer loop re-reads it
                            x = cif_file.readline()
                            cif_file.seek(-2*len(x), 1)
                            break
                        tmp = cif_file.readline().strip()
        line = cif_file.readline()
    #####################################################################
    # Build the lattice vectors from a/b/c and the cell angles.
    gamma_star = 0.0
    try:
        val = (cos(alpha)*cos(beta)-cos(gamma))/(sin(alpha)*sin(beta))
        gamma_star = acos(val)
    except ZeroDivisionError:
        print "Division by zero!"
    matrix.append([a*sin(beta), 0.0, a*cos(beta)])
    matrix.append([-b*sin(alpha)*cos(gamma_star), b*sin(alpha)*sin(gamma_star), b*cos(alpha)])
    matrix.append([0.0, 0.0, c])
    # Record the literal atom sites (wrapping negative fractionals into [0,1)).
    for i in range(number_of_isotops):
        x = float(dict['_atom_site_fract_x'][i])
        y = float(dict['_atom_site_fract_y'][i])
        z = float(dict['_atom_site_fract_z'][i])
        if x < 0:
            x = 1+x
        if y < 0:
            y = 1+y
        if z < 0:
            z = 1+z
        if dict['_atom_site_type_symbol'][i] in records.keys():
            records[dict['_atom_site_type_symbol'][i]].append((x,y,z))
        else:
            records.update({dict['_atom_site_type_symbol'][i]:[]})
            records[dict['_atom_site_type_symbol'][i]].append((x,y,z))
    #################################################################
    # dealing with formulae
    ###########################################################
    # Apply every symmetry operator to every atom site; "/" becomes "*1.0/"
    # to force float division under Python 2 semantics.
    for formula in formulae:
        #poscar_file.write("'"+form+"'"+"\n")
        x_cor, y_cor, z_cor = formula[0].strip(), formula[1].strip(), formula[2].strip()
        if (x_cor != 'x' or y_cor != 'y' or z_cor != 'z'):
            x_cor = x_cor.replace("/","*1.0/")
            y_cor = y_cor.replace("/","*1.0/")
            z_cor = z_cor.replace("/","*1.0/")
            code_x = parser.expr(x_cor).compile()
            code_y = parser.expr(y_cor).compile()
            code_z = parser.expr(z_cor).compile()
            for i in range(number_of_isotops):
                x = float (dict['_atom_site_fract_x'][i])
                y = float (dict['_atom_site_fract_y'][i])
                z = float (dict['_atom_site_fract_z'][i])
                x_n, y_n, z_n = eval(code_x), eval(code_y), eval(code_z)
                if x_n < 0.0:
                    x_n = 1.0+x_n
                elif x_n == 0:
                    x_n = 0.0
                elif x_n > 1.0:
                    x_n = x_n%1
                if y_n < 0.0:
                    y_n = 1.0+y_n
                elif y_n == 0:
                    y_n = 0.0
                elif y_n > 1.0:
                    # NOTE(review): wraps y_n using x_n -- almost certainly a
                    # typo for y_n%1; confirm against reference structures.
                    y_n = x_n%1
                if z_n < 0.0:
                    z_n = 1.0+z_n
                elif z_n == 0:
                    z_n = 0.0
                elif z_n > 1.0:
                    z_n = z_n%1
                if dict['_atom_site_type_symbol'][i] in records.keys():
                    records[dict['_atom_site_type_symbol'][i]].append((x_n,
                                                                      y_n,
                                                                      z_n))
                else:
                    records.update({dict['_atom_site_type_symbol'][i]:[]})
                    records[dict['_atom_site_type_symbol'][i]].append((x_n,
                                                                      y_n,
                                                                      z_n))
    # Deduplicate exact repeats, then drop atoms closer than 1 Angstrom.
    for key in records.keys():
        redundance_removed = list(set(records[key]))
        records[key] = redundance_removed
    for key in records.keys():
        for cordinate1 in records[key]:
            for cordinate2 in records[key]:
                cordinate1_copy = (cordinate1[0]*a,cordinate1[1]*b,cordinate1[2]*c)
                cordinate2_copy = (cordinate2[0]*a,cordinate2[1]*b,cordinate2[2]*c)
                if (our_module.calculate_distance(cordinate1_copy, cordinate2_copy) < 1) and \
                        (cordinate1_copy != cordinate2_copy):
                    records[key].remove(cordinate2)
    for elem in records.keys():
        chemical_formula_dict.update({elem:len(records[elem])})
    if chemical_formula == '':
        for elem in records.keys():
            chemical_formula += elem + str(chemical_formula_dict[elem]) + " "
    atomicity = re.findall(r'\d+', chemical_formula)
    atom_symb = ''.join(i for i in chemical_formula if not i.isdigit())
    atom_symb = atom_symb.strip().split(' ')
    # Writing to POSCAR
    #1. comment line and universal scaling factor
    ################################################
    poscar_file.write(chemical_formula + '\n')
    poscar_file.write('1.0\n')
    ################################################
    #2. lattice vectors
    for i in range(len(matrix)):
        poscar_file.write("%-9f%-9f%-9f \n" %(matrix[i][0], matrix[i][1], matrix[i][2]))
    #3. element symbols
    ###########################################################
    nbr_of_atom_symb = len(atom_symb)
    for i in range(nbr_of_atom_symb):
        if (i == nbr_of_atom_symb-1):
            poscar_file.write("%-4s\n" % atom_symb[i])
        else:
            poscar_file.write("%-4s" % atom_symb[i])
    #########################################################
    #4. atom counts per element
    ##########################################################
    for i in range(nbr_of_atom_symb):
        if (i == nbr_of_atom_symb-1):
            poscar_file.write("%-4d\n" % int(atomicity[i]))
        else:
            poscar_file.write("%-4d" % int(atomicity[i]))
    ##########################################################
    #5. coordinate mode
    ##########################################################
    poscar_file.write('direct\n')
    ##########################################################
    #6 fractional coordinates, one line per atom
    ############################################################
    for elem in records.keys():
        for coordinate in records[elem]:
            x, y, z = coordinate[0], coordinate[1], coordinate[2]
            poscar_file.write('%-9f%-9f%-9f%-9s\n' % (x,y,z,elem))
    poscar_file.close()
    #create folders
    our_module.create_compound_folder_cif(file[:-4]+'.vasp')
#######################################################################################################################
# Then we deal with the user's poscar/vasp files: each one just gets its
# compound folder created (no format conversion needed).
for poscar_name in poscar_files:
    our_module.create_compound_folder_poscar(poscar_name)
| StarcoderdataPython |
40212 | <reponame>viebboy/PyGOP
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>, <EMAIL>
github: https://github.com/viebboy
"""
from . import gop_utils
from . import gop_operators
from . import misc
| StarcoderdataPython |
1638461 | <reponame>FelixTheC/onlineOrderForm
from django import forms
from .models import OrderContactDeliveryAddresse
from .models import OrderContactInvoiceAddresse
class InvoiceAddresseForm(forms.ModelForm):
    """ModelForm over the invoice-address model, exposing every model field."""

    class Meta:
        model = OrderContactInvoiceAddresse
        fields = '__all__'
        # human-friendly labels for the auto-generated form fields
        labels = {
            'complete_addresse': 'Street',
            'email_addresse': 'e-mail address',
            'telephone_nr': 'telephone'
        }
class DeliveryAddresseForm(forms.ModelForm):
    """ModelForm over the delivery-address model with cross-field validation.

    Fixes relative to the original clean():
    * every add_error() used un-prefixed field names (e.g.
      'organisation_name') that do not exist on this form, which makes
      Django raise ValueError for an unknown field -- the 'delivery_'-
      prefixed names are used now;
    * the e-mail message was passed as the *field* argument of add_error()
      (the two-argument signature is field, error);
    * the e-mail check required BOTH "too short" AND "missing @" before
      flagging; either problem alone now flags the field.
    """

    class Meta:
        model = OrderContactDeliveryAddresse
        fields = '__all__'
        labels = {
            'delivery_organisation_name': 'Dlvy org name',
            'delivery_complete_addresse': 'Dlvy street',
            'delivery_zip_code': 'Dlvy zip code',
            'delivery_city': 'Dlvy city',
            'delivery_country': 'Dlvy country',
            'delivery_contact_person': 'Dlvy contact person',
            'delivery_email_addresse': 'Dlvy e-mail address',
            'delivery_telephone_nr': 'Dlvy telephone',
        }

    def _require_min_length(self, cleaned_data, field, message):
        """Attach *message* to *field* when its value is present but shorter than 2."""
        value = cleaned_data.get(field)
        if value is not None and len(value) < 2:
            self.add_error(field, message)

    def clean(self):
        cleaned_data = super(DeliveryAddresseForm, self).clean()
        self._require_min_length(cleaned_data, 'delivery_organisation_name',
                                 'Please fill in your organisation name')
        self._require_min_length(cleaned_data, 'delivery_complete_addresse',
                                 'Please check your address')
        self._require_min_length(cleaned_data, 'delivery_zip_code',
                                 'Please check your zip code')
        self._require_min_length(cleaned_data, 'delivery_city',
                                 'Please check your city')
        self._require_min_length(cleaned_data, 'delivery_country',
                                 'Please check your country')
        self._require_min_length(cleaned_data, 'delivery_contact_person',
                                 'Please check your contact person')
        self._require_min_length(cleaned_data, 'delivery_telephone_nr',
                                 'Please check your telephone number')

        email_addresse = cleaned_data.get('delivery_email_addresse')
        if email_addresse is not None:
            if len(email_addresse) < 2 or '@' not in email_addresse:
                self.add_error('delivery_email_addresse',
                               'Please check your email address')

        belt_thickness = cleaned_data.get('delivery_belt_thickness')
        if belt_thickness is not None and '-------' in belt_thickness:
            # the placeholder "-------" choice was never replaced
            self.add_error('delivery_belt_thickness', 'Please select a valid choice')

        belt_colour = cleaned_data.get('delivery_belt_colour')
        if belt_colour is not None and '--------' in belt_colour:
            self.add_error('delivery_belt_colour', 'Please select a valid choice')

        return cleaned_data
166523 | <filename>spotilyzer/subcommands/csv/requests.py<gh_stars>0
"""
spotilyzer requests CSV
"""
# system imports
import csv
# project imports
from ..json.requests import REQUESTS_KEY, POD_NAME_KEY, REPLICAS_KEY, \
CORE_LIMIT_KEY, MEM_LIMIT_KEY
# constants
_types = (str, int, float, float)
def load_requests(frequests):
    """
    Load requests CSV file. Formatting checks are performed

    :param frequests: path to CSV file.
    :return: requests object
    """
    requests = []
    with open(frequests, mode='r', encoding='utf-8-sig') as fobj:
        reader = csv.reader(fobj)
        header = next(reader)
        _validate_header(header, frequests)
        num_fields = len(header)
        # Data starts on line 2 (line 1 is the header).
        for line, row in enumerate(reader, start=2):
            _validate_row(num_fields, row, frequests, line)
            requests.append(_get_request(num_fields, header, row, frequests, line))
    _validate_pod_names(requests, frequests)
    return {REQUESTS_KEY: requests}
def _validate_header(header, frequests):
    """Raise SyntaxError unless *header* is exactly the expected column list."""
    expected = [POD_NAME_KEY, REPLICAS_KEY, CORE_LIMIT_KEY, MEM_LIMIT_KEY]
    if header != expected:
        raise SyntaxError(f"invalid header in {frequests}")
def _validate_row(rowlen, row, frequests, line):
if len(row) != rowlen:
raise SyntaxError("incorrect number of entries in "
f"{frequests}, line {line}")
def _get_request(rowlen, header, row, frequests, line):
    """Convert one CSV row into a request dict, casting each field via _types."""
    try:
        return {key: cast(value)
                for key, cast, value in zip(header[:rowlen], _types, row)}
    except ValueError as err:
        raise SyntaxError(f"invalid type in {frequests}, line {line}") from err
def _validate_pod_names(request_list, frequests):
    """Raise SyntaxError when two requests share the same pod name."""
    unique_names = {request[POD_NAME_KEY] for request in request_list}
    if len(unique_names) != len(request_list):
        raise SyntaxError(f"pod names in {frequests} are not unique")
| StarcoderdataPython |
3245740 | import os
import json
import time
import codecs
import plistlib
import subprocess
import lyrebird
from lyrebird import context
from lyrebird.log import get_logger
from . import wda_helper
from pathlib import Path
_log = get_logger()
ideviceinstaller = None
idevice_id = None
idevicescreenshot = None
ideviceinfo = None
idevicesyslog = None
root = os.path.dirname(__file__)
static = os.path.abspath(os.path.join(root, 'static'))
model_json = os.path.abspath(os.path.join(root, 'config/comparison_table_model.json'))
storage = lyrebird.get_plugin_storage()
tmp_dir = os.path.abspath(os.path.join(storage, 'tmp'))
crash_dir = os.path.abspath(os.path.join(storage, 'crash'))
screenshot_dir = os.path.abspath(os.path.join(storage, 'screenshot'))
PLIST_PATH = os.path.join(storage, 'plist')
error_msg = None
ios_driver = wda_helper.Helper()
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
if not os.path.exists(crash_dir):
os.makedirs(crash_dir)
def check_environment():
    """Verify the libimobiledevice third-party tools are correctly installed.

    (translated from Chinese) Checks the user's environment for the
    third-party dependencies.  Publishes the tool paths and an
    ``error_msg`` status dict via module-level globals.
    """
    global ideviceinstaller, idevice_id, idevicescreenshot, ideviceinfo, idevicesyslog, error_msg
    if not os.path.exists('/usr/local/bin/ideviceinfo'):
        # Missing binary: surface an HTML hint for the UI and pause so the
        # message is visible before anything else runs.
        error_msg = {"show_error": True,
                     "user_message": '<b>No ideviceinfo program found, need libimobiledevice '
                                     'dependencies with Homebrew, See <a href="https://github.com/'
                                     'meituan/lyrebird-ios#%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98" '
                                     'target="_blank">README 常见问题</a></b>'}
        time.sleep(20)
        _log.debug('No libimobiledevice program found.')
    else:
        # Binary exists -- run it once and inspect stderr for runtime errors.
        p = subprocess.Popen('/usr/local/bin/ideviceinfo', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        err = p.stderr.read().decode()
        if len(err):
            error_msg = {"show_error": True,
                         "user_message": '<b>ideviceinfo program found but not working with error, '
                                         'See <a href="https://github.com/'
                                         'meituan/lyrebird-ios#%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98" '
                                         'target="_blank">README 常见问题</a></b>'}
            time.sleep(20)
            _log.debug('ideviceinfo program found but not working with error: %s.' % err)
    if not os.path.exists('/usr/local/bin/idevicescreenshot'):
        error_msg = {"show_error": True,
                     "user_message": '<b>No idevicescreenshot program found, '
                                     'dependencies with Homebrew, See <a href="https://github.com/'
                                     'meituan/lyrebird-ios#%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98" '
                                     'target="_blank">README 常见问题</a></b>'}
        time.sleep(20)
        _log.debug('No idevicescreenshot program found.')
    # Publish tool locations for the rest of the module.
    idevice_id = '/usr/local/bin/idevice_id'
    ideviceinstaller = Path(__file__).parent/'bin'/'ideviceinstaller'
    ideviceinfo = '/usr/local/bin/ideviceinfo'
    idevicesyslog = '/usr/local/bin/idevicesyslog'
    idevicescreenshot = '/usr/local/bin/idevicescreenshot'
    # NOTE(review): this unconditionally resets error_msg to "no error",
    # clobbering any error recorded above -- confirm that is intended.
    error_msg = {"show_error": False, "user_message": ""}
def read_plist(plist_path):
    """Parse the plist file at *plist_path* and return its top-level object.

    Uses ``plistlib.load``: ``plistlib.readPlist`` was deprecated in
    Python 3.4 and removed entirely in Python 3.9, so the original call
    breaks on modern interpreters.
    """
    with open(plist_path, 'rb') as fp:
        return plistlib.load(fp)
class Apps:
    """Wrapper around an ``ideviceinstaller`` XML plist of installed apps.

    (translated) App base class: ``plist``/``bundle_id`` attributes plus
    helpers to list all apps and fetch one app's details.
    """

    def __init__(self):
        self._plist = None
        self.bundle_id = None
        self.app_info = {}

    @property
    def plist(self):
        return self._plist

    @plist.setter
    def plist(self, name):
        # Only accept plist files that actually exist under PLIST_PATH.
        candidate = os.path.join(PLIST_PATH, name)
        if os.path.exists(candidate):
            self._plist = candidate

    @property
    def apps(self):
        """Raw list of per-app plist dicts, re-read on every access."""
        return read_plist(self.plist)

    @property
    def app_key(self):
        # Mapping from plist keys to the friendlier names used in app_info.
        return {
            "CFBundleName": "AppName",
            "CFBundleIdentifier": "BundleID",
            "CFBundleShortVersionString": "VersionNumber",
            "CFBundleVersion": "BuildNumber",
        }

    def app(self, bundle_id):
        """Return the first plist entry whose identifier contains *bundle_id*."""
        for entry in self.apps:
            if bundle_id in entry.get('CFBundleIdentifier'):
                return entry
        _log.debug(f'{bundle_id} is not found in this device!')
        return {}

    def get_app_list(self):
        """Return [{app_name, bundle_id, label}, ...] for every installed app."""
        return [
            {
                'app_name': entry.get('CFBundleName'),
                'bundle_id': entry.get('CFBundleIdentifier'),
                'label': '%s %s' % (entry.get('CFBundleName'),
                                    entry.get('CFBundleIdentifier')),
            }
            for entry in self.apps
        ]

    def get_app_info(self, bundle_id):
        """Fill and return ``app_info`` for the app matching *bundle_id*."""
        for plist_key, friendly_key in self.app_key.items():
            self.app_info[friendly_key] = self.app(bundle_id).get(plist_key)
        return self.app_info
class Device:
    """Represents one USB-connected iOS device.

    (translated from Chinese) Device base class: main attributes include
    ``device_id``, ``model`` and ``os_version``; main methods cover taking
    screenshots and reading device information.
    """
    def __init__(self, device_id):
        # UDID as reported by `idevice_id -l`.
        self.device_id = device_id
        self.model = None
        self.is_jailbreak = None
        self.phone_number = None
        self.os_version = None
        self.device_name = None
        self.sn = None
        # Internal log/screenshot/crash bookkeeping.
        self._log_process = None
        self._log_cache = []
        self._log_crash_cache = []
        self._log_file = None
        self._screen_shot_file = None
        self._anr_file = None
        self._crash_file_list = []
        self._device_info = None
        self._apps_list = None
        self.start_catch_log = False
        self._pid = None

    @property
    def log_file(self):
        return self._log_file

    @property
    def screen_shot_file(self):
        return self._screen_shot_file

    @property
    def anr_file(self):
        return self._anr_file

    @property
    def crash_file_list(self):
        return self._crash_file_list

    @classmethod
    def read_line(cls, line):
        """Build a Device from one `idevice_id -l` output line (a UDID) by
        running `ideviceinfo -u <udid>` and parsing its key:value output."""
        res = subprocess.run(f'{ideviceinfo} -u {line}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        lines = res.stdout.decode()
        device_info = [info for info in lines.split('\n') if info]
        _device = cls(line)
        if len(device_info) < 2:
            _log.error(f'Read device info line error. {lines}')
        for info in device_info:
            # NOTE(review): a plain split(':') truncates values that
            # themselves contain ':' -- only info_kv[1] is used below.
            info_kv = info.split(':')
            if info_kv[0] == 'ProductType':
                # NOTE(review): cls(line) builds a throwaway Device just to
                # call convert_model; _device.convert_model(...) would do.
                _device.model = cls(line).convert_model(model=info_kv[1].strip())
            if info_kv[0] == 'BrickState':
                _device.is_jailbreak = info_kv[1].strip()
            if info_kv[0] == 'PhoneNumber':
                _device.phone_number = info_kv[1].strip()
            if info_kv[0] == 'ProductVersion':
                _device.os_version = info_kv[1].strip()
            if info_kv[0] == 'DeviceName':
                _device.device_name = info_kv[1].strip()
            if info_kv[0] == 'SerialNumber':
                _device.sn = info_kv[1].strip()
        return _device

    def convert_model(self, model):
        """Map an Apple ProductType string to a marketing name using the
        bundled comparison-table JSON; returns None for unknown models."""
        model_dict = json.loads(codecs.open(model_json, 'r', 'utf-8').read())
        return model_dict.get(model)

    @property
    def device_info(self):
        # Lazily cached raw `ideviceinfo` output lines.
        if not self._device_info:
            self._device_info = self.get_properties()
        return self._device_info

    def start_app(self, bundle_id, ip, port):
        """Launch *bundle_id* through the WDA helper with the mock-server
        environment; returns '' on success or the error string on failure."""
        ios_driver.bundle_id = bundle_id
        ios_driver.environment = {
            'mock': f'http://{ip}:{port}/mock',
            'closeComet': True,
            'urlscheme': True
        }
        try:
            ios_driver.start_app()
        except Exception as e:
            pass  # NOTE(review): dead statement kept from the original
            return str(e)
        return ''

    def stop_app(self):
        """Stop the app under test; returns the error string on failure,
        None on success."""
        try:
            ios_driver.stop_app()
        except AttributeError as e:
            pass  # NOTE(review): dead statement kept from the original
            return str(e)
        return

    def get_properties(self):
        """Return the raw `ideviceinfo` output split into lines, or None
        (implicitly) when the command fails."""
        p = subprocess.run(f'{ideviceinfo} -u {self.device_id}', shell=True, stdout=subprocess.PIPE)
        if p.returncode == 0:
            return p.stdout.decode().split('\n')

    def get_app_info(self, bundle_id):
        """Refresh this device's app plist and return details for *bundle_id*."""
        self.get_device_plist(self.device_id)
        apps = Apps()
        apps.plist = self.device_id + '.plist'
        return apps.get_app_info(bundle_id)

    def get_device_plist(self, device_id):
        """Dump the installed-app list to <storage>/plist/<udid>.plist as XML.

        NOTE(review): the *device_id* argument is ignored; self.device_id
        is used throughout -- confirm that is intended.
        """
        plist_path = '%s/%s.plist' % (PLIST_PATH, self.device_id)
        if not os.path.exists(PLIST_PATH):
            os.mkdir(PLIST_PATH)
        _cmd = f'{ideviceinstaller} -u {self.device_id} -l -o xml > {plist_path}'
        p = subprocess.Popen(_cmd, shell=True)
        p.wait()

    def get_apps_list(self, device_id):
        """Return the installed-app summary list for this device."""
        self.get_device_plist(device_id)
        apps = Apps()
        apps.plist = self.device_id + '.plist'
        return apps.get_app_list()

    def stop_log(self):
        # Kill the syslog capture subprocess, if one is running.
        if self._log_process:
            self._log_process.kill()
            self._log_process = None

    def take_screen_shot(self):
        """Capture a PNG via `idevicescreenshot`; returns a dict with
        'screen_shot_file' and 'timestamp' on success, {} on failure."""
        if not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
        file_name = self.model.replace(' ', '_')
        timestamp = int(time.time())
        screen_shot_file = os.path.abspath(os.path.join(screenshot_dir, f'{file_name}_{timestamp}.png'))
        p = subprocess.run(f'{idevicescreenshot} -u {self.device_id} {screen_shot_file}',
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        err_str = p.stdout.decode()
        if p.returncode == 0:
            return dict({
                'screen_shot_file': screen_shot_file,
                'timestamp': timestamp
            })
        else:
            _log.error(f'{err_str}')
            return {}

    def to_dict(self):
        """Serialize the public (non-underscore) attributes to a dict."""
        device_info = {k: self.__dict__[k] for k in self.__dict__ if not k.startswith('_')}
        # get additional device info
        prop_lines = self.device_info
        # NOTE(review): both branches return the same dict; fetching
        # prop_lines only warms the device_info cache.
        if not prop_lines:
            return device_info
        return device_info
def devices():
    """Detect USB-connected iOS devices via ``idevice_id -l``.

    :return: dict mapping device UDID -> :class:`Device` for every online
        device; an empty dict when none are found or the probe fails.
    """
    check_environment()
    res = subprocess.run(f'{idevice_id} -l', shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = res.stdout.decode()
    err_str = res.stderr.decode()
    # Probe failed outright: nothing on stdout but something on stderr.
    if len(output) <= 0 < len(err_str):
        # BUG FIX: the original returned a list ([]) here while every other
        # exit path returns a dict; it also used print() instead of the
        # module logger.
        _log.error(f'Get devices list error {err_str}')
        return {}
    online_devices = {}
    for line in output.split('\n'):
        if not line:
            continue
        device = Device.read_line(line)
        online_devices[device.device_id] = device
    return online_devices
| StarcoderdataPython |
1641264 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def fare(x):
    """Return the charge for integer *x* units under the tiered rate table.

    Rates: 30/unit below 50, 50/unit for 50-74, 65/unit for 75-89, and
    70/unit plus a flat 20 surcharge from 90 upward.
    """
    if x < 50:
        return 30 * x
    if x < 75:
        return 50 * x
    if x < 90:
        return 65 * x
    return 70 * x + 20


if __name__ == '__main__':
    # The original initialised `m = int` (the type object, a placeholder
    # bug) and called exit(1) after a successful run, signalling failure
    # to the shell; a normal fall-through exits with status 0.
    x = int(input("Value of x? "))
    print(fare(x))
| StarcoderdataPython |
1787551 | import openmdao.api as om
from turboshaft_generator_comp import TurboshaftGenerator
from propulsion_assembly_comp import PropulsionAssembly
class PropulsionGroupODE(om.Group):
    """ODE group wiring two turboshaft generators into a propulsion assembly."""

    def initialize(self):
        self.options.declare('num_nodes', types=int, default=1,
                             desc='Number of nodes to be evaluated in the RHS')

    def setup(self):
        num_nodes = self.options['num_nodes']
        # Preserve the original add order: generator2 first, then generator1.
        for gen_name in ('generator2', 'generator1'):
            self.add_subsystem(name=gen_name,
                               subsys=TurboshaftGenerator(num_nodes=num_nodes))
        self.add_subsystem(name='assembly',
                           subsys=PropulsionAssembly(num_nodes=num_nodes))
        # Feed each generator's fuel rate into the assembly.
        self.connect('generator1.fuel_rate', 'assembly.fuel_rate_gen1')
        self.connect('generator2.fuel_rate', 'assembly.fuel_rate_gen2')
52318 | #!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2021 <NAME> <<EMAIL>>
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import ProgramName
from GffTranscriptReader import GffTranscriptReader
from Gene import Gene
from BedReader import BedReader
from Bed6Record import Bed6Record
def processChrom(chrom, geneHash, dhsHash, maxDist):
    """Sort this chromosome's genes and DHS sites by start coordinate and
    report pairs within *maxDist* via proximity()."""
    chrom_genes = geneHash.get(chrom)
    if chrom_genes is None:
        return
    chrom_dhss = dhsHash.get(chrom)
    if chrom_dhss is None:
        return
    chrom_genes.sort(key=lambda g: g.getBegin())
    chrom_dhss.sort(key=lambda s: s.getBegin())
    proximity(chrom_genes, chrom_dhss, maxDist)
def distance(gene, dhs):
    """Return the absolute distance between the midpoints of *gene* and *dhs*.

    Side effect: caches each feature's midpoint on the object as ``.mid``
    (``proximity`` reads these attributes to decide which pointer to
    advance).  Uses ``abs()`` instead of the original manual sign flip.
    """
    geneMid = (gene.getBegin() + gene.getEnd()) / 2
    dhsMid = (dhs.getBegin() + dhs.getEnd()) / 2
    gene.mid = geneMid
    dhs.mid = dhsMid
    return abs(geneMid - dhsMid)
def proximity(genes, dhss, maxDist):
    """Merge-scan two begin-sorted lists, printing "<dhs.name>\\t<gene id>"
    for pairs whose midpoints lie within *maxDist*.

    Relies on distance() storing ``.mid`` on both objects; each round the
    pointer of the feature whose midpoint is further left advances.

    NOTE(review): each element is compared only against the current
    element of the other list before a pointer advances, so a feature
    close to several partners may not be reported against all of them --
    confirm single-pass pairing is intended.
    """
    i = 0; j = 0
    N_GENES = len(genes); N_DHS = len(dhss)
    while(i < N_GENES and j < N_DHS):
        gene = genes[i]; dhs = dhss[j]
        d = distance(gene, dhs)
        if(d <= maxDist):
            print(dhs.name, gene.getID(), sep="\t")
        # Advance whichever feature lies further to the left.
        if(gene.mid < dhs.mid): i += 1
        else: j += 1
#=========================================================================
# main()
#=========================================================================
# Usage: <prog> <dhs.bed> <genes.gff> <max-distance>
if(len(sys.argv)!=4):
    exit(ProgramName.get()+" <dhs.bed> <genes.gff> <max-distance>\n")
(dhsFile,genesFile,maxDist)=sys.argv[1:]
maxDist=int(maxDist)
# Index genes and DHS intervals by chromosome, then scan every chromosome
# that has genes (chromosomes without DHS entries are skipped inside
# processChrom).
gffReader=GffTranscriptReader()
geneHash=gffReader.hashGenesBySubstrate(genesFile)
dhsHash=BedReader.hashBySubstrate(dhsFile)
keys=geneHash.keys()
for chrom in keys:
    processChrom(chrom,geneHash,dhsHash,maxDist)
| StarcoderdataPython |
174007 | <filename>test/ResultsAndPrizes/matchball/test_matchball_results_of_the_draw_date_current_date.py
# matchball + draw results by date + current date
def test_matchball_results_draw_date_current_date(app):
    """Walk the UI flow for matchball draw results on the current date,
    then verify the generated report text and the expected log message."""
    app.ResultAndPrizes.open_page_results_and_prizes()
    app.ResultAndPrizes.click_game_matchball()
    app.ResultAndPrizes.click_the_results_of_the_draw_date()
    app.ResultAndPrizes.click_ok_in_modal_window_current_date()
    app.ResultAndPrizes.button_get_report_winners()
    # BUG FIX: the original parsed the report twice -- once with the result
    # discarded, once inside the assert -- doing the work (and any side
    # effects) twice.  Parse once and assert on the captured text.
    report_text = app.ResultAndPrizes.parser_report_text_winners()
    assert "РЕЗУЛЬТАТЫ ТИРАЖА" in report_text
    app.ResultAndPrizes.message_id_33_matchball_results_draw_date_current_date()
    app.ResultAndPrizes.comeback_main_page()
3235816 | <filename>userbot/modules/allunban.py
# Port By @VckyouuBitch From GeezProjects
# Copyright © 2021 Geez-Projects
from telethon.tl.types import (
ChannelParticipantsKicked,
)
from userbot import CMD_HELP, CMD_HANDLER as cmd
from userbot.utils import flicks_cmd
@flicks_cmd(pattern="allunban(?:\\s|$)([\\s\\S]*)")
async def _(event):
    """Restore view permissions for every kicked/banned member of the chat."""
    await event.edit("`Sedang Mencari List Banning.`")
    # Removed from the original: a `(await event.get_chat()).title` call
    # whose result was discarded, and a counter that was never used.
    async for participant in event.client.iter_participants(
        event.chat_id,
        filter=ChannelParticipantsKicked,
        aggressive=True,
    ):
        try:
            await event.client.edit_permissions(
                event.chat_id, participant, view_messages=True
            )
        except Exception:
            # Best effort: skip members we lack rights to unban, but do not
            # swallow KeyboardInterrupt/SystemExit like the original bare
            # BaseException catch did.
            continue
    await event.edit("`Sukses Menghapus List Banning di grup ini`")
# Register the .allunban help entry in the bot's help index.
CMD_HELP.update(
    {
        "allunban": f"𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `{cmd}allunban`\
\n↳ : Membatalkan semua Ban Di Anggota Grup."
    }
)
| StarcoderdataPython |
3380482 | import numpy as np
from sklearn import tree
from IPython.display import Image
import pydotplus
# Load the Q1 dataset; every cell comes in as a string and the categorical
# columns are re-coded to '0'/'1' below (np.place writes into the string
# array, so the values stay strings).
data = np.loadtxt('spambase/q1.txt', dtype=str, delimiter=',')
# Column 0 (Education Level): 'h' -> 0, 'c' -> 1.
np.place(data[:, 0], data[:, 0] == 'h', [0])
np.place(data[:, 0], data[:, 0] == 'c', [1])
# Column 1 (Career): 'm' -> 0, 's' -> 1.
np.place(data[:, 1], data[:, 1] == 'm', [0])
np.place(data[:, 1], data[:, 1] == 's', [1])
# Column 3 (Location): 'o' -> 0, 'c' -> 1.  Column 2 (Exp) is left as-is.
np.place(data[:, 3], data[:, 3] == 'o', [0])
np.place(data[:, 3], data[:, 3] == 'c', [1])
# Column 4 (class label): 'l' -> 0 (Low), 'h' -> 1 (High).
np.place(data[:, 4], data[:, 4] == 'l', [0])
np.place(data[:, 4], data[:, 4] == 'h', [1])
# Last column is the target; the rest are features.
X = data[:, :-1]
y = data[:, -1]
# print X.shape, y.shape
# print X
# print y
# Fit an entropy-based decision tree on the full dataset (no train/test split).
clf = tree.DecisionTreeClassifier(criterion='entropy')
clf = clf.fit(X, y)
feature_names = ['Education Level', 'Career', 'Exp', 'Location']
target_names = ['Low', 'High']
# Render the fitted tree to Graphviz DOT, then to a PNG for notebook display.
dot_data = tree.export_graphviz(clf, out_file=None,
                                feature_names=feature_names,
                                class_names=target_names,
                                filled=True, rounded=True,
                                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png())
| StarcoderdataPython |
3248999 | #
# normal_surfaces.py
#
from file_io import parse_data_file
from taut import isosig_to_tri_angle
import regina
def count_quads(surf):
    """Return the total number of quad discs in the normal surface *surf*."""
    tet_count = surf.triangulation().countTetrahedra()
    return sum(
        surf.quads(tet, quad_type)
        for tet in range(tet_count)
        for quad_type in range(3)
    )
def count_quad_types(surf):
    """Return how many (tetrahedron, quad-type) slots hold at least one quad."""
    tet_count = surf.triangulation().countTetrahedra()
    return sum(
        1
        for tet in range(tet_count)
        for quad_type in range(3)
        if surf.quads(tet, quad_type) > 0
    )
def analyze_sig(sig):
    """Enumerate fundamental closed-quad normal surfaces for *sig* and, when
    more than two of them use at most two quad types, print the signature
    followed by those surfaces."""
    tri, angle = isosig_to_tri_angle(sig)
    surfs = regina.NormalSurfaces.enumerate(tri, regina.NS_QUAD_CLOSED, regina.NS_FUNDAMENTAL)
    if surfs is None:
        return
    simple_surfs = [
        surfs.surface(index)
        for index in range(surfs.size())
        if count_quad_types(surfs.surface(index)) <= 2
    ]
    if len(simple_surfs) > 2:
        print(sig)
        for surf in simple_surfs:
            print(surf)
def main(num_to_check=10):
    """Analyze the first *num_to_check* signatures from the veering census."""
    census = parse_data_file('Data/veering_census.txt')
    for line in census[:num_to_check]:
        analyze_sig(line.strip())
| StarcoderdataPython |
1651336 | <reponame>boringlee24/keras_old<filename>examples/pwr_run/checkpointing/throughput/comparison/compare_final2_inverse/generate_csv.py
import glob
import json
import pdb
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
def _load_json(path):
    """Read one JSON result file and return its parsed content."""
    with open(path, 'r') as fp:
        return json.load(fp)


def _mean_or_zero(values):
    """Integer mean of *values*, or 0 when the list is empty.

    The overhead/first-epoch files hold one list per job (one entry per
    migration); a job that never migrated has an empty list.
    """
    return int(np.mean(values)) if len(values) > 0 else 0


# Per-job measurements, one dict per result file.  The original repeated
# the with-open/json.load boilerplate eighteen times.
k80_only = _load_json('k80_only_JCT.json')
final2_inverse_only = _load_json('final2_inverse_JCT.json')
v100_only = _load_json('v100_only_JCT.json')
final2_inverse_K80_only = _load_json('final2_inverse_K80_time.json')
final2_inverse_V100_only = _load_json('final2_inverse_V100_time.json')
final2_inverse_overhead_only = _load_json('final2_inverse_overhead.json')
final2_inverse_epoch_waste_only = _load_json('final2_inverse_epoch_waste.json')
final2_inverse_num_mig_only = _load_json('final2_inverse_num_mig.json')
final2_inverse_ovhd_a_only = _load_json('final2_inverse_ovhd_a.json')
final2_inverse_ovhd_b_only = _load_json('final2_inverse_ovhd_b.json')
final2_inverse_ovhd_c_only = _load_json('final2_inverse_ovhd_c.json')
final2_inverse_ovhd_d_only = _load_json('final2_inverse_ovhd_d.json')
final2_inverse_k80_1st_only = _load_json('final2_inverse_k80_1st.json')
final2_inverse_v100_1st_only = _load_json('final2_inverse_v100_1st.json')
speedup_only = _load_json('speedup.json')
epoch_num_only = _load_json('epoch_num.json')
k80_time_only = _load_json('k80_time.json')
v100_time_only = _load_json('v100_time.json')

job_list = []
final2_inverse = []
k80 = []
v100 = []
final2_inverse_K80 = []
final2_inverse_V100 = []
final2_inverse_overhead = []
final2_inverse_epoch_waste = []
final2_inverse_ovhd_a = []
final2_inverse_ovhd_b = []
final2_inverse_ovhd_c = []
final2_inverse_ovhd_d = []
final2_inverse_k80_1st = []
final2_inverse_v100_1st = []
final2_inverse_num_mig = []
speedup = []
epoch_num = []
k80_time = []
v100_time = []

for i in range(50):
    job = str(i + 1)
    job_list.append('job' + job)
    final2_inverse.append(final2_inverse_only[job])
    k80.append(k80_only[job])
    v100.append(v100_only[job])
    final2_inverse_K80.append(final2_inverse_K80_only[job])
    final2_inverse_V100.append(final2_inverse_V100_only[job])
    final2_inverse_overhead.append(final2_inverse_overhead_only[job])
    # NOTE: the epoch-waste file is keyed 'jobN'; the others use bare 'N'.
    final2_inverse_epoch_waste.append(final2_inverse_epoch_waste_only['job' + job])
    final2_inverse_num_mig.append(final2_inverse_num_mig_only[job])
    final2_inverse_ovhd_a.append(_mean_or_zero(final2_inverse_ovhd_a_only[job]))
    final2_inverse_ovhd_b.append(_mean_or_zero(final2_inverse_ovhd_b_only[job]))
    final2_inverse_ovhd_c.append(_mean_or_zero(final2_inverse_ovhd_c_only[job]))
    final2_inverse_ovhd_d.append(_mean_or_zero(final2_inverse_ovhd_d_only[job]))
    final2_inverse_k80_1st.append(_mean_or_zero(final2_inverse_k80_1st_only[job]))
    final2_inverse_v100_1st.append(_mean_or_zero(final2_inverse_v100_1st_only[job]))
    speedup.append(round(speedup_only[job], 2))
    epoch_num.append(epoch_num_only[job])
    k80_time.append(k80_time_only[job])
    v100_time.append(v100_time_only[job])

# Column order of the output CSV (one row per job, no header line).
# The original converted every list to an ndarray first; zip() yields the
# same textual values either way, so the conversions were dropped.
rows = zip(job_list, epoch_num, k80, v100, final2_inverse, final2_inverse_K80,
           final2_inverse_V100, final2_inverse_overhead,
           final2_inverse_epoch_waste, final2_inverse_num_mig,
           final2_inverse_ovhd_a, final2_inverse_ovhd_b, final2_inverse_ovhd_c,
           final2_inverse_ovhd_d, final2_inverse_k80_1st, k80_time,
           final2_inverse_v100_1st, v100_time, speedup)

with open('comparison.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerows(rows)
| StarcoderdataPython |
81377 | # https://docs.aws.amazon.com/code-samples/latest/catalog/python-secretsmanager-secrets_manager.py.html
import boto3
from abc import ABC
import logging
import json
class SecretsManager(ABC):
    """Base class that fetches a JSON secret from AWS Secrets Manager.

    Subclasses pin the secret id; ``get_secrets`` returns the decoded
    secret payload as a dict.
    """

    def __init__(self, secret_id: str):
        self._secret_id = secret_id
        self._logger = logging.getLogger(SecretsManager.__name__)

    def get_secrets(self):
        """Fetch and JSON-decode the secret string for ``self._secret_id``.

        Logs and re-raises any client error.
        """
        # BUG FIX: the original created two identical boto3 sessions and
        # threw the first away; one session supplies both the region and
        # the client.
        session = boto3.session.Session()
        client = session.client(
            service_name="secretsmanager", region_name=session.region_name
        )
        try:
            get_secret_value_response = client.get_secret_value(
                SecretId=self._secret_id
            )
        except Exception as e:
            self._logger.error(e)
            # Bare `raise` preserves the original traceback (`raise e` did not).
            raise
        self._logger.info(f"Secret with id: {self._secret_id} recovered successfully")
        return json.loads(get_secret_value_response["SecretString"])
class TwitterSecretsManager(SecretsManager):
    """Secrets Manager accessor for the Twitter API keys."""

    def __init__(self):
        super().__init__("sentinel/api_keys/twitter")
class RedditSecretsManager(SecretsManager):
    """Secrets Manager accessor for the Reddit API keys."""

    def __init__(self):
        super().__init__("sentinel/api_keys/reddit")
class GoogleNewsSecretsManager(SecretsManager):
    """Secrets Manager accessor for the Google News API keys."""

    def __init__(self):
        super().__init__("sentinel/api_keys/google_news")
| StarcoderdataPython |
3320413 | <reponame>seebees/aws-encryption-sdk-python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helper utility functions for AWS Encryption SDK."""
import io
import logging
import os
import six
import aws_encryption_sdk.internal.defaults
from aws_encryption_sdk.exceptions import InvalidDataKeyError, SerializationError, UnknownIdentityError
from aws_encryption_sdk.identifiers import ContentAADString, ContentType
from aws_encryption_sdk.internal.str_ops import to_bytes
from aws_encryption_sdk.structures import EncryptedDataKey
from .streams import InsistentReaderBytesIO
_LOGGER = logging.getLogger(__name__)
def content_type(frame_length):
    """Returns the appropriate content type based on the frame length.

    :param int frame_length: Message frame length
    :returns: Appropriate content type based on frame length
    :rtype: aws_encryption_sdk.identifiers.ContentType
    """
    # Zero frame length means the message body is not framed.
    return ContentType.NO_FRAMING if frame_length == 0 else ContentType.FRAMED_DATA
def validate_frame_length(frame_length, algorithm):
    """Validates that frame length is within the defined limits and is
    compatible with the selected algorithm.

    :param int frame_length: Frame size in bytes
    :param algorithm: Algorithm to use for encryption
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :raises SerializationError: if frame size is negative, not a multiple of
        the algorithm block size, or larger than the maximum allowed size
    """
    block_size = algorithm.encryption_algorithm.block_size
    if frame_length < 0 or frame_length % block_size != 0:
        raise SerializationError(
            "Frame size must be a non-negative multiple of the block size of the crypto algorithm: {block_size}".format(
                block_size=block_size
            )
        )
    max_frame_size = aws_encryption_sdk.internal.defaults.MAX_FRAME_SIZE
    if frame_length > max_frame_size:
        raise SerializationError(
            "Frame size too large: {frame} > {max}".format(frame=frame_length, max=max_frame_size)
        )
def message_id():
    """Generates a new message ID.

    :returns: Message ID
    :rtype: bytes
    """
    id_length = aws_encryption_sdk.internal.defaults.MESSAGE_ID_LENGTH
    return os.urandom(id_length)
def get_aad_content_string(content_type, is_final_frame):
    """Prepares the appropriate Body AAD Value for a message body.

    :param content_type: Defines the type of content for which to prepare AAD String
    :type content_type: aws_encryption_sdk.identifiers.ContentType
    :param bool is_final_frame: Boolean stating whether this is the final frame in a body
    :returns: Appropriate AAD Content String
    :rtype: bytes
    :raises UnknownIdentityError: if unknown content type
    """
    if content_type == ContentType.NO_FRAMING:
        return ContentAADString.NON_FRAMED_STRING_ID
    if content_type == ContentType.FRAMED_DATA:
        # Framed bodies distinguish the final frame from intermediate ones.
        if is_final_frame:
            return ContentAADString.FINAL_FRAME_STRING_ID
        return ContentAADString.FRAME_STRING_ID
    raise UnknownIdentityError("Unhandled content type")
def prepare_data_keys(primary_master_key, master_keys, algorithm, encryption_context):
    """Prepares a DataKey to be used for encrypting message and list
    of EncryptedDataKey objects to be serialized into header.

    :param primary_master_key: Master key with which to generate the encryption data key
    :type primary_master_key: aws_encryption_sdk.key_providers.base.MasterKey
    :param master_keys: All master keys with which to encrypt data keys
    :type master_keys: list of :class:`aws_encryption_sdk.key_providers.base.MasterKey`
    :param algorithm: Algorithm to use for encryption
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :param dict encryption_context: Encryption context to use when generating data key
    :rtype: tuple containing :class:`aws_encryption_sdk.structures.DataKey`
        and set of :class:`aws_encryption_sdk.structures.EncryptedDataKey`
    """
    encrypted_data_keys = set()
    data_encryption_key = primary_master_key.generate_data_key(algorithm, encryption_context)
    _LOGGER.debug("encryption data generated with master key: %s", data_encryption_key.key_provider)
    for master_key in master_keys:
        if master_key is primary_master_key:
            # The primary key already produced ciphertext while generating
            # the data key; wrap that instead of re-encrypting.
            encrypted_data_keys.add(
                EncryptedDataKey(
                    key_provider=data_encryption_key.key_provider,
                    encrypted_data_key=data_encryption_key.encrypted_data_key,
                )
            )
            continue
        encrypted_data_keys.add(
            master_key.encrypt_data_key(
                data_key=data_encryption_key,
                algorithm=algorithm,
                encryption_context=encryption_context,
            )
        )
        _LOGGER.debug("encryption key encrypted with master key: %s", master_key.key_provider)
    return data_encryption_key, encrypted_data_keys
def prep_stream_data(data):
    """Take an input and prepare it for use as a stream.

    :param data: Input data
    :returns: Prepared stream
    :rtype: InsistentReaderBytesIO
    """
    if isinstance(data, (six.string_types, six.binary_type)):
        # Strings and bytes are wrapped in an in-memory stream first.
        return InsistentReaderBytesIO(io.BytesIO(to_bytes(data)))
    return InsistentReaderBytesIO(data)
def source_data_key_length_check(source_data_key, algorithm):
    """Validates that the supplied source_data_key's data_key is the
    correct length for the supplied algorithm's kdf_input_len value.

    :param source_data_key: Source data key object received from MasterKey decrypt or generate data_key methods
    :type source_data_key: :class:`aws_encryption_sdk.structures.RawDataKey`
        or :class:`aws_encryption_sdk.structures.DataKey`
    :param algorithm: Algorithm object which directs how this data key will be used
    :type algorithm: aws_encryption_sdk.identifiers.Algorithm
    :raises InvalidDataKeyError: if data key length does not match required kdf input length
    """
    actual = len(source_data_key.data_key)
    required = algorithm.kdf_input_len
    if actual != required:
        raise InvalidDataKeyError(
            "Invalid Source Data Key length {actual} for algorithm required: {required}".format(
                actual=actual, required=required
            )
        )
| StarcoderdataPython |
1687868 | from application.caches.cache import Cache
from google.appengine.api import memcache
class GoogleMemcache(Cache):
    """Cache implementation that delegates to App Engine's memcache service."""

    def __init__(self):
        # The ambient App Engine memcache API needs no per-instance setup.
        pass

    def add(self, key, value):
        """Store *value* under *key*; returns memcache's success flag."""
        return memcache.add(key, value)

    def get(self, key):
        """Return whatever memcache holds for *key*."""
        return memcache.get(key)
| StarcoderdataPython |
3340500 | <reponame>doggy8088/azure-devops-cli-extension
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
try:
# Attempt to load mock (works on Python 3.3 and above)
from unittest.mock import patch
except ImportError:
# Attempt to load mock (works on Python version below 3.3)
from mock import patch
from .helper import UNIT_TEST_PAT_TOKEN
class AuthenticatedTests(unittest.TestCase):
    """Base test case that stubs out the Azure DevOps authentication layer."""

    def authentication_setup(self):
        """Create and start patchers for identity, credential and token lookups."""
        # pylint: disable=attribute-defined-outside-init
        self.resolve_identity_patcher = patch(
            'azext_devops.dev.common.identities.resolve_identity_as_id')
        self.get_credential_patcher = patch(
            'azext_devops.dev.common.services.get_credential')
        self.validate_token_patcher = patch(
            'azext_devops.dev.common.services.validate_token_for_instance')
        # Start the patchers and keep the mock handles for use in subclasses.
        self.mock_resolve_identity = self.resolve_identity_patcher.start()
        self.mock_get_credential = self.get_credential_patcher.start()
        self.mock_validate_token = self.validate_token_patcher.start()

    def authenticate(self):
        """Make the mocked auth layer behave like a valid, logged-in user."""
        self.mock_validate_token.return_value = True
        self.mock_get_credential.return_value = UNIT_TEST_PAT_TOKEN
| StarcoderdataPython |
44147 | <filename>Megatron-LM-v1.1.5-3D_parallelism/megatron/configs/realm.py
# Default values for REALM/ICT-related settings. A value of None means
# "no default": it must be supplied by the user configuration before use.
# (The meanings below are inferred from the names — confirm against the
# training code that consumes this config.)
# network size
ict_head_size = None
# checkpointing
ict_load = None
bert_load = None
# data
titles_data_path = None
query_in_block_prob = 0.1
use_one_sent_docs = False
# training
report_topk_accuracies = []
# faiss index
faiss_use_gpu = False
block_data_path = None
# indexer
indexer_batch_size = 128
indexer_log_interval = 1000 | StarcoderdataPython |
1750156 | from rest_framework import generics, permissions, views, status
from rest_framework.response import Response
from .models import Course, Group, Code
from .serializers import CourseSerializer, GroupSerializer, CodeSerializer
from users.models import User
class CourseListView(generics.ListAPIView):
    """List all courses and handle enroll/unenroll requests.

    GET returns every course. POST expects ``{"id": <course pk>,
    "change": "enroll"|"unenroll"}`` and updates the requesting user's
    membership in that course.
    """

    permission_classes = [permissions.AllowAny]
    queryset = Course.objects.all()
    serializer_class = CourseSerializer

    def post(self, request, *args, **kwargs):
        """Enroll or unenroll ``request.user`` in the course given by ``id``."""
        # Return a 404 instead of letting DoesNotExist bubble up as a 500,
        # mirroring the detail views in this module.
        try:
            course = Course.objects.get(pk=request.data['id'])
        except Course.DoesNotExist:
            content = {'error': '404, course object not found'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        user = request.user
        operation = request.data['change']
        if operation == 'enroll':
            course.add_user(user)
        elif operation == 'unenroll':
            course.remove_user(user)
        else:
            content = {'error': '400, operation not allowed'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): a fresh Group is created (and joined) for both the
        # enroll and unenroll paths; confirm that unenrolling really should
        # create a new group for the user.
        group = Group(course=course)
        group.save()
        group.add_user(user)
        return Response()
class CourseDetailView(views.APIView):
    """Retrieve a single course by primary key."""

    def get_object(self, pk):
        """Return the Course with ``pk``, or a 404 Response when missing."""
        try:
            return Course.objects.get(pk=pk)
        except Course.DoesNotExist:
            content = {'error': '404, course object not found'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)

    def get(self, request, pk, format=None):
        course = self.get_object(pk)
        # get_object signals "not found" by returning a Response; pass it
        # straight through instead of trying to serialize it (which would
        # raise and turn the intended 404 into a 500).
        if isinstance(course, Response):
            return course
        serializer = CourseSerializer(course, context={'request': request})
        return Response(serializer.data)
# Read-only listing of every Group; DRF's ListAPIView supplies GET handling.
class GroupListView(generics.ListAPIView):
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class GroupDetailView(views.APIView):
    """Retrieve a single group by primary key."""

    def get_object(self, pk):
        """Return the Group with ``pk``, or a 404 Response when missing."""
        try:
            return Group.objects.get(pk=pk)
        except Group.DoesNotExist:
            content = {'error': '404, group object not found'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)

    def get(self, request, pk, format=None):
        group = self.get_object(pk)
        # A failed lookup yields a Response; return it as-is rather than
        # serializing it (which previously produced a 500 instead of 404).
        if isinstance(group, Response):
            return group
        serializer = GroupSerializer(group, context={'request': request})
        return Response(serializer.data)
# Read-only listing of every Code; DRF's ListAPIView supplies GET handling.
class CodeListView(generics.ListAPIView):
    queryset = Code.objects.all()
    serializer_class = CodeSerializer
class CodeDetailView(views.APIView):
    """Retrieve a single code object by primary key."""

    def get_object(self, pk):
        """Return the Code with ``pk``, or a 404 Response when missing."""
        try:
            return Code.objects.get(pk=pk)
        except Code.DoesNotExist:
            # Fixed copy-paste error: the message previously said "group".
            content = {'error': '404, code object not found'}
            return Response(content, status=status.HTTP_404_NOT_FOUND)

    def get(self, request, pk, format=None):
        code = self.get_object(pk)
        # A failed lookup yields a Response; return it as-is rather than
        # serializing it (which previously produced a 500 instead of 404).
        if isinstance(code, Response):
            return code
        serializer = CodeSerializer(code, context={'request': request})
        return Response(serializer.data)
| StarcoderdataPython |
40963 | <reponame>Rhadow/leetcode
class Solution:
    # @param num : a list of integer
    # @return : a list of integer
    def nextPermutation(self, num):
        """Return the next lexicographically greater permutation of *num*.

        If *num* is already the highest permutation, the lowest (sorted
        ascending) permutation is returned.

        Changes from the original: the dead, commented-out "Version 2"
        implementation was removed, and the input list is never mutated
        (the old code reversed it in place in the all-descending case
        but returned a fresh list otherwise).
        """
        result = list(num)
        # Pivot: last index i with result[i] < result[i + 1]; -1 when the
        # sequence is entirely non-increasing.
        pivot = -1
        for i in range(len(result) - 1):
            if result[i] < result[i + 1]:
                pivot = i
        if pivot == -1:
            # Highest permutation: wrap around to the lowest one.
            return result[::-1]
        # Smallest element right of the pivot that is strictly greater than
        # the pivot value; putting it at the pivot position and sorting the
        # remainder yields the next permutation.
        successor = min(x for x in result[pivot + 1:] if x > result[pivot])
        suffix = result[pivot:]
        suffix.remove(successor)  # drop one occurrence of the successor
        return result[:pivot] + [successor] + sorted(suffix)
| StarcoderdataPython |
3253331 | <reponame>CyberFlameGO/macropy<gh_stars>1000+
import macropy.core
import macropy.core.macros
macros = macropy.core.macros.Macros()
@macros.block
def my_macro(tree, target, **kw):
    # Sanity-check the macro inputs: the block's target should unparse to
    # "y" and its body to the single statement "x = (x + 1)".
    assert macropy.core.unparse(target) == "y"
    assert macropy.core.unparse(tree).strip() == "x = (x + 1)", macropy.core.unparse(tree)
    # Expand the block to three copies of its body (statement lists
    # support multiplication).
    return tree * 3
| StarcoderdataPython |
3275140 | """Tests dla distutils.command.bdist_wininst."""
zaimportuj unittest
z test.support zaimportuj run_unittest
z distutils.command.bdist_wininst zaimportuj bdist_wininst
z distutils.tests zaimportuj support
# NOTE(review): this snippet is written in a Polish-keyword Python dialect
# ("klasa" = class); the translated keywords are kept intact.
klasa BuildWinInstTestCase(support.TempdirManager,
                           support.LoggingSilencer,
                           unittest.TestCase):
    def test_get_exe_bytes(self):
        # issue5731: command was broken on non-windows platforms
        # this test makes sure it works now for every platform
        # let's create a command
        pkg_pth, dist = self.create_dist()
        cmd = bdist_wininst(dist)
        cmd.ensure_finalized()
        # let's run the code that finds the right wininst*.exe file
        # and make sure it finds it and returns its content
        # no matter what platform we have
        exe_file = cmd.get_exe_bytes()
        self.assertGreater(len(exe_file), 10)
# Dialect note: "zwróć" = return, "jeżeli" = if (translated-keyword Python).
def test_suite():
    # Collect all tests from the case above into a suite.
    zwróć unittest.makeSuite(BuildWinInstTestCase)
jeżeli __name__ == '__main__':
    run_unittest(test_suite())
| StarcoderdataPython |
1612236 | # -*- coding: utf-8 -*-
# Scrapy settings for bankcrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#from shutil import which
import os
from os.path import join, dirname
from dotenv import load_dotenv
BOT_NAME = 'bankcrawler'
SPIDER_MODULES = ['bankcrawler.spiders']
NEWSPIDER_MODULE = 'bankcrawler.spiders'
# Load env vars from .env file using dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# NOTE(review): os.getenv returns None when AGENT_DOMAIN is unset, which
# would make this concatenation raise TypeError at import time — confirm
# the variable is always present in .env.
USER_AGENT = 'bankcrawler (+' + os.getenv('AGENT_DOMAIN') + ')'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 96
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 5
DOWNLOAD_TIMEOUT = 180
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 6
# CONCURRENT_REQUESTS_PER_IP = 32
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
    'bankcrawler.middlewares.StartRequestsMiddleware': 543,
}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    # 'scrapy_selenium.SeleniumMiddleware': 800
    # 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    # 'scrapy_user_agents.middlewares.RandomUserAgentMiddleware': 400,
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'bankcrawler.pipelines.BankcrawlerPipeline': 300,
#}
# Number of times to retry a request
RETRY_TIMES = 1
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 1
# The maximum download delay to be set in case of high latencies
# NOTE(review): MAX_DELAY (0.25) is smaller than START_DELAY (1) — confirm
# this inversion is intentional.
AUTOTHROTTLE_MAX_DELAY = 0.25
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 128
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Domain Depth Settings
#DEPTH_LIMIT = 1
# Output Settings
#FEED_FORMAT = 'csv'
#FEED_URI = 'linksOut.csv'
#LOG_LEVEL = 'INFO'
#LOG_FILE = 'links.log'
# Scrapy Splash Settings
# !!!!!!!!!!!!!!!!!!!!!!----IMPORTANT----!!!!!!!!!!!!!!!!!!!!!
# Set SPLASH_URL to 'splash' when using composing docker image, use 'localhost' for testing
#SPLASH_URL = 'http://splash:8050/'
#SPLASH_URL = 'http://{}:8050/'.format(os.environ['SPLASH_IP'])
SPLASH_URL = 'http://localhost:8050/'
SPLASH_COOKIES_DEBUG = True
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
# Scrapy Selenium Settings
#SELENIUM_DRIVER_NAME = 'firefox'
#SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
#SELENIUM_DRIVER_ARGUMENTS=['-headless'] # '--headless' if using chrome instead of firefox
# Error code handling since Scrapy does not allow error codes 300+
# HTTPERROR_ALLOWED_CODES = [400, 404, 500]
# Logging settings for terminal, makes running spiders faster when False
#LOG_ENABLED = False
| StarcoderdataPython |
1795002 | <gh_stars>10-100
#!/usr/bin/env python
#
# Copyright (C) 2011, 2012, 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import vim
from ycm import vimsupport
from ycm import base
from ycm.completers.completer import Completer
from ycm.client.base_request import BuildRequestData
# Error strings surfaced to the user (via Vim) when the configured
# omnifunc misbehaves.
OMNIFUNC_RETURNED_BAD_VALUE = 'Omnifunc returned bad value to YCM!'
OMNIFUNC_NOT_LIST = ( 'Omnifunc did not return a list or a dict with a "words" '
                      ' list when expected.' )
class OmniCompleter( Completer ):
  """Completer that delegates to the buffer's Vim 'omnifunc'.

  Results may be routed through the base Completer's cache/filter layer,
  controlled by the 'cache_omnifunc' user option.
  """

  def __init__( self, user_options ):
    super( OmniCompleter, self ).__init__( user_options )
    # Name of the buffer's omni-completion function; refreshed on every
    # FileReadyToParse event (see OnFileReadyToParse).
    self._omnifunc = None

  def SupportedFiletypes( self ):
    # Empty list: this completer is not tied to specific filetypes.
    return []

  def ShouldUseCache( self ):
    return bool( self.user_options[ 'cache_omnifunc' ] )

  # We let the caller call this without passing in request_data. This is useful
  # for figuring out should we even be preparing the "real" request_data in
  # omni_completion_request. The real request_data is much bigger and takes
  # longer to prepare, and we want to avoid creating it twice.
  def ShouldUseNow( self, request_data = None ):
    if not self._omnifunc:
      return False
    if not request_data:
      request_data = _BuildRequestDataSubstitute()
    if self.ShouldUseCache():
      return super( OmniCompleter, self ).ShouldUseNow( request_data )
    return self.ShouldUseNowInner( request_data )

  def ShouldUseNowInner( self, request_data ):
    if not self._omnifunc:
      return False
    return super( OmniCompleter, self ).ShouldUseNowInner( request_data )

  def ComputeCandidates( self, request_data ):
    # With caching enabled, the base class handles memoization and calls
    # back into ComputeCandidatesInner as needed.
    if self.ShouldUseCache():
      return super( OmniCompleter, self ).ComputeCandidates(
          request_data )
    else:
      if self.ShouldUseNowInner( request_data ):
        return self.ComputeCandidatesInner( request_data )
      return []

  def ComputeCandidatesInner( self, request_data ):
    if not self._omnifunc:
      return []
    try:
      # First omnifunc call (findstart form, per Vim's omnifunc protocol);
      # a negative result means "nothing to complete here".
      return_value = int( vim.eval( self._omnifunc + '(1,"")' ) )
      if return_value < 0:
        return []
      # Second call: ask the omnifunc for matches against the query text.
      omnifunc_call = [ self._omnifunc,
                        "(0,'",
                        vimsupport.EscapeForVim( request_data[ 'query' ] ),
                        "')" ]
      items = vim.eval( ''.join( omnifunc_call ) )
      # Omnifuncs may return either a plain list or a dict whose 'words'
      # key holds the list.
      if 'words' in items:
        items = items[ 'words' ]
      if not hasattr( items, '__iter__' ):
        raise TypeError( OMNIFUNC_NOT_LIST )
      # Drop falsy entries (empty strings/dicts). NOTE: on Python 2 this
      # returns a list; on Python 3 it would be a lazy filter object.
      return filter( bool, items )
    except ( TypeError, ValueError, vim.error ) as error:
      # Bad omnifunc output (or a Vim-side error): tell the user and
      # degrade to no completions rather than crashing.
      vimsupport.PostVimMessage(
        OMNIFUNC_RETURNED_BAD_VALUE + ' ' + str( error ) )
      return []

  def OnFileReadyToParse( self, request_data ):
    # Pick up the current buffer's omnifunc setting.
    self._omnifunc = vim.eval( '&omnifunc' )
def _BuildRequestDataSubstitute():
  """Build a lightweight request_data stand-in (no buffer contents).

  Used by ShouldUseNow when no real request data was supplied, since the
  full request is expensive to prepare.
  """
  request_data = BuildRequestData( include_buffer_data = False )
  request_data[ 'start_column' ] = base.CompletionStartColumn()
  return request_data
| StarcoderdataPython |
def query(self, sql, *args):
    '''Mixin method for the XXXBase class.

    Runs *sql* (with positional *args*) on the connection obtained from
    self._get_connection() — a cm.db.Connection instance — and returns
    the resulting record set.'''
    conn = self._get_connection()
    return conn.query(sql, *args)
def get_connection(self):
    '''Mixin method for the XXXBase class.

    Must be overridden to return a cm.db.Connection instance.

    :raises NotImplementedError: always, until a subclass overrides it.
    '''
    # The original raised a plain string, which has been illegal since
    # Python 2.6 (and raises TypeError on Python 3); use the standard
    # exception for unimplemented hooks instead.
    raise NotImplementedError('No implementation.')
class ObjectBase:
    '''I am the basis of the model class.'''

    # The name of the autoincrement id column's sequence in the table.
    seq_name = None

    def _set_defaults(self, kw):
        '''Sets the default values for certain 'always there' columns.

        This method is usually called in a update/insert operation.
        Returns the modified keyword dictionary.
        '''
        return kw

    def get_id(self):
        '''Returns an ID (unique identifier) for the object instance.

        :raises RuntimeError: if ``seq_name`` is not set (no
            autoincrement column).
        '''
        if self.seq_name is None:
            # Was a plain-string raise (illegal in modern Python); raise a
            # real exception with the same message.
            raise RuntimeError('There is no autoincrement column.')
        sql = """select nextval('%s');""" % self.seq_name
        rs = self._query(sql)
        # Take the nextval from the (single-row) result set.
        for r in rs:
            _id = r.nextval
        return _id

    def _get_sort_order(self, kw, default=None):
        '''Returns the SQL sort order ('asc'/'desc') selected by the
        'sort_order' entry in *kw* ('up' -> 'asc', 'down' -> 'desc').

        Falls back to *default* ('up'/'down'), and finally to ascending
        when neither is recognised. Usually called in a get/select
        operation.
        '''
        order = kw.get('sort_order')
        if order not in ('up', 'down'):
            # Unrecognised or missing request value: use the caller's
            # default, then ascending.
            order = default if default in ('up', 'down') else 'up'
        return 'desc' if order == 'down' else 'asc'

    _query = query
    _get_connection = get_connection
class StatusBase:
    '''I am the basis of the 'type'/'status' listing class.

    Subclasses provide ``_data``: a sequence of (id, description) pairs.
    '''

    def ids(self):
        '''Returns a list of ID.'''
        return [i for (i, s) in self._data]

    def descs(self):
        '''Returns a list of Description.'''
        return [s for (i, s) in self._data]

    def get_id(self, desc):
        '''Given a description, returns the related ID (first match).

        :raises LookupError: if *desc* is unknown.
        '''
        for i, s in self._data:
            if s == desc:
                return i
        # The original tested ``ret is not None`` on a list (always true),
        # so an unknown desc crashed with IndexError and its string raise
        # was unreachable/illegal; raise a real, descriptive exception.
        raise LookupError('No such desc <%s>.' % desc)

    _query = query
    _get_connection = get_connection
| StarcoderdataPython |
4830977 | <reponame>lrei/text-classification
#!/usr/bin/env python
"""Merge text predictions into dev and test sets."""
import argparse
import pandas as pd
def parse_args():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description="merge predictions into dataset files"
    )
    parser.add_argument(
        "--dataset", type=str, help="dataset CSV file", required=True
    )
    parser.add_argument(
        "--field-img",
        type=str,
        help="predicted column",
        default="img_prediction",
    )
    parser.add_argument(
        "--field-txt",
        type=str,
        help="predicted column",
        default="text_prediction",
    )
    return parser.parse_args()
def only_one_exists(df, args):
    """Print and return the fraction of rows where exactly one of the two
    prediction columns (args.field_img / args.field_txt) is present."""
    total = len(df)
    img = df[args.field_img]
    txt = df[args.field_txt]
    # Count each "exactly one present" direction separately.
    one_exists = len(df[img.notna() & txt.isna()])
    one_exists += len(df[img.isna() & txt.notna()])
    one_exists /= total
    print(f"only one exists: {one_exists:.2f}")
    return one_exists
def if_both_exist_agree(df, args):
    """Print and return the agreement ratio among rows where both the
    image and text predictions are present.

    :param df: DataFrame containing the two prediction columns.
    :param args: namespace with ``field_img`` / ``field_txt`` column names.
    :returns: fraction of both-present rows whose predictions agree, or
        NaN when no row has both predictions (the original crashed with
        ZeroDivisionError in that case).
    """
    # Keep only rows where both predictions exist.
    both = df[df[args.field_img].notna() & df[args.field_txt].notna()]
    total = len(both)
    if total == 0:
        agree = float("nan")
    else:
        agree = len(both[both[args.field_img] == both[args.field_txt]]) / total
    print(f"if both exist, agree ratio:{agree:.2f}")
    return agree
def main():
    """Entry point: load the two prediction columns and report overlap stats."""
    args = parse_args()
    # Only the two prediction columns matter for these statistics.
    columns = [args.field_img, args.field_txt]
    df = pd.read_csv(args.dataset, delimiter="\t", usecols=columns)
    only_one_exists(df, args)
    if_both_exist_agree(df, args)
# Run the merge report only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3261213 | <gh_stars>0
import numpy as np
from ..base import BaseEstimator
from typing import Callable, NoReturn
class AdaBoost(BaseEstimator):
    """
    AdaBoost class for boosting a specified weak learner

    Attributes
    ----------
    self.wl_: Callable[[], BaseEstimator]
        Callable for obtaining an instance of type BaseEstimator

    self.iterations_: int
        Number of boosting iterations to perform

    self.models_: List[BaseEstimator]
        List of fitted estimators, fitted along the boosting iterations
    """
    def __init__(self, wl: Callable[[], BaseEstimator], iterations: int):
        """
        Instantiate an AdaBoost class over the specified base estimator

        Parameters
        ----------
        wl: Callable[[], BaseEstimator]
            Callable for obtaining an instance of type BaseEstimator

        iterations: int
            Number of boosting iterations to perform
        """
        super().__init__()
        self.wl_ = wl
        self.iterations_ = iterations
        self.models_ = [None] * iterations
        # D_ holds the per-sample weight distribution; weights_ the
        # per-model vote weights. Both are populated in _fit.
        self.D_ = None
        self.weights_ = np.zeros(iterations)

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        Fit an AdaBoost classifier over given samples

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for

        y : ndarray of shape (n_samples, )
            Responses of input data to fit to
        """
        m = y.shape[0]
        # Start from the uniform distribution over the m samples.
        self.D_ = np.ones(m) / m
        X_samp = X
        y_samp = y
        idxs = np.arange(0, m)
        for t in range(self.iterations_):
            # Progress logging to stdout (newline every 25 iterations).
            if t == 0:
                print(t, end=" ")
            elif t % 25 == 0:
                print(t)
            else:
                print(t, end=" ")
            # NOTE(review): instead of fitting a weight-aware learner, this
            # implementation re-samples the training set according to D_
            # each round (resampling AdaBoost variant) — confirm the weak
            # learner is intentionally weight-agnostic.
            self.models_[t] = self.wl_().fit(X_samp, y_samp)
            ht = self.models_[t].predict(X)
            # Weighted training error of hypothesis t (labels presumably
            # in {-1, +1}, given the sign/exp updates below).
            epsilon = np.sum( (y != ht) * self.D_)
            # NOTE(review): epsilon == 0 (perfect learner) divides by zero
            # inside the log; there is no guard for that case.
            self.weights_[t] = 0.5*np.log(1/epsilon - 1)
            # Re-weight samples: misclassified points gain weight, then
            # renormalize to keep D_ a probability distribution.
            self.D_ *= np.exp(- y * self.weights_[t] * ht)
            self.D_ /= np.sum(self.D_)
            # Draw next round's training set i.i.d. from D_ (uses the
            # global NumPy RNG; no seeding happens here).
            samp_idxs = np.random.choice(idxs, size=m, replace=True, p=self.D_)
            samp_idxs.sort()
            X_samp = X[samp_idxs,:]
            y_samp = y[samp_idxs]

    def _predict(self, X):
        """
        Predict responses for given samples using fitted estimator

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        # Full ensemble = partial prediction over all iterations.
        return self.partial_predict(X, self.iterations_)

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under misclassification loss function

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples

        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under missclassification loss function
        """
        return self.partial_loss(X, y, self.iterations_)

    def partial_predict(self, X: np.ndarray, T: int) -> np.ndarray:
        """
        Predict responses for given samples using fitted estimators

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        T: int
            The number of classifiers (from 1,...,T) to be used for prediction

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        # Weighted vote of the first T hypotheses, thresholded by sign.
        y_pred = np.zeros(X.shape[0])
        for t in range(T):
            y_pred += self.models_[t].predict(X) * self.weights_[t]
        return np.sign(y_pred)

    def partial_loss(self, X: np.ndarray, y: np.ndarray, T: int) -> float:
        """
        Evaluate performance under misclassification loss function

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples

        y : ndarray of shape (n_samples, )
            True labels of test samples

        T: int
            The number of classifiers (from 1,...,T) to be used for prediction

        Returns
        -------
        loss : float
            Performance under missclassification loss function
        """
        # Imported locally to avoid a circular import at module load time.
        from ..metrics import misclassification_error
        y_pred = self.partial_predict(X, T)
        loss = misclassification_error(y, y_pred)
        return loss
| StarcoderdataPython |
3388526 | import numpy as np
import math
import sys
import pickle
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
    """Map a (lat, lon) coordinate into integer pixel indices.

    The latitude axis is flipped: row 0 corresponds to the max-latitude
    edge of the bounding box.
    """
    lat_cell = (max_lat - min_lat) / sizex
    lon_cell = (max_lon - min_lon) / sizey
    row = sizex - int((lat - min_lat) / lat_cell)
    col = int((lon - min_lon) / lon_cell)
    return row, col
def distance(p1, p2):
    """Approximate planar distance (in degrees) between two (lat, lon) points.

    The longitude difference is scaled by cos(latitude of p1) to account
    for meridian convergence.
    """
    dlat = p1[0] - p2[0]
    dlon_scaled = (p1[1] - p2[1]) * math.cos(math.radians(p1[0]))
    return np.sqrt(dlat * dlat + dlon_scaled * dlon_scaled)
class RoadGraph:
def __init__(self, filename=None, region = None):
self.nodeHash = {} # [tree_idx*10000000 + local_id] -> id
self.nodeHashReverse = {}
self.nodes = {} # id -> [lat,lon]
self.edges = {} # id -> [n1, n2]
self.nodeLink = {} # id -> list of next node
self.nodeID = 0
self.edgeID = 0
self.edgeHash = {} # [nid1 * 10000000 + nid2] -> edge id
self.edgeScore = {}
self.nodeTerminate = {}
self.nodeScore = {}
self.nodeLocations = {}
if filename is not None:
dumpDat = pickle.load(open(filename, "rb"))
forest = dumpDat[1]
self.forest = forest
tid = 0
for t in forest:
for n in t:
idthis = tid*10000000 + n['id']
thislat = n['lat']
thislon = n['lon']
if region is not None:
if thislat < region[0] or thislon < region[1] or thislat > region[2] or thislon > region[3]:
continue
#if n['edgeScore'] < 7.0 : # skip those low confidential edges
#
# continue
if n['similarWith'][0] != -1:
idthis = n['similarWith'][0]*10000000 + n['similarWith'][1]
thislat = forest[n['similarWith'][0]][n['similarWith'][1]]['lat']
thislon = forest[n['similarWith'][0]][n['similarWith'][1]]['lon']
if n['OutRegion'] == 1:
self.nodeTerminate[tid*10000000+n['parent']] = 1
idparent = tid*10000000 + n['parent']
parentlat = t[n['parent']]['lat']
parentlon = t[n['parent']]['lon']
if n['parent'] == 0:
print(tid, n['id'])
self.addEdge(idparent, parentlat, parentlon, idthis, thislat, thislon)
tid += 1
    def addEdge(self, nid1,lat1,lon1,nid2,lat2,lon2, reverse=False, nodeScore1 = 0, nodeScore2 = 0, edgeScore = 0): #n1d1->n1d2
        """Add the directed edge nid1 -> nid2, creating both endpoints on demand.

        nid1/nid2 are external ids (tree_idx*10000000 + local id); they are
        mapped to compact local ids via nodeHash. A duplicate edge is
        reported on stdout and ignored. With reverse=True the reverse
        adjacency (nodeLinkReverse) is updated too.
        """
        if nid1 not in self.nodeHash.keys():
            # First sighting of nid1: allocate a local id and register it.
            self.nodeHash[nid1] = self.nodeID
            self.nodeHashReverse[self.nodeID] = nid1
            self.nodes[self.nodeID] = [lat1, lon1]
            self.nodeLink[self.nodeID] = []
            #self.nodeLinkReverse[self.nodeID] = []
            self.nodeScore[self.nodeID] = nodeScore1
            self.nodeID += 1
        if nid2 not in self.nodeHash.keys():
            self.nodeHash[nid2] = self.nodeID
            self.nodeHashReverse[self.nodeID] = nid2
            self.nodes[self.nodeID] = [lat2, lon2]
            self.nodeLink[self.nodeID] = []
            #self.nodeLinkReverse[self.nodeID] = []
            self.nodeScore[self.nodeID] = nodeScore2
            self.nodeID += 1
        localid1 = self.nodeHash[nid1]
        localid2 = self.nodeHash[nid2]
        if localid1 * 10000000 + localid2 in self.edgeHash.keys():
            print("Duplicated Edge !!!", nid1, nid2)
            return
        self.edges[self.edgeID] = [localid1, localid2]
        self.edgeHash[localid1 * 10000000 + localid2] = self.edgeID
        self.edgeScore[self.edgeID] = edgeScore
        self.edgeID += 1
        if localid2 not in self.nodeLink[localid1]:
            self.nodeLink[localid1].append(localid2)
        if reverse == True:
            # NOTE(review): self.nodeLinkReverse is not initialized in
            # __init__ (only by ReverseDirectionLink), so reverse=True can
            # raise AttributeError if called first — verify call order.
            if localid2 not in self.nodeLinkReverse.keys():
                self.nodeLinkReverse[localid2] = []
            if localid1 not in self.nodeLinkReverse[localid2]:
                self.nodeLinkReverse[localid2].append(localid1)
    def addEdgeToOneExistedNode(self, nid1,lat1,lon1,nid2, reverse=False, nodeScore1 = 0, edgeScore = 0): #n1d1->n1d2
        """Add edge nid1 -> nid2 where nid2 is ALREADY a local node id.

        Unlike addEdge, nid2 is not translated through nodeHash and no
        duplicate-edge check is performed. The reverse adjacency is always
        updated (the ``reverse`` parameter is unused — presumably kept for
        signature symmetry with addEdge). Requires self.nodeLinkReverse to
        exist already.
        """
        if nid1 not in self.nodeHash.keys():
            # First sighting of nid1: allocate a local id and register it.
            self.nodeHash[nid1] = self.nodeID
            self.nodeHashReverse[self.nodeID] = nid1
            self.nodes[self.nodeID] = [lat1, lon1]
            self.nodeLink[self.nodeID] = []
            self.nodeLinkReverse[self.nodeID] = []
            self.nodeScore[self.nodeID] = nodeScore1
            self.nodeID += 1
        localid1 = self.nodeHash[nid1]
        localid2 = nid2
        self.edges[self.edgeID] = [localid1, localid2]
        self.edgeHash[localid1 * 10000000 + localid2] = self.edgeID
        self.edgeScore[self.edgeID] = edgeScore
        self.edgeID += 1
        if localid2 not in self.nodeLink[localid1]:
            self.nodeLink[localid1].append(localid2)
        if localid1 not in self.nodeLinkReverse[localid2]:
            self.nodeLinkReverse[localid2].append(localid1)
def BiDirection(self):
edgeList = list(self.edges.values())
for edge in edgeList:
localid1 = edge[1]
localid2 = edge[0]
self.edges[self.edgeID] = [localid1, localid2]
self.edgeHash[localid1 * 10000000 + localid2] = self.edgeID
self.edgeScore[self.edgeID] = self.edgeScore[self.edgeHash[localid2 * 10000000 + localid1]]
self.edgeID += 1
if localid2 not in self.nodeLink[localid1]:
self.nodeLink[localid1].append(localid2)
def ReverseDirectionLink(self):
edgeList = list(self.edges.values())
self.nodeLinkReverse = {}
for edge in edgeList:
localid1 = edge[1]
localid2 = edge[0]
if localid1 not in self.nodeLinkReverse :
self.nodeLinkReverse[localid1] = [localid2]
else:
if localid2 not in self.nodeLinkReverse[localid1]:
self.nodeLinkReverse[localid1].append(localid2)
for nodeId in self.nodes.keys():
if nodeId not in self.nodeLinkReverse.keys():
self.nodeLinkReverse[nodeId] = []
    # DFS
    def TOPOWalkDFS(self, nodeid, step = 0.00005, r = 0.00300, direction = False):
        """Depth-first walk from *nodeid*, sampling points ("mables") every
        *step* degrees along each traversed edge, out to radius *r*.

        :param nodeid: local node id to start from.
        :param step: sampling interval along edges (degrees).
        :param r: maximum walk distance from the start node (degrees).
        :param direction: when False, reverse links are also followed.
        :returns: list of (lat, lon) sample points.

        NOTE(review): the recursion in explore() has no depth guard; on a
        large dense graph this could hit Python's recursion limit.
        """
        localNodeList = {}
        localNodeDistance = {}
        mables = []
        localEdges = {}
        #localNodeList[nodeid] = 1
        #localNodeDistance[nodeid] = 0
        def explore(node_cur, node_prev, dist):
            # Skip if we already reached this node by a path at least as short.
            old_node_dist = 1
            if node_cur in localNodeList.keys():
                old_node_dist = localNodeDistance[node_cur]
                if localNodeDistance[node_cur] <= dist:
                    return
            if dist > r :
                return
            lat1 = self.nodes[node_cur][0]
            lon1 = self.nodes[node_cur][1]
            localNodeList[node_cur] = 1
            localNodeDistance[node_cur] = dist
            #mables.append((lat1, lon1))
            if node_cur not in self.nodeLinkReverse.keys():
                self.nodeLinkReverse[node_cur] = []
            reverseList = []
            if direction == False:
                reverseList = self.nodeLinkReverse[node_cur]
            for next_node in self.nodeLink[node_cur] + reverseList:
                # Only follow edges whose endpoint and edge both carry a
                # positive score (in either direction).
                edgeS = 0
                if node_cur * 10000000 + next_node in self.edgeHash.keys():
                    edgeS = self.edgeScore[self.edgeHash[node_cur * 10000000 + next_node]]
                if next_node * 10000000 + node_cur in self.edgeHash.keys():
                    edgeS = max(edgeS, self.edgeScore[self.edgeHash[next_node * 10000000 + node_cur]])
                if self.nodeScore[next_node] > 0 and edgeS > 0:
                    pass
                else:
                    continue
                if next_node == node_prev :
                    continue
                lat0 = 0
                lon0 = 0
                lat1 = self.nodes[node_cur][0]
                lon1 = self.nodes[node_cur][1]
                lat2 = self.nodes[next_node][0]
                lon2 = self.nodes[next_node][1]
                #TODO check angle of next_node
                localEdgeId = node_cur * 10000000 + next_node
                # if localEdgeId not in localEdges.keys():
                # 	localEdges[localEdgeId] = 1
                l = distance((lat2,lon2), (lat1,lon1))
                num = int(math.ceil(l / step))
                # Phase offset so samples stay on a global step grid along
                # the walk, not per-edge.
                bias = step * math.ceil(dist / step) - dist
                cur = bias
                if old_node_dist + l < r :
                    # Entire edge was already coverable from a previous,
                    # shorter visit — recurse without re-sampling it.
                    explore(next_node, node_cur, dist + l)
                else:
                    while cur < l:
                        alpha = cur / l
                        #for a in range(1,num):
                        #	alpha = float(a)/num
                        if dist + l * alpha > r :
                            break
                        # Interpolate a sample point along the edge.
                        latI = lat2 * alpha + lat1 * (1-alpha)
                        lonI = lon2 * alpha + lon1 * (1-alpha)
                        if (latI, lonI) not in mables:
                            mables.append((latI, lonI))
                        cur += step
                    l = distance((lat2,lon2), (lat1,lon1))
                    explore(next_node, node_cur, dist + l)
        explore(nodeid, -1, 0)
        return mables
    def distanceBetweenTwoLocation(self, loc1, loc2, max_distance):
        """Shortest along-graph distance between two on-edge locations (BFS).

        Each location is a tuple (node_a, node_b, dist_from_a, dist_from_b)
        describing a point on the edge between two nodes — inferred from
        the index usage below; confirm against callers.

        :param max_distance: search cutoff; paths longer than this are
            abandoned.
        :returns: the best distance found, or 100000 when no path within
            *max_distance* connects the two locations.
        """
        localNodeList = {}
        localNodeDistance = {}
        #mables = []
        localEdges = {}
        edge_covered = {} # (s,e) --> distance from s and distance from e
        # Same edge (either orientation): the answer is just the offset gap.
        if loc1[0] == loc2[0] and loc1[1] == loc2[1] :
            return abs(loc1[2] - loc2[2])
        elif loc1[0] == loc2[1] and loc1[1] == loc2[0]:
            return abs(loc1[2] - loc2[3])
        ans_dist = 100000
        # Seed the frontier with both endpoints of loc1's edge.
        # NOTE(review): both seeds use loc1[2]; the second endpoint
        # probably should be seeded with loc1[3] — verify.
        Queue = [(loc1[0], -1, loc1[2]), (loc1[1], -1, loc1[2])]
        while True:
            if len(Queue) == 0:
                break
            args = Queue.pop(0)
            node_cur, node_prev, dist = args[0], args[1], args[2]
            # Skip nodes already reached by a path at least as short.
            old_node_dist = 1
            if node_cur in localNodeList.keys():
                old_node_dist = localNodeDistance[node_cur]
                if localNodeDistance[node_cur] <= dist:
                    continue
            if dist > max_distance :
                continue
            lat1 = self.nodes[node_cur][0]
            lon1 = self.nodes[node_cur][1]
            localNodeList[node_cur] = 1
            localNodeDistance[node_cur] = dist
            #mables.append((lat1, lon1))
            if node_cur not in self.nodeLinkReverse.keys():
                self.nodeLinkReverse[node_cur] = []
            reverseList = []
            reverseList = self.nodeLinkReverse[node_cur]
            visited_next_node = []
            for next_node in self.nodeLink[node_cur] + reverseList:
                if next_node == node_prev:
                    continue
                if next_node == node_cur :
                    continue
                if next_node == loc1[0] or next_node == loc1[1] :
                    continue
                if next_node in visited_next_node:
                    continue
                visited_next_node.append(next_node)
                edgeS = 0
                lat0 = 0
                lon0 = 0
                lat1 = self.nodes[node_cur][0]
                lon1 = self.nodes[node_cur][1]
                lat2 = self.nodes[next_node][0]
                lon2 = self.nodes[next_node][1]
                localEdgeId = node_cur * 10000000 + next_node
                # if localEdgeId not in localEdges.keys():
                # 	localEdges[localEdgeId] = 1
                # Reaching loc2's edge: complete the path with the offset
                # from the matching endpoint and keep the minimum.
                if node_cur == loc2[0] and next_node == loc2[1]:
                    new_ans = dist + loc2[2]
                    if new_ans < ans_dist :
                        ans_dist = new_ans
                elif node_cur == loc2[1] and next_node == loc2[0]:
                    new_ans = dist + loc2[3]
                    if new_ans < ans_dist :
                        ans_dist = new_ans
                l = distance((lat2,lon2), (lat1,lon1))
                Queue.append((next_node, node_cur, dist + l))
        return ans_dist
# BFS (much faster)
def TOPOWalk(self, nodeid, step = 0.00005, r = 0.00300, direction = False, newstyle = False, nid1=0, nid2=0, dist1=0, dist2= 0, bidirection = False, CheckGPS = None, metaData = None):
    """BFS outward from a start location, dropping evenly spaced "marbles"
    (sample points, spelled `mables` below) on every edge reachable within
    path distance `r` of the start.

    Presumably used for a TOPO-style map-comparison metric (marbles/holes) —
    TODO confirm against callers.

    Args:
        nodeid: start node id; used only when `newstyle` is False.
        step: marble spacing along edges, in coordinate units (degrees).
        r: walk radius; propagation stops once the accumulated path distance
            from the start exceeds r.
        direction: if True, follow only forward links (self.nodeLink);
            if False, incoming links (self.nodeLinkReverse) are walked too.
        newstyle: if True, start from an interior point of edge (nid1, nid2)
            instead of a node; dist1/dist2 are that point's distances to
            nid1 and nid2 respectively.
        bidirection: if True, an edge present in both directions yields a
            second marble offset by +0.00001 in lat/lon (so two-way edges
            are sampled twice — presumably intentional; confirm).
        CheckGPS: optional (lat, lon) -> bool predicate; when given, only
            marbles for which it returns True are returned.
        metaData: optional map metadata; edges whose 'layer' property is
            negative (tunnels) produce no marbles.

    Returns:
        List of (lat, lon, dLat, dLon) tuples: marble position plus the
        direction vector of the edge segment it lies on.
    """
    localNodeList = {}       # node id -> 1 once the node has been expanded
    localNodeDistance = {}   # node id -> best known path distance from the start
    mables = []              # output marbles: (lat, lon, dLat, dLon)
    localEdges = {}          # NOTE: never used; leftover from an older version
    edge_covered = {} # (s,e) --> distance from s and distance from e
    #localNodeList[nodeid] = 1
    #localNodeDistance[nodeid] = 0
    if newstyle == False:
        # Seed the BFS at a node, at distance 0.
        Queue = [(nodeid, -1, 0)]
    else:
        # Seed the BFS at an interior point of edge (nid1, nid2): each
        # endpoint starts with its distance from that point.
        Queue = [(nid1, -1, dist1), (nid2, -1, dist2)]
        # Add holes between nid1 and nid2
        lat1 = self.nodes[nid1][0]
        lon1 = self.nodes[nid1][1]
        lat2 = self.nodes[nid2][0]
        lon2 = self.nodes[nid2][1]
        l = distance((lat2,lon2), (lat1,lon1))
        num = int(math.ceil(l / step))  # NOTE: unused
        alpha = 0
        # Sample the seed edge itself, walking from nid2 (alpha=0) toward
        # nid1 (alpha=1) in increments of `step`.
        while True:
            latI = lat1*alpha + lat2*(1-alpha)
            lonI = lon1*alpha + lon2*(1-alpha)
            d1 = distance((latI,lonI),(lat1,lon1))
            d2 = distance((latI,lonI),(lat2,lon2))
            # Keep samples within radius r of the start point, measured via
            # the endpoint distances (assumes dist1/dist2 are along-edge
            # distances — TODO confirm).
            if dist1 - d1 < r or dist2 -d2 < r:
                if (latI, lonI, lat2 - lat1, lon2 - lon1) not in mables:
                    mables.append((latI, lonI, lat2 - lat1, lon2 - lon1)) # add direction
                    if bidirection == True:
                        # Edge exists in both directions: add an offset duplicate.
                        if nid1 in self.nodeLink[nid2] and nid2 in self.nodeLink[nid1]:
                            mables.append((latI+0.00001, lonI+0.00001, lat2 - lat1, lon2 - lon1)) #Add another mables
            alpha += step/l
            if alpha > 1.0:
                break
    # Main BFS over the graph, bounded by path distance r.
    while True:
        if len(Queue) == 0:
            break
        args = Queue.pop(0)
        node_cur, node_prev, dist = args[0], args[1], args[2]
        # 1 degree is effectively "infinite" compared with r (~0.003), so
        # this default means "node never visited before".
        old_node_dist = 1
        if node_cur in localNodeList.keys():
            old_node_dist = localNodeDistance[node_cur]
            # Already reached via a path at least as short: skip.
            if localNodeDistance[node_cur] <= dist:
                continue
        if dist > r :
            continue
        lat1 = self.nodes[node_cur][0]
        lon1 = self.nodes[node_cur][1]
        localNodeList[node_cur] = 1
        localNodeDistance[node_cur] = dist
        #mables.append((lat1, lon1))
        # Lazily ensure a reverse-link entry exists for this node.
        if node_cur not in self.nodeLinkReverse.keys():
            self.nodeLinkReverse[node_cur] = []
        reverseList = []
        if direction == False:
            # Undirected walk: traverse incoming edges as well.
            reverseList = self.nodeLinkReverse[node_cur]
        visited_next_node = []
        for next_node in self.nodeLink[node_cur] + reverseList:
            if next_node == node_prev:
                continue
            if next_node == node_cur :
                continue
            # Never walk back across the seed edge's endpoints.
            if next_node == nid1 or next_node == nid2 :
                continue
            # Deduplicate neighbors appearing in both link lists.
            if next_node in visited_next_node:
                continue
            visited_next_node.append(next_node)
            edgeS = 0
            # if node_cur * 10000000 + next_node in self.edgeHash.keys():
            # edgeS = self.edgeScore[self.edgeHash[node_cur * 10000000 + next_node]]
            # if next_node * 10000000 + node_cur in self.edgeHash.keys():
            # edgeS = max(edgeS, self.edgeScore[self.edgeHash[next_node * 10000000 + node_cur]])
            # if self.nodeScore[next_node] > 0 and edgeS > 0:
            # pass
            # else:
            # continue
            # if next_node == node_prev :
            # continue
            lat0 = 0  # NOTE: unused
            lon0 = 0  # NOTE: unused
            lat1 = self.nodes[node_cur][0]
            lon1 = self.nodes[node_cur][1]
            lat2 = self.nodes[next_node][0]
            lon2 = self.nodes[next_node][1]
            #TODO check angle of next_node
            localEdgeId = node_cur * 10000000 + next_node
            # if localEdgeId not in localEdges.keys():
            # localEdges[localEdgeId] = 1
            l = distance((lat2,lon2), (lat1,lon1))
            num = int(math.ceil(l / step))  # NOTE: unused
            # Offset so marbles stay on the global `step` grid of path
            # distance from the start.
            bias = step * math.ceil(dist / step) - dist
            cur = bias
            if old_node_dist + l < r :
                # The whole edge already fell inside the radius during an
                # earlier (longer) visit of node_cur, so it is fully
                # sampled; just propagate the improved distance.
                Queue.append((next_node, node_cur, dist + l))
                #explore(next_node, node_cur, dist + l)
            else:
                # Only sample the portion of the edge not yet covered from
                # either endpoint (tracked in edge_covered).
                start_limitation = 0
                end_limitation = l
                if (node_cur, next_node) in edge_covered.keys():
                    start_limitation = edge_covered[(node_cur, next_node)]
                #if next_node == node_cur :
                #print("BUG")
                if (next_node, node_cur) in edge_covered.keys():
                    end_limitation = l-edge_covered[(next_node, node_cur)]
                #end_limitation = l
                #if next_node not in localNodeDistance.keys(): # Should we remove this ?
                turnnel_edge = False
                if metaData is not None:
                    # Map back to original node ids to look up edge properties.
                    nnn1 = self.nodeHashReverse[next_node]
                    nnn2 = self.nodeHashReverse[node_cur]
                    # Negative layer => tunnel; emit no marbles for it.
                    if metaData.edgeProperty[metaData.edge2edgeid[(nnn1,nnn2)]]['layer'] < 0:
                        turnnel_edge = True
                # Walk along the edge from node_cur (alpha=0) to next_node
                # (alpha=1), dropping a marble every `step`.
                while cur < l:
                    alpha = cur / l
                    if dist + l * alpha > r :
                        break
                    if l * alpha < start_limitation:
                        cur += step
                        continue
                    if l * alpha > end_limitation:
                        break
                    latI = lat2 * alpha + lat1 * (1-alpha)
                    lonI = lon2 * alpha + lon1 * (1-alpha)
                    if (latI, lonI, lat2 - lat1, lon2 - lon1) not in mables and turnnel_edge is False:
                        mables.append((latI, lonI, lat2 - lat1, lon2 - lon1)) # add direction
                        if bidirection == True:
                            if next_node in self.nodeLink[node_cur] and node_cur in self.nodeLink[next_node] and turnnel_edge is False:
                                mables.append((latI+0.00001, lonI+0.00001, lat2 - lat1, lon2 - lon1)) #Add another mables
                    cur += step
                # Record how far along this edge sampling progressed (both
                # branches assign the same value; the if/else is redundant
                # but kept as-is).
                if (node_cur, next_node) in edge_covered.keys():
                    #if cur-step < edge_covered[(node_cur, next_node)]:
                    # print(node_cur, edge_covered[(node_cur, next_node)], cur-step)
                    edge_covered[(node_cur, next_node)] = cur - step #max(cur, edge_covered[(node_cur, next_node)])
                    #edge_covered[(node_cur, next_node)] = cur
                else:
                    edge_covered[(node_cur, next_node)] = cur - step
                    #edge_covered[(node_cur, next_node)] = cur
                l = distance((lat2,lon2), (lat1,lon1))  # NOTE: recomputed; same value as above
                Queue.append((next_node, node_cur, dist + l))
                #explore(next_node, node_cur, dist + l)
    # Optionally filter the marbles through the CheckGPS predicate.
    result_marbles = []
    if CheckGPS is None:
        result_marbles = mables
    else:
        for mable in mables:
            if CheckGPS(mable[0], mable[1]) == True:
                result_marbles.append(mable)
    #explore(nodeid, -1, 0)
    return result_marbles
def removeNode(self, nodeid):
    """Delete `nodeid` and every edge touching it from all graph tables.

    Cleans up self.edges / self.edgeScore / self.edgeHash for each incident
    edge, unlinks the node from its neighbors' adjacency lists, and finally
    drops the node's own entries.
    """
    # Outgoing edges: nodeid -> successor.
    for succ in self.nodeLink[nodeid]:
        key = nodeid * 10000000 + succ
        eid = self.edgeHash[key]
        del self.edges[eid]
        del self.edgeScore[eid]
        del self.edgeHash[key]
        if nodeid in self.nodeLinkReverse[succ]:
            self.nodeLinkReverse[succ].remove(nodeid)
    # Incoming edges: predecessor -> nodeid.
    for pred in self.nodeLinkReverse[nodeid]:
        key = pred * 10000000 + nodeid
        eid = self.edgeHash[key]
        del self.edges[eid]
        del self.edgeScore[eid]
        del self.edgeHash[key]
        if nodeid in self.nodeLink[pred]:
            self.nodeLink[pred].remove(nodeid)
    # Finally drop the node itself from every per-node table.
    del self.nodes[nodeid]
    del self.nodeScore[nodeid]
    del self.nodeLink[nodeid]
    del self.nodeLinkReverse[nodeid]
def removeDeadEnds(self, oneround = False):
    """Remove dead-end nodes from the graph and return how many were deleted.

    A node is a dead end when it has exactly one distinct neighbor, or no
    outgoing links, or no incoming links. Nodes flagged in
    self.nodeTerminate, and nodes whose original id (self.nodeHashReverse)
    is a multiple of 10000000, are preserved.

    Args:
        oneround: accepted for API compatibility; currently unused
            (callers presumably loop until this returns 0 — confirm).

    Returns:
        Number of nodes removed in this pass.
    """
    deleted = 0
    # Snapshot the key set: self.removeNode() deletes from self.nodes while
    # we iterate, which raises "dictionary changed size during iteration"
    # in Python 3 if we iterate the live view.
    for nodeid in list(self.nodes.keys()):
        # Protected node: explicitly marked as a terminal node.
        if self.nodeHashReverse[nodeid] in self.nodeTerminate:
            continue
        # Protected node: original id is a sentinel multiple of 10000000.
        if self.nodeHashReverse[nodeid] % 10000000 == 0:
            continue
        d = self.NumOfNeighbors(nodeid)
        if d == 1 or len(self.nodeLink[nodeid]) == 0 or len(self.nodeLinkReverse[nodeid]) == 0:
            self.removeNode(nodeid)
            deleted += 1
    return deleted
def NumOfNeighbors(self, nodeid):
    """Count the distinct nodes adjacent to `nodeid`, considering both
    outgoing (self.nodeLink) and incoming (self.nodeLinkReverse) links."""
    return len(set(self.nodeLink[nodeid]) | set(self.nodeLinkReverse[nodeid]))
def getNeighbors(self,nodeid):
    """Return the distinct neighbors of `nodeid` (both link directions),
    excluding the node itself; yielded in first-seen order as a keys view."""
    unique = {nb: 1
              for nb in self.nodeLink[nodeid] + self.nodeLinkReverse[nodeid]
              if nb != nodeid}
    return unique.keys()
def edgeIntersection(baseX, baseY, dX, dY, n1X, n1Y, n2X, n2Y):
    """Intersect the ray from (baseX, baseY) along (dX, dY) with the segment
    (n1X, n1Y)-(n2X, n2Y).

    Returns (iX, iY, extend_length, 1) on a hit, where extend_length is the
    distance travelled along the ray to reach the intersection, or
    (0, 0, 0, 0) when the ray is parallel to the segment, points away from
    it, or the line intersection falls outside the segment span.
    """
    # Denominator of the ray/line solve; zero means parallel (or degenerate).
    denom = dX * n1Y + dY * n2X - dX * n2Y - dY * n1X
    if denom == 0:
        return 0, 0, 0, 0
    numer = n2X * n1Y - n1X * n2Y + baseX * (n2Y - n1Y) + baseY * (n1X - n2X)
    alpha = numer / denom  # ray parameter of the intersection point
    if alpha < 0:
        # Intersection lies behind the ray origin.
        return 0, 0, 0, 0
    iX = baseX + alpha * dX
    iY = baseY + alpha * dY
    # Inside-segment test: vectors n1->i and i->n2 must not point oppositely.
    inside = (iX - n1X) * (n2X - iX) + (iY - n1Y) * (n2Y - iY)
    if inside < 0:
        return 0, 0, 0, 0
    extend_length = np.sqrt(alpha * dX * alpha * dX + alpha * dY * alpha * dY)
    return iX, iY, extend_length, 1
if __name__ == "__main__":
dumpDat = pickle.load(open(sys.argv[1], "rb"))
| StarcoderdataPython |
#Code By <NAME> (aadiupadhyay)
from sys import stdin,stdout
#Fast input output
# Terse I/O helpers (competitive-programming style); all read from sys.stdin.
st=lambda:list(stdin.readline().strip())            # one line -> list of characters
li=lambda:list(map(int,stdin.readline().split()))   # one line -> list of ints
mp=lambda:map(int,stdin.readline().split())         # one line -> iterator of ints (for unpacking)
inp=lambda:int(stdin.readline())                    # one line -> single int
pr=lambda n: stdout.write(str(n)+"\n")              # write one value followed by a newline
mod=1000000007  # 1e9+7 prime modulus; declared by convention, unused in this solution
def solve():
    """Answer a single test case.

    Reads n and k from stdin, finds the largest divisor of n not exceeding
    k, and prints the resulting quotient n // divisor (the smallest value
    of n // d achievable with a divisor d <= k).
    """
    n, k = mp()
    # Largest divisor of n that is <= k; -inf until one is found
    # (d = 1 always qualifies when k >= 1).
    best = float('-inf')
    d = 1
    # Enumerate divisors in complementary pairs (d, n // d) up to sqrt(n).
    while d * d <= n:
        if n % d == 0:
            if d <= k:
                best = max(best, d)
            partner = n // d
            if partner != d and partner <= k:
                best = max(best, partner)
        d += 1
    pr(n // best)
# Read the number of test cases from the first line, then answer each one.
for _ in range(inp()):
    solve()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.