id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5041297 | <reponame>VictorKazankov/Python_training
import random
import string
from model.contact import Contact
def random_string(prefix, maxlen):
    """Return *prefix* plus a random alphanumeric tail of length 0..maxlen-1.

    Note: ``randrange(maxlen)`` excludes ``maxlen`` itself, so the tail is at
    most ``maxlen - 1`` characters long (and may be empty).
    """
    alphabet = string.ascii_letters + string.digits
    tail_len = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(tail_len))
def random_string_only_digits(maxlen):
    """Return a random digit string of length 0..maxlen-1 (may be empty)."""
    length = random.randrange(maxlen)
    return "".join(random.choice(string.digits) for _ in range(length))
# Static test fixture: two fully populated Contact records used by the
# contact add/modify tests.
# NOTE(review): the "<EMAIL>" placeholders look like dataset anonymisation
# residue -- confirm whether real-looking addresses are needed by the tests.
testdata = [
    Contact(firstname="Victor", middlename="Ivanovich", lastname="Kazankov", nickname="Six",
            title = "MyTitle", company = "MyCompany", address = "MyAddress", homephone = "+380555555",
            mobilephone = "8099258789", workphone = "9874444444", fax = "285999999", email = "<EMAIL>",
            email2 = "<EMAIL>", email3 = "<EMAIL>", homepage = "myhomepage",
            bday = "8", bmonth = "March", byear = "1909", aday = "15", amonth = "December", ayear = "2010", address2 = "MyAddress2",
            secondaryphone = "2345678", notes = "MyNotes"),
    Contact(firstname="Victor2", middlename="Ivanovich2", lastname="Kazankov2", nickname="Six",
            title = "MyTitle", company = "MyCompany", address = "MyAddress", homephone = "+380555555",
            mobilephone = "8099258789", workphone = "9874444444", fax = "285999999", email = "<EMAIL>",
            email2 = "<EMAIL>", email3 = "<EMAIL>", homepage = "myhomepage",
            bday = "8", bmonth = "March", byear = "1909", aday = "15", amonth = "December", ayear = "2010", address2 = "MyAddress2",
            secondaryphone = "2345678", notes = "MyNotes")
]
1858848 | <filename>PycharmProjects/untitled1/firstPython.py<gh_stars>0
print("Hello world.", end="")
print('hello python')
# PyCharm is an integrated development environment (IDE).
# create New Project
#
# Identifier rules:
#   - identifiers consist of letters, digits and underscores
#   - they must not start with a digit
#   - built-in keywords cannot be used as identifiers
#   - identifiers are case sensitive
#   - choose meaningful names
# Naming conventions:
#   - UpperCamelCase:  MyName
#   - lowerCamelCase:  myName
#   - snake_case:      my_name
5080578 | <reponame>ankit27kh/Xanadu-Quantum-Codebook
"""The code template to supply to the front end. This is what the user will
be asked to complete and submit for grading.
Do not include any imports.
This is not a REPL environment so include explicit 'print' statements
for any outputs you want to be displayed back to the user.
Use triple single quotes to enclose the formatted code block.
"""
# The string below is shipped verbatim to the grading front end; its content
# is user-facing source code, so do not reformat or translate it here.
challenge_code = '''def extract_qubit_state(input_state):
"""Extract the state of the third qubit from the combined state after teleportation.
Args:
input_state (array[complex]): A 3-qubit state of the form
(1/2)(|00> + |01> + |10> + |11>) (a|0> + b|1>)
obtained from the teleportation protocol.
Returns:
array[complex]: The state vector np.array([a, b]) of the third qubit.
"""
##################
# YOUR CODE HERE #
##################
# DETERMINE THE STATE OF THE THIRD QUBIT
return
# Here is the teleportation routine for you
dev = qml.device("default.qubit", wires=3)
#################
# YOUR CODE HERE #
##################
# OPTIONALLY UPDATE THIS STATE PREPARATION ROUTINE
def state_preparation():
qml.Hadamard(wires=0)
qml.Rot(0.1, 0.2, 0.3, wires=0)
@qml.qnode(dev)
def teleportation():
state_preparation()
entangle_qubits()
rotate_and_controls()
return qml.state()
# Print the extracted state after teleportation
full_state = teleportation()
print(extract_qubit_state(full_state))
'''
| StarcoderdataPython |
4887051 | from django.core.management.base import BaseCommand
from ...models import DSO
from ...finder import create_dso_finder_chart
class Command(BaseCommand):
    """Management command that creates/updates DSO finder charts."""
    help = 'Create DSO finder charts'

    def add_arguments(self, parser):
        # --dso_list: explicit set of DSO primary keys to (re)build.
        parser.add_argument('--dso_list', dest='dso_list', nargs='+', type=int)
        # --all: rebuild charts for every DSO.
        parser.add_argument('--all', dest='all', action='store_true')
        # --test: dry run; charts are generated but the model is not updated.
        parser.add_argument('--test', action='store_true')

    def handle(self, *args, **options):
        """
        Three ways this can run:
            - Create all new maps for all DSOs (--all)
            - Create/Update maps for a subset of DSOs (--dso_list pk [pk ...])
            - Create maps only for DSOs that don't already have one (default)
        """
        dso_list = None
        just_new = False
        # FIX: the old code stored options['all'] in a local named `all`,
        # shadowing the builtin; the value was never read afterwards.
        if options['all']:
            print("ALL is true")
        elif options['dso_list']:
            dso_list = options['dso_list']
            print("GOT DSOs: ", dso_list)
        else:
            just_new = True
            print("Running new DSOs")
        if dso_list:
            dsos = DSO.objects.filter(pk__in=dso_list)
        else:
            dsos = DSO.objects.all()
        for dso in dsos:
            # When only filling gaps, skip DSOs that already have a chart.
            if just_new and dso.dso_finder_chart:
                continue
            print("Creating/Updating Finder Chart for {}: {}".format(dso.pk, dso.shown_name))
            fn = create_dso_finder_chart(dso, test=options['test'], save_file=True)
            if not options['test']:
                dso.dso_finder_chart = 'dso_charts/{}'.format(fn)
                # NOTE(review): save() placed inside the not-test branch so
                # --test performs no DB writes -- confirm against original.
                dso.save()
11262357 | #!/usr/bin/env python
"""This is a wrapper around the real compiler.
It first invokes a real compiler to generate
an object file. Then it invokes a bitcode
compiler to generate a parallel bitcode file.
It records the location of the bitcode in an
ELF section of the object file so that it can be
found later after all of the objects are
linked into a library or executable.
"""
import sys
from .compilers import wcompile
def main():
    """The entry point to wllvm.

    Delegates to the shared wrapper driver, tagging the invocation as the
    'wllvm' (C) compiler.  Returns the wrapped compiler's exit status.
    """
    return wcompile("wllvm")


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
11371000 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from import_export import resources
from .models import TrainingAttendance, Distribution, Individual
class TrainingAttendanceResource(resources.ModelResource):
    """django-import-export resource for TrainingAttendance rows."""
    class Meta:
        model = TrainingAttendance
class DistributionResource(resources.ModelResource):
    """django-import-export resource for Distribution rows."""
    class Meta:
        model = Distribution
class IndividualResource(resources.ModelResource):
    """django-import-export resource for Individual rows."""
    class Meta:
        model = Individual
| StarcoderdataPython |
266428 | import numpy as np
import pandas as pd
def dummy_prep(data, method=None):
    """One-hot encode the categorical columns of *data*.

    Parameters
    ----------
    data : pandas.DataFrame
        Only columns with dtype "category" are encoded.
    method : {None, "drop_first", "deviation"}, optional
        None         -> plain one-hot (dummy) coding.
        "drop_first" -> dummy coding with the first level dropped.
        "deviation"  -> effect (sum-to-zero) coding: the first level is
                        dropped and rows belonging to it get -1 in every
                        remaining column of that variable.

    Returns
    -------
    pandas.DataFrame of indicator columns (None for an unknown *method*,
    matching the original fall-through behaviour).
    """
    cat_data = data.loc[:, data.dtypes == "category"]
    if not method:
        return pd.get_dummies(cat_data)
    if method == "drop_first":
        return pd.get_dummies(cat_data, drop_first=True)
    if method == "deviation":
        varlist = data.columns[(data.dtypes == "category").values]
        # FIX: use a signed dtype.  With unsigned dummies the -1 written
        # below wrapped to 255, which is what the removed `import q; q.d()`
        # debugger hooks were chasing.
        dummies = pd.get_dummies(cat_data).astype("int8")
        # NOTE(review): substring matching of variable -> column names is
        # fragile if one variable name is a prefix of another.
        dummylist = {v: [c for c in dummies.columns if v in c] for v in varlist}
        for cols in dummylist.values():
            dropout, keepers = cols[0], cols[1:]
            # Rows in the dropped (reference) level get -1 in kept columns.
            dummies.loc[dummies[dropout] == 1, keepers] = -1
            del dummies[dropout]
        return dummies
# Ad-hoc smoke test: build a frame with three categorical columns and run the
# deviation coder on it.
# NOTE(review): this executes on import -- consider moving it under
# `if __name__ == "__main__":`.
test1 = pd.DataFrame()
test1["cat2"] = pd.Categorical(np.random.randint(low=0, high=2, size=100))
test1["cat3"] = pd.Categorical(np.random.randint(low=0, high=3, size=100))
test1["cat4"] = pd.Categorical(np.random.randint(low=0, high=4, size=100))
print(test1.groupby("cat4").cat3.count())
print(test1.head())
dummy_prep(test1[["cat4", "cat3", "cat2"]], method="deviation").head()
| StarcoderdataPython |
353897 | <filename>script/filterlog.py
#!/usr/bin/env python
import sys
import getopt
def main(argv):
    """Copy lines containing a keyword from an input log file to an output file.

    Usage: filterlog.py -i <input> -o <output> -w <word> [-f]

    -i/--input   path of the log file to read (required)
    -o/--output  path of the filtered output (default: output.txt)
    -w/--word    keyword; only lines containing it are kept (required)
    -f           overwrite (truncate) the output file instead of appending
    """
    usage = 'filterlog.py -i <input> -o <output> <word> -f'
    inp = ''
    outp = ''
    tag = ''
    # 'a' appends to an existing output file; '-f' switches to 'w' (truncate).
    force = 'a'
    try:
        # FIX: '-h' was handled below but missing from the getopt spec,
        # so `-h` used to raise GetoptError instead of printing usage.
        opts, args = getopt.getopt(
            argv, "hi:o:w:f", ["input=", "output=", "word="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-i", "--input"):
            inp = arg
        elif opt in ("-o", "--output"):
            outp = arg
        # FIX: was `opt in ("--word")` / `opt in ("-f")` -- those are string
        # (substring) tests that only worked by accident; use real tuples.
        elif opt in ("-w", "--word"):
            tag = arg
        elif opt == "-f":
            force = 'w'
    if inp == '' or tag == '':
        print(usage)
        sys.exit()
    if outp == '':
        print(usage)
        print('Warning: the output file is named output.txt')
        outp = 'output.txt'
    print("Looking for '" + tag + "'")
    if force == 'w':
        print('Warning: The ' + outp + ' file is going to be deleted if there is any !!')
    else:
        print('Warning: If the ' + outp + ' file exists, we can not delete it, force the script (-f) or delete the file')
    # FIX: `with` guarantees both files are closed even if writing fails.
    with open(inp) as src, open(outp, force) as dst:
        for line in src:
            if tag in line:
                dst.write(line)
    print("Success ")


if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
3342401 | from .app import start_web_app
| StarcoderdataPython |
6503703 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__init__.py
-----------
TxTrader module init.

Copyright (c) 2015 Reliance Systems Inc. <<EMAIL>>
Licensed under the MIT license. See LICENSE for details.
"""
# Submodules re-exported via `from txtrader import *`.
__all__ = ['version', 'tcpserver', 'webserver',
           'tws', 'cqg', 'client', 'monitor']
| StarcoderdataPython |
9616043 | <reponame>opensanctions/nomenklatura
from nomenklatura.matching.model import compare_scored, explain_matcher
__all__ = ["compare_scored", "explain_matcher"]
| StarcoderdataPython |
5183999 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow/python/util/stack_trace.h."""
import traceback
from tensorflow.python.platform import test
from tensorflow.python.util import _stack_trace_test_lib
class StackTraceTest(test.TestCase):
    """Checks the C++ stack-trace helper against Python's traceback module."""

    def testStackTraceMatchesPy(self):
        # Capture the same call stack from both the C++ helper and Python.
        cc_stack_trace = _stack_trace_test_lib.GetStackTraceString()
        py_stack_trace = ''.join(traceback.format_stack())
        # Should be same except at the end where the stack trace generation
        # calls are made (4 trailing lines on the C++ side, 3 on Python's).
        self.assertEqual(
            cc_stack_trace.split('\n')[:-4],
            py_stack_trace.split('\n')[:-3])


if __name__ == '__main__':
    test.main()
| StarcoderdataPython |
8080423 | <reponame>hashnfv/hashnfv-bottlenecks
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# ------------------------------------------------------
# Directories and file locations
# ------------------------------------------------------
# Relative directory that holds the Heat templates.
HEAT_DIR = 'heat/'
# Base name of the generated test template file.
TEST_TEMPLATE_NAME = 'test_template'
# File extension appended to generated templates.
TEMPLATE_EXTENSION = '.yaml'
| StarcoderdataPython |
def solve(n, m):
    """Count the cards on an n x m grid flipped an odd number of times.

    Each card is flipped once for itself and once per (up to 8) neighbouring
    card, so (per the original analysis):
      - a lone card (1x1) is flipped once              -> odd
      - end cards of a 1xK row are flipped twice       -> even
      - inner cards of a 1xK row are flipped 3 times   -> odd
      - corners get 4 flips, outer edges 6             -> even
      - interior cells get 9 flips                     -> odd
    """
    # Single-card board.
    if n == 1 and m == 1:
        return 1
    # Single row or column: everything except the two ends.
    if n == 1 or m == 1:
        return max(n, m) - 2
    # General case: the interior cells.
    return (n - 2) * (m - 2)


if __name__ == "__main__":
    # Guard added so importing this module does not block on stdin.
    N, M = map(int, input().split())
    print(solve(N, M))
| StarcoderdataPython |
1611560 | import argparse
import multiprocessing
from pyfiglet import Figlet
from rich.console import Console
from poc import nc_beanshell_rce,nc_upload_rce,nc_erp_sql,nc_u8_test_sql,nc_erp_directory
console = Console()
def main(target_url):
    """Normalise *target_url* and run every NC OA proof-of-concept against it.

    Ensures the URL carries an http:// scheme and a trailing slash before the
    individual PoC modules are invoked.
    """
    if target_url[:4] != 'http':
        target_url = 'http://' + target_url
    if target_url[-1] != '/':
        target_url += '/'
    nc_beanshell_rce.main(target_url)
    nc_upload_rce.main(target_url)
    nc_u8_test_sql.main(target_url)
    nc_erp_sql.POC_1(target_url)
    nc_erp_directory.main(target_url)
if __name__ == '__main__':
    console.print(Figlet(font='slant').renderText('NC OA exp'), style='bold blue')
    console.print(' Author: iamzhaoxin \n', style='bold blue')
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('-u', '--url', dest='url', help='Target Url')
        parser.add_argument('-f', '--file', dest='file', help='Target Url File', type=argparse.FileType('r'))
        args = parser.parse_args()
        if args.file:
            # Scan every URL in the file concurrently, one task per line.
            pool = multiprocessing.Pool()
            for url in args.file:
                pool.apply_async(main, args=(url.strip('\n'),))
            pool.close()
            pool.join()
        elif args.url:
            if "http://" in args.url or "https://" in args.url:
                main(args.url)
            else:
                console.print('缺少HTTP头,例如:http://127.0.0.1')
        else:
            console.print('缺少URL目标, 请使用 [-u URL] or [-f FILE]')
    except KeyboardInterrupt:
        # FIX: was `console.console.print`, which raised AttributeError and
        # masked the Ctrl+C exit message.
        console.print('\nCTRL+C 退出', style='reverse bold red')
| StarcoderdataPython |
295021 | <reponame>smayya337/libcodebusters-python<filename>libcodebusters/encrypt.py
import string
import random
from numpy import array, matmul
from libcodebusters import utils
import math
def aristocrat(plaintext: str) -> str:
    """Encodes text with the Aristocrat cipher (monoalphabetic substitution)."""
    ciphertext: str = ""
    # Random substitution key -- presumably a derangement of A-Z produced by
    # utils.alph_map (indices 0-25); confirm in utils.
    alph_map: list = utils.alph_map()
    plaintext = plaintext.strip().upper()
    for i in plaintext:
        ascii_val: int = ord(i)
        if utils.is_letter(ascii_val):
            # Map the 0-25 letter index through the key, back to ASCII A-Z.
            ciphertext += chr(alph_map[ascii_val - 65] + 65)
        else:
            # Non-letters (spaces, punctuation) pass through unchanged.
            ciphertext += chr(ascii_val)
    return ciphertext
def rot(plaintext: str, val: int) -> str:
    """Encodes text with a ROT/Caesar cipher, shifting letters by *val*."""
    plaintext = plaintext.strip().upper()
    return utils.rot(plaintext, val)
def random_rot(plaintext: str) -> str:
    """Encodes text with a ROT/Caesar cipher using a random shift (0-25 inclusive)."""
    val: int = random.randint(0, 25)
    return rot(plaintext, val)
def rail_fence(plaintext: str, val: int) -> str:
    """Encodes text with the rail fence cipher using *val* rails."""
    plaintext = plaintext.strip().upper()
    return utils.rail(plaintext, val, "encrypt")
def vigenere(plaintext: str, key: str) -> str:
    """Encodes text with the Vigenere cipher under the repeating *key*."""
    ciphertext: str = ""
    plaintext = plaintext.strip().upper()
    key = key.upper()
    plain_char: list = [ord(letter) for letter in plaintext]
    # Key stream aligned character-by-character with the plaintext
    # (see utils.vig_key_array for how non-letters are handled).
    key_char: list = utils.vig_key_array(key, plain_char)
    for i in range(0, len(plain_char)):
        if utils.is_letter(plain_char[i]):
            # (p + k) mod 26; the 130 = 2 * 65 cancels both ASCII offsets.
            ciphertext += chr((plain_char[i] + key_char[i] - 130) % 26 + 65)
        else:
            # Non-letters pass through unchanged.
            ciphertext += chr(plain_char[i])
    return ciphertext
def patristocrat(plaintext: str) -> str:
    """Encodes text with the Patristocrat cipher.

    Same substitution as the Aristocrat, but all non-letters are removed and
    the output is regrouped into blocks of five letters.
    """
    ciphertext: str = ""
    new_cipher: str = ""
    alph_map: list = utils.alph_map()
    plaintext = plaintext.strip().upper()
    # First pass: keep only the letters.
    for i in range(0, len(plaintext)):
        ascii_val: int = ord(plaintext[i])
        if utils.is_letter(ascii_val):
            new_cipher += chr(ascii_val)
    # Second pass: substitute and insert a space after every 5th letter
    # (but not after the final one).
    letters: int = 0
    for i in range(0, len(new_cipher)):
        ascii_val: int = ord(new_cipher[i])
        ciphertext += chr(alph_map[ascii_val - 65] + 65)
        letters += 1
        if letters == 5 and i != len(new_cipher) - 1:
            ciphertext += " "
            letters = 0
    return ciphertext
def atbash(plaintext: str) -> str:
    """Encodes text with the Atbash cipher (A<->Z, B<->Y, ...)."""
    return utils.atbash(plaintext)
def hill(plaintext: str, key: str) -> str:
    """Encodes text with the Hill cipher.

    The key is padded with 'Z' up to the next n x n square; the plaintext is
    padded with 'Z' to a multiple of n before being multiplied block-wise.
    """
    plaintext = utils.letters_only(plaintext.strip().upper())
    key = utils.letters_only(key.upper())
    # Smallest n whose n x n square can hold the key.
    len_val: int = math.ceil(math.sqrt(len(key)))
    square_val: int = int(math.pow(len_val, 2))
    while len(key) < square_val:
        key += "Z"
    while len(plaintext) % len_val != 0:
        plaintext += "Z"
    # Plaintext laid out as column blocks of n letters; key as an n x n matrix.
    plain_matrix: array = array(list(plaintext)).reshape((int(len(plaintext) / len_val), len_val)).T
    key_matrix: array = array(list(key)).reshape(len_val, len_val)
    product_matrix = matmul(utils.char_to_num_matrix(key_matrix), utils.char_to_num_matrix(plain_matrix))
    # NOTE(review): no explicit mod-26 reduction here -- presumably
    # utils.num_to_string reduces the products; confirm.
    return utils.num_to_string(product_matrix)
def affine(plaintext: str, a: int, b: int) -> str:
    """Encodes text with the Affine cipher E(x) = (a*x + b) mod 26.

    NOTE(review): *a* must be coprime with 26 for the cipher to be decodable;
    this is not validated here.
    """
    # Substitution alphabet: image of each letter index 0-25 under E(x).
    alph_map: list = [chr((a * i + b) % 26 + 65) for i in range(0, 26)]
    return utils.convert(plaintext, alph_map)
def baconian(plaintext: str) -> str:
    """Encodes text with the Baconian cipher using two random punctuation symbols."""
    plaintext = utils.letters_only(plaintext.strip().upper())
    punctuation: list = [c for c in string.punctuation]
    numbers = [bin(c) for c in range(0, 26)]
    # Classic 24-letter Baconian alphabet: I/J and U/V share a code.
    numbers[9] = numbers[8]
    numbers[21] = numbers[20]
    # Two distinct symbols stand in for the 'A' and 'B' of the classic cipher.
    a_sym, b_sym = random.sample(punctuation, 2)
    ciphertext: str = ""
    for letter in plaintext:
        binary = numbers[ord(letter) - 65][2:]  # strip the '0b' prefix
        while len(binary) < 5:
            binary = "0" + binary  # left-pad to the fixed 5-bit group
        ciphertext += (binary.replace("0", a_sym).replace("1", b_sym) + " ")
    return ciphertext.strip()
def morbit(plaintext: str, friendly: bool) -> str:
    """Encodes text with the Morbit cipher.

    Pairs of Morse symbols ('.', '-', with 'x' as separator) are mapped to a
    random permutation of the digits 1-9.  *friendly* is forwarded to
    utils.morse_out (presumably controls how the clue list is rendered).
    """
    ciphertext: str = utils.morse(plaintext)
    numbers: list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    random.shuffle(numbers)
    # All nine possible two-symbol chunks.
    chunks: tuple = ("..", ".-", ".x", "-.", "--", "-x", "x.", "x-", "xx")
    pairings = {chunk: num for chunk, num in zip(chunks, numbers)}
    out: str = ""
    # Consume the Morse stream two symbols at a time.
    for count in range(0, len(ciphertext), 2):
        phrase = ciphertext[count:count + 2]
        out += str(pairings[phrase])
    # Reveal all but three of the digit/pair mappings as clues, in random order.
    pair_list = list(pairings)
    random.shuffle(pair_list)
    clues: list = list((pairings[k], k) for k in pair_list[:-3])
    random.shuffle(clues)
    return utils.morse_out(out, clues, friendly)
def pollux(plaintext: str, friendly: bool) -> str:
    """Encodes text with the Pollux cipher.

    (The original docstring said "Morbit" -- this is the Pollux variant:
    each single Morse symbol is replaced by one of three digits assigned to
    it from a random permutation of 1-9.)
    """
    ciphertext: str = utils.morse(plaintext)
    numbers: list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    random.shuffle(numbers)
    # Three digits per Morse symbol ('.', '-', separator 'x').
    num_lists: tuple = (numbers[0:3], numbers[3:6], numbers[6:9])
    chunks: tuple = (".", "-", "x")
    pairings = {chunk: num for chunk, num in zip(chunks, num_lists)}
    out: str = ""
    for count in range(0, len(ciphertext)):
        phrase = ciphertext[count]
        # Pick any of the symbol's three digits at random.
        out += str(random.choice(pairings[phrase]))
    # Reveal all but three digit/symbol mappings as clues, in random order.
    clues: list = [(value, key) for key in pairings.keys() for value in pairings[key]]
    random.shuffle(clues)
    clues = clues[:-3]
    return utils.morse_out(out, clues, friendly)
def xenocrypt(plaintext: str) -> str:
    """Encodes text with the Xenocrypt/Spanish Aristocrat cipher (27-letter alphabet with Ñ)."""
    ciphertext: str = ""
    # Build a substitution over 27 slots where no letter maps to itself
    # and no target is reused.
    used = []
    for i in range(0, 27):
        used.append(False)
    alph_map: list = []
    for i in range(0, 26):
        newval: int = random.randint(0, 26)
        while newval == i or used[newval]:
            newval: int = random.randint(0, 26)
        used[newval] = True
        alph_map.append(newval)
    plaintext = plaintext.strip().upper()
    letters: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZÑ"
    for i in plaintext:
        ascii_val: int = ord(i)
        if utils.is_letter(ascii_val):
            ciphertext += letters[alph_map[ascii_val - 65]]
        elif ascii_val == 165:
            # NOTE(review): 165 is 'Ñ' in code page 437, not Unicode (209);
            # also only 26 map entries are built, so an input 'Ñ' always
            # encodes to letters[26] == 'Ñ' itself -- confirm intended.
            ciphertext += letters[26]
        else:
            ciphertext += chr(ascii_val)
    return ciphertext
def tjso_atbash(plaintext: str) -> str:
    """Encodes text with the TJSO variant of the Atbash cipher (see utils.tjso_atbash)."""
    return utils.tjso_atbash(plaintext)
| StarcoderdataPython |
6421139 | # Generated by Django 2.2.6 on 2020-04-20 18:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make three Vulnerability FK fields optional
    (blank/null) while keeping PROTECT on delete."""

    dependencies = [
        ('KnowledgeBase', '0018_auto_20200420_1759'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vulnerability',
            name='ciaaKey',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='KnowledgeBase.CiaaCategory'),
        ),
        migrations.AlterField(
            model_name='vulnerability',
            name='countermeasureKey',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='KnowledgeBase.Countermeasure'),
        ),
        migrations.AlterField(
            model_name='vulnerability',
            name='severityLevelKey',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='KnowledgeBase.SeverityLevel'),
        ),
    ]
| StarcoderdataPython |
1648307 | <filename>hadoop_script_2.0/common/valid/validator.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import abc
__author__ = "jiandequn"
""""
抽象验证器实现
"""
class Validator(object):
    """Abstract base validator (Python 2 style ABC).

    Subclasses implement vlidate() to test a value and formatValue() to
    normalise it before it is returned.
    """
    # FIX: was misspelled `_metaclass__`, which silently disabled the ABC
    # machinery on Python 2.
    __metaclass__ = abc.ABCMeta

    # FIX: was `abc.abstractproperty`, which turned these methods into
    # properties and broke `self.vlidate(val)` calls.
    @abc.abstractmethod
    def vlidate(self, val):
        """Return True if *val* is acceptable, False otherwise."""
        raise NotImplementedError

    def vlidateParam(self, key, params=None):
        """Fetch *key* from *params*, prompting on stdin until it validates.

        Returns the value passed through formatValue().
        """
        # FIX: mutable default argument replaced by the None idiom;
        # `params.has_key(key)` replaced by the py2/py3-safe `in` test.
        if params is None:
            params = {}
        val = ''
        if key in params:
            val = params.get(key)
        while not self.vlidate(val):
            val = raw_input("%s:" % key)
        print(val)
        return self.formatValue(val)

    @abc.abstractmethod
    def formatValue(self, val):
        """Return the canonical form of a validated value."""
        raise NotImplementedError
| StarcoderdataPython |
29222 | <reponame>vsukhor/cytoskeleton-analyser
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Subclasses of container base class for tailored to cell components.
"""
from collections import namedtuple
from . import container
from .. import models
class MembraneNucleus(container.Optional):
    """Container class for configuration of cell nuclear membrane.
    """
    # Configuration model handled by this container.
    model = models.ConfigNucleus
class MembranePlasma(container.Optional):
    """Container class for configuration of cell plasma membrane.
    """
    # Configuration model handled by this container.
    model = models.ConfigPlasma
class InSpace(container.Base):
    """Container class for configuration of unanchored microtubule MTOC.
    """
    # Configuration model handled by this container.
    model = models.ConfigMtocInSpace
class Golgi(container.Base):
    """Container class for configuration of Golgi-type MTOC.
    """
    # Configuration model handled by this container.
    model = models.ConfigMtocGolgi
class Centrosome(container.Base):
    """Container class for configuration of centrosome-type MTOC.
    """
    # Configuration model handled by this container.
    model = models.ConfigMtocCentrosome
class Nucleus(container.Base):
    """Container class for configuration of Nucleus-type MTOC.
    """
    # Configuration model handled by this container.
    model = models.ConfigMtocNucleus
#: Types of Microtubule Organizing Centers (MTOCs).
Mtoc = namedtuple('Mtoc', 'InSpace Golgi Centrosome Nucleus')

#: Ready-made tuple of the MTOC container classes, one per MTOC type.
mtoc = Mtoc(InSpace, Golgi, Centrosome, Nucleus)
| StarcoderdataPython |
133854 | <gh_stars>1-10
import torch
import torch.distributions as TD
import numpy as np
import numpy.matlib as nm
class DataPosteriorTarget:
    """Abstract target distribution p(x | data) for posterior sampling.

    Subclasses supply the prior sampler, data batching, and Monte-Carlo
    estimators of the log-prior and log-likelihood terms.
    """

    def __init__(self):
        pass

    def sample_init(self, n):
        '''Samples from the prior distribution
        '''
        raise NotImplementedError()

    def sample_data(self):
        '''Draws batch sample from the dataset
        '''
        raise NotImplementedError()

    @property
    def len_dataset(self):
        # Number of samples in the underlying dataset.
        raise NotImplementedError()

    def est_log_init_prob(self, X):
        '''Estimates \\int \\log(p_{prior}(x)) p_{curr}(x) dx
        based on batch sample from p_{curr}
        '''
        raise NotImplementedError()

    def est_log_data_prob(self, X, S):
        '''Estimates (1/|Data|)\\sum_{s \\in Data} \\int \\log(p(s | x)) p_{curr}(x) dx
        based on batch sample X from p_{curr}
        and data sample S from Data (drawn uniformly from the dataset)
        '''
        raise NotImplementedError()
class LogRegDPTarget(DataPosteriorTarget):
    """Bayesian logistic-regression target with a Normal-Gamma prior.

    A sampled state is the vector ``[theta (n_features), log(alpha)]`` where
    ``alpha ~ Gamma(a0, b0)`` is the precision of the zero-mean Gaussian
    prior on the weights ``theta``.
    """

    class _InitSampler:
        # Thin adapter exposing sample_init through a sampler interface.
        def __init__(self, data_posterior_target):
            self.dpt = data_posterior_target

        def sample_n(self, n):
            return self.dpt.sample_init(n)

        def sample(self, n):
            return self.sample_n(n)

    def __init__(
            self, dataloader, n_features,
            device='cpu', g_alpha=1., g_beta=100.0, clip_alpha=None):
        super().__init__()
        self.device = device
        self.a0, self.b0 = g_alpha, g_beta  # alpha, beta (size, 1/scale)
        self.gamma0 = TD.Gamma(
            torch.tensor(self.a0, dtype=torch.float32, device=device),
            torch.tensor(self.b0, dtype=torch.float32, device=device))
        self.normal0 = TD.Normal(
            torch.tensor(0., dtype=torch.float32, device=device),
            torch.tensor(1., dtype=torch.float32, device=device))
        self.n_features = n_features  # num features in the dataset
        self.dataloader = dataloader
        self._dataloader_iter = iter(self.dataloader)
        self.n_data_samples_drawn = 0
        self.n_data_epochs_drawn = 0
        # Optional symmetric clamp on log(alpha) for numerical stability.
        self.clip_alpha = clip_alpha

    def reset(self):
        """Restart data iteration and zero the draw counters."""
        self._dataloader_iter = iter(self.dataloader)
        self.n_data_samples_drawn = 0
        self.n_data_epochs_drawn = 0

    def sample_init(self, n):
        """Draw n states [theta, log(alpha)] from the Normal-Gamma prior."""
        alpha_sample = self.gamma0.sample((n,)).view(-1, 1)
        if self.clip_alpha is not None:
            alpha_sample = torch.clamp(alpha_sample, np.exp(-self.clip_alpha), np.exp(self.clip_alpha))
        # theta | alpha ~ N(0, 1/alpha).
        theta_sample = self.normal0.sample((n, self.n_features)) / torch.sqrt(alpha_sample)
        return torch.cat([theta_sample, torch.log(alpha_sample)], dim=-1)

    def sample_data(self):
        """Draw the next minibatch, restarting the loader at epoch end.

        Returns a (batch, 1 + n_features) tensor whose first column is the
        class label.
        """
        try:
            data, classes = next(self._dataloader_iter)
            assert data.size(1) == self.n_features
            self.n_data_samples_drawn += 1
        except StopIteration:
            self._dataloader_iter = iter(self.dataloader)
            self.n_data_epochs_drawn += 1
            data, classes = next(self._dataloader_iter)
        batch = torch.cat([
            classes.view(-1, 1).type(torch.float32),
            data.type(torch.float32)], dim=-1).to(self.device)
        return batch

    @property
    def len_dataset(self):
        return len(self.dataloader.dataset)

    def est_log_init_prob(self, X, reduction='mean'):
        """Log prior density of states X = [theta, log(alpha)], reduced over the batch."""
        assert len(X.shape) == 2
        assert X.size(1) == self.n_features + 1  # features + alpha
        log_alpha_sample = X[:, -1].view(-1, 1)
        if self.clip_alpha is not None:
            log_alpha_sample = torch.clamp(log_alpha_sample, -self.clip_alpha, self.clip_alpha)
        alpha_sample = torch.exp(log_alpha_sample)
        theta_sample = X[:, :-1]
        # log N(theta; 0, 1/alpha): standard-normal log-prob of theta*sqrt(alpha)
        # plus the log|d/dtheta| change-of-variable term log(sqrt(alpha)).
        log_p_w_cond_alp = torch.sum(
            self.normal0.log_prob(theta_sample * torch.sqrt(alpha_sample)) + log_alpha_sample/2., dim=-1)
        # Gamma log-prob of alpha plus the log-Jacobian of alpha = exp(log_alpha).
        log_p_alp = self.gamma0.log_prob(alpha_sample) + log_alpha_sample
        log_p = log_p_alp.view(-1) + log_p_w_cond_alp
        if reduction=='mean':
            return torch.mean(log_p)
        if reduction=='sum':
            return torch.sum(log_p)
        raise Exception(f"Reduction '{reduction}' not defined")

    def est_log_data_prob(self, X, S, reduction='mean'):
        """Mean log-likelihood of data batch S under each state in X, reduced over X."""
        # S[0] is class label -1 or 1
        assert X.size(1) == S.size(1)
        assert X.size(1) == self.n_features + 1
        probas = torch.sigmoid(torch.matmul(X[:, :-1], S[:, 1:].T))  # (x_bs, s_bs)
        classes = S[:, 0].view(1, -1)
        # Maps p -> p for class +1 and p -> 1 - p for class -1.
        probas = (1. - classes)/2. + classes * probas
        # Floor the probabilities to keep the log finite.
        probas = torch.clamp(probas, 1e-5)
        log_probas = torch.log(probas)
        mean_log_probas = torch.mean(log_probas, dim=-1)
        assert mean_log_probas.size(0) == X.size(0)
        if reduction == 'mean':
            return torch.mean(mean_log_probas)
        if reduction == 'sum':
            return torch.sum(mean_log_probas)
        raise Exception(f"Reduction '{reduction}' not defined")

    def create_init_sampler(self):
        """Return a sampler object wrapping sample_init()."""
        return self._InitSampler(self)
def posterior_sample_evaluation(theta, X_test, y_test):
    """Evaluate a Bayesian logistic-regression posterior sample on test data.

    Parameters
    ----------
    theta : array, shape (M, d + 1)
        Posterior samples; the last column (log alpha) is ignored.
    X_test : array, shape (n_test, d)
    y_test : array, shape (n_test,)
        Labels, presumably in {-1, +1} (matches the sigmoid(y * x.theta) form).

    Returns
    -------
    (acc, llh)
        Mean accuracy and mean predictive log-likelihood, where the
        predictive probability of the true label is averaged over the
        M posterior samples.
    """
    # Vectorised rewrite of the original per-sample loop; numpy.matlib.repmat
    # (deprecated) is replaced by broadcasting.
    weights = np.asarray(theta)[:, :-1]        # drop the log-alpha column
    logits = np.asarray(X_test) @ weights.T    # (n_test, M)
    # p(y | x, theta) = sigmoid(y * x.theta) == 1 / (1 + exp(y * (-x.theta))).
    prob = 1.0 / (1.0 + np.exp(-np.asarray(y_test)[:, None] * logits))
    prob = np.mean(prob, axis=1)               # average over posterior samples
    acc = np.mean(prob > 0.5)
    llh = np.mean(np.log(prob))
    return acc, llh
| StarcoderdataPython |
4914644 | <filename>suitcase/utils/tests/conftest.py
import bluesky
from bluesky.tests.conftest import RE # noqa
from bluesky.plans import count
from bluesky.plan_stubs import trigger_and_read, configure
from ophyd.sim import SynGauss, SynAxis
import numpy as np
try:
    from ophyd.sim import DirectImage
except ImportError:
    # Older ophyd releases lack DirectImage: build a minimal stand-in from
    # SynSignal with the same constructor/trigger surface used by the tests.
    from ophyd import Device, Component as Cpt
    from ophyd.sim import SynSignal

    class DirectImage(Device):
        img = Cpt(SynSignal, kind="hinted")

        def __init__(self, *args, func=None, **kwargs):
            super().__init__(*args, **kwargs)
            if func is not None:
                # Install the value-producing callable on the signal.
                self.img._func = func

        def trigger(self):
            return self.img.trigger()
import event_model
import pytest
from .. import UnknownEventType
import warnings
# Older ophyd versions lack SynGauss.configure; patch in a minimal stub so the
# configure-twice plan below can run.
if not hasattr(SynGauss, "configure"):

    class SynGauss(SynGauss):
        def configure(self, d):
            # The stub only supports an empty configuration.
            if d:
                raise ValueError
            return {}, {}


# This line is used to ignore the deprecation warning for bulk_events in tests
warnings.filterwarnings("ignore", message="The document type 'bulk_events'*")

# Base metadata merged into every test plan's start document.
_md = {"reason": "test", "user": "temp user", "beamline": "test_beamline"}
def simple_plan(dets):
    """A simple plan which runs count with num=5."""
    md = {**_md, **{"test_plan_name": "simple_plan"}}
    yield from count(dets, num=5, md=md)
def multi_stream_one_descriptor_plan(dets):
    """A plan that has two streams but one descriptor per stream."""
    md = {**_md, **{"test_plan_name": "multi_stream_one_descriptor_plan"}}

    # The baseline decorator adds a second ('baseline') stream to the run.
    @bluesky.preprocessors.baseline_decorator(dets)
    def _plan(dets):
        yield from count(dets, md=md)

    yield from _plan(dets)
def one_stream_multi_descriptors_plan(dets):
    '''A plan that has one stream but two descriptors per stream.'''
    # NOTE(review): test_plan_name says 'simple_plan' -- looks like a
    # copy-paste slip; confirm before relying on it downstream.
    md = {**_md, **{'test_plan_name': 'simple_plan'}}

    @bluesky.preprocessors.run_decorator(md=md)
    def _internal_plan(dets):
        # Reading, re-configuring, then reading again forces a second
        # descriptor within the same stream.
        yield from trigger_and_read(dets)
        for det in dets:
            yield from configure(det, {})
        yield from trigger_and_read(dets)

    yield from _internal_plan(dets)
def _make_single(ignore):
    """Build a single simulated Gaussian detector (or skip when *ignore* is truthy)."""
    if ignore:
        pytest.skip()
    motor = SynAxis(name="motor", labels={"motors"})
    det = SynGauss(
        "det", motor, "motor", center=0, Imax=1, sigma=1, labels={"detectors"}
    )
    return [det]
def _make_image(ignore):
    """Build a detector whose reading is a 10x10 numpy array of ones (or skip)."""
    if ignore:
        pytest.skip()
    direct_img = DirectImage(
        func=lambda: np.array(np.ones((10, 10))), name="direct", labels={"detectors"}
    )
    return [direct_img]
def _make_image_list(ignore):
    """Build a detector whose reading is a 10x10 nested *list* of ones (or skip)."""
    if ignore:
        pytest.skip()
    direct_img_list = DirectImage(
        func=lambda: [[1] * 10] * 10, name="direct", labels={"detectors"}
    )
    # Rename the signal so its key differs from the array-based detector's.
    direct_img_list.img.name = "direct_img_list"
    return [direct_img_list]
@pytest.fixture(
    params=[
        _make_single,
        _make_image,
        _make_image_list,
        lambda ignore: _make_image(ignore) + _make_image_list(ignore),
    ],
    scope="function",
)
def detector_list(request):  # noqa
    """Parametrized fixture returning a factory that builds a list of detectors."""
    return request.param
@pytest.fixture(params=["event", "bulk_events", "event_page"], scope="function")
def event_type(request):
    """Parametrized fixture returning a factory for the event document flavour."""
    def _event_type_func(ignore):
        # Skip the test when the current flavour is in the ignore list.
        if request.param in ignore:
            pytest.skip()
        return request.param

    return _event_type_func
@pytest.fixture(params=[simple_plan, multi_stream_one_descriptor_plan,
                        one_stream_multi_descriptors_plan],
                scope='function')
def plan_type(request):
    '''Returns a function that provides plan_types for testing.'''
    def _plan_type_func(skip_tests_with=None):
        '''Skips the current test or returns the plan_type in request.param.

        skip_tests_with : list, optional
            pytest.skip() any test with request.param in this list
        '''
        if skip_tests_with is None:
            skip_tests_with = []
        if request.param in skip_tests_with:
            pytest.skip()
        return request.param

    return _plan_type_func
@pytest.fixture(params=['test-', 'scan_{start[uid]}-'],
                scope='function')
def file_prefix_list(request):  # noqa
    '''Returns a function that provides file_prefixes for testing.

    Covers both a literal prefix and a templated one that interpolates the
    start document's uid.
    '''
    def _file_prefix_list_func(skip_tests_with=None):
        '''Skips the current test or returns the file prefix in request.param.

        skip_tests_with : list, optional
            pytest.skip() any test with request.param in this list
        '''
        if skip_tests_with is None:
            skip_tests_with = []
        if request.param in skip_tests_with:
            pytest.skip()
        return request.param

    return _file_prefix_list_func
@pytest.fixture()
def generate_data(RE, detector_list, event_type):  # noqa
    '''A fixture that returns event data for a number of test cases.

    Returns a function producing a list of (name, doc) tuples for the plan
    passed in as an arg.

    Parameters
    ----------
    RE : object
        pytest fixture object imported from `bluesky.test.conftest`
    detector_list : list
        pytest fixture defined in `suitcase.utils.conftest` which returns a
        list of detectors
    event_type : list
        pytest fixture defined in `suitcase.utils.conftest` which returns a
        list of 'event_types'.
    '''
    def _generate_data_func(plan, skip_tests_with=None, md=None):
        '''Generates data to be used for testing of suitcase.*.export(..)
        functions.

        Parameters
        ----------
        plan : the plan to use to generate the test data
        skip_tests_with : list, optional
            any test having request.param in this list will be skipped
        md : dict, optional
            metadata to be passed to the RunEngine

        Returns
        -------
        collector : list
            A list of (name, doc) tuple pairs generated by the run engine.
        '''
        if skip_tests_with is None:
            skip_tests_with = []
        if md is None:
            md = {}
        # define the output lists and an internal list.
        collector = []
        event_list = []
        # Define the collector callback depending on the event_type: raw
        # events pass straight through, while event_page/bulk_events buffer
        # events and repackage them when the stop document arrives.
        if event_type(skip_tests_with) == 'event':
            def collect(name, doc):
                collector.append((name, doc))
                if name == 'event':
                    event_list.append(doc)
        elif event_type(skip_tests_with) == 'event_page':
            def collect(name, doc):
                if name == 'event':
                    event_list.append(doc)
                elif name == 'stop':
                    collector.append(('event_page',
                                      event_model.pack_event_page(
                                          *event_list)))
                    collector.append((name, doc))
                else:
                    collector.append((name, doc))
        elif event_type(skip_tests_with) == 'bulk_events':
            def collect(name, doc):
                if name == 'event':
                    event_list.append(doc)
                elif name == 'stop':
                    collector.append(('bulk_events', {'primary': event_list}))
                    collector.append((name, doc))
                else:
                    collector.append((name, doc))
        else:
            raise UnknownEventType('Unknown event_type kwarg passed to '
                                   'suitcase.utils.events_data')
        # collect the documents
        RE(plan(detector_list(skip_tests_with)), collect, md=md)
        return collector

    return _generate_data_func
@pytest.fixture
def example_data(generate_data, plan_type):
    '''Return a function that generates (name, doc) pairs for each test plan.

    This is the recommended entry point for testing ``suitcase-*`` export
    functions: the returned callable runs the parametrized plan through the
    RunEngine (via the ``generate_data`` fixture) and returns the resulting
    document list.  Parameters a particular suitcase cannot handle can be
    skipped by passing the exact parameter object (e.g. the plan function
    itself, imported from ``suitcase.utils.tests.conftest``) in
    ``skip_tests_with``.

    Parameters
    ----------
    generate_data : callable
        fixture returning a function that accepts a plan and returns
        (name, doc) pairs
    plan_type : callable
        fixture returning the parametrized plan to test against
    '''
    def _example_data_func(skip_tests_with=None, md=None):
        '''Return a list of (name, doc) tuples for the current test case.

        skip_tests_with : list, optional
            any test whose parametrized value is in this list is skipped
        md : dict, optional
            metadata to be added to the RunEngine run
        '''
        plan = plan_type(skip_tests_with)
        return generate_data(plan, skip_tests_with=skip_tests_with, md=md)

    return _example_data_func
| StarcoderdataPython |
3457530 | from enum import IntEnum
class ServerCode(IntEnum):
    """Server-side error codes in the JSON-RPC reserved negative range."""
    DefaultServerError = -32000
    # VM execution / verification errors
    VmValidationError = -32001
    VmVerificationError = -32002
    VmInvariantViolationError = -32003
    VmDeserializationError = -32004
    VmExecutionError = -32005
    VmUnknownError = -32006
    # Mempool admission errors
    MempoolInvalidSeqNumber = -32007
    MempoolIsFull = -32008
    MempoolTooManyTransactions = -32009
    MempoolInvalidUpdate = -32010
    MempoolVmError = -32011
    MempoolUnknownError = -32012
    # Errors raised while waiting for / checking a transaction's VM status
    VmStatusError = -33000
    WaitTimeoutError = -33001
class StatusCode(IntEnum):
    """Client-side status codes, grouped by numeric range."""
    # 10xxx: generic client errors
    FETCH_ERROR_MESSAGE = 10001
    WAIT_TIME_OUT = 10002
    ENSURE_ERROR = 10003
    # 20xxx: signature / authority errors
    UnknownAuthor = 20001
    # NOTE(review): 2002 breaks the 20xxx pattern of its neighbours --
    # probably intended to be 20002; left unchanged to preserve existing
    # wire/comparison values.
    TooLittleVotePower = 2002
    TooManySignatures = 20003
    InvalidSignature = 20004
    # 30xxx: (de)serialization / validation errors
    SerializationError = 30001
    DeserializationError = 30002
    ValidationError = 30003
    # NOTE(review): 3004 breaks the 30xxx pattern -- probably intended to be
    # 30004; left unchanged to preserve existing wire/comparison values.
    WrongLengthError = 3004
    CanonicalRepresentationError = 30005
    SmallSubgroupError = 30006
    PointNotOnCurveError = 30007
    BitVecError = 30008
1960435 | <filename>projects/dataprep/manage_background_overlaps_in_the_table.py
"""
Manage component overlaps:
Prepare list of stars that don't have component overlaps yet
Compute bg ols with the external code (multiprocessing)
Insert bg ols back into the file
"""
import numpy as np
from astropy.table import Table, vstack, unique
import os
import sys
sys.path.insert(0, '/Users/marusa/chronostar/')
from chronostar import tabletool
def prepare_stars_that_need_bg_ols():
    """
    Bg ols for some stars have been computed in the earlier analyses. Use these bg ols and insert them into the table.
    Find stars that don't have bg ols yet, and write them to
    'solar_neighbourhood_determine_bg_ols_for_these_stars.fits'.
    """
    # Full candidate table and previously-processed tables with bg overlaps.
    wanted = Table.read('/priv/mulga1/marusa/chronostar_projects/solar_neighbourhood/data/ScoCen_box_result_15M_ready_for_bg_ols.fits')
    old = Table.read('../scocen/data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')
    old_scocen = Table.read('/priv/mulga1/marusa/chronostar_projects/scocen/data/scocen_candidates_300k_only_spatial_cut.fits')
    # Overlaps already computed for the first chunk of 'wanted'.
    old_solar_neighbourhood_bg_ols = np.loadtxt('bgols_multiprocessing_0.dat')
    wanted0 = wanted[:len(old_solar_neighbourhood_bg_ols)]  # NOTE: unused; kept from earlier version
    # DELETE THIS!!!  (temporary hack: drop the rows already covered by the
    # bgols_multiprocessing_0.dat run before matching)
    wanted = wanted[len(old_solar_neighbourhood_bg_ols):]
    # Mark stars that already appear in one of the processed tables.
    mask = np.in1d(wanted['source_id'], old['source_id'])
    mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_scocen['source_id']))
    #~ mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_solar_neighbourhood['source_id']))
    # Looking for stars that do NOT have bg ols yet
    mask = ~mask
    todo = wanted[mask]
    print len(todo)
    print len(old), len(wanted), len(wanted)-len(old)
    todo.write('solar_neighbourhood_determine_bg_ols_for_these_stars.fits', format='fits')
def match_bg_ols_from_textfile_and_sobject_id():
n = [0, 1, 2, 3] # filenumbers
root = '/priv/mulga1/marusa/chronostar_projects/solar_neighbourhood'
datafile = os.path.join(root, 'solar_neighbourhood_determine_bg_ols_for_these_stars.fits')
data0 = Table.read(datafile)
N=10 # that many chunks. DON'T CHANGE THIS, this number should be the same as in the bg_ols_multiprocessing.py!!
indices_chunks = np.array_split(range(len(data0)), N)
for NI in n:
bg_ols_filename = os.path.join(root, 'bgols_multiprocessing_round2_%d.dat'%NI)
bgols = np.loadtxt(bg_ols_filename)
data=data0[indices_chunks[NI]]
ids = data['source_id']
print len(bgols), len(ids)
if NI==0:
tab = Table([ids], names=['source_id'])
tab['background_log_overlap'] = bgols
else:
tab0 = Table([ids], names=['source_id'])
tab0['background_log_overlap'] = bgols
tab = vstack([tab, tab0])
print tab
old = Table.read('/priv/mulga1/marusa/chronostar_projects/scocen/data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')
old_data = old[['source_id', 'background_log_overlap']]
print 'old_data', old_data
tab = vstack(tab, old_data)
tab = unique(tab, keys='source_id')
print tab
s = set(tab['source_id'])
print len(s)
tab.write('background_log_overlap_merged.fits', format='fits', overwrite=True)
def insert_bg_ols_into_table():
    """Insert the background overlaps from 'bgols_multiprocessing_0.dat'
    into the cartesian FITS table (row order must match the text file)."""
    bg_ols_filename = 'bgols_multiprocessing_0.dat'
    ln_bg_ols = np.loadtxt(bg_ols_filename)
    # Target table that receives the new column.
    datafile='data/ScoCen_box_result_15M_ready_for_bg_ols.fits'
    data_table = Table.read(datafile)
    bg_lnol_colname = 'background_log_overlap'
    print('Background overlaps: insert column')
    tabletool.insert_column(data_table, ln_bg_ols, bg_lnol_colname, filename=datafile)
    print('Print bg ols to cartesian table')
    data_table.write(datafile, overwrite=True, format='fits')
def merge_both_tables():
    """
    Use bg ols from 'old' and 'additional' to insert into 'wanted'.
    'additional' values override 'old' on duplicate source_ids.
    """
    old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')
    wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')
    additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')
    # source_id -> background_log_overlap lookup; dict.update makes
    # 'additional' win over 'old' for shared ids.
    d_old = dict(zip(old['source_id'], old['background_log_overlap']))
    d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))
    d_old.update(d_add)
    dct=d_old
    # KeyError here means a 'wanted' star has no computed overlap yet.
    ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]
    print len(ln_bg_ols), len(wanted)
    wanted['background_log_overlap'] = ln_bg_ols
    print wanted
    wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')
if __name__ == '__main__':
    # Only the matching/merging step is currently active; the other stages
    # were run in earlier passes and are kept here commented out.
    #~ prepare_stars_that_need_bg_ols()
    #~ insert_bg_ols_into_table()
    #merge_both_tables()
    match_bg_ols_from_textfile_and_sobject_id()
| StarcoderdataPython |
6489085 | <filename>libfennel/gateprover.py
#!/usr/bin/python
#
# (C) 2017 <NAME> <<EMAIL>>
#
# gate provers
#
# pylint: disable=arguments-differ
import libfennel.util as util
from libfennel.defs import Defs
import libfennel.arithcircuit as ac
class _GateProver(object):
    """Base class for per-gate sumcheck provers.

    A gate prover tracks one circuit gate (inputs in0/in1, output wire
    `out`) through the rounds of a sumcheck protocol.  It runs in two
    modes: "early" (summing over circuit copies) and "late" (summing over
    the 2 * nInBits input-wire bits).  Concrete subclasses supply the
    gate's arithmetic via gatefn_ and its cost accounting via costfn_.
    """
    # index into the GateFunctions* dispatch tables; set by each subclass
    gate_type_idx = None

    def __init__(self, isEarly, in0, in1, out, layer, muxbit=0):
        # accum_z1: running wiring-predicate value; accum_in0/in1: the
        # gate's current input values (filled in by reset()/next_round())
        self.accum_z1 = self.accum_in0 = self.accum_in1 = None
        self.roundNum = 0
        self.layer = layer
        self.isEarly = isEarly
        self.in0 = in0
        self.in1 = in1
        self.out = out
        self.muxbit = muxbit
        self.output = []
        # optional cost recorder; when absent, cost accounting is a no-op
        self.rec = layer.circuit.comp_out
        if self.rec is None:
            self.costfn = lambda _: None
        else:
            self.costfn = self.costfn_

    # reset gate prover to beginning of sumcheck
    def reset(self):
        self.accum_z1 = None
        self.roundNum = 0
        if self.isEarly:
            # early mode evaluates at 4 points per round
            self.output = [0, 0, 0, 0]
            self.accum_in0 = None
            self.accum_in1 = None
        else:
            # late mode evaluates at 3 points; seed inputs from V-final
            self.output = [0, 0, 0]
            self.accum_in0 = self.layer.compute_v_final.outputs[self.in0]
            self.accum_in1 = self.layer.compute_v_final.outputs[self.in1]

    # switch gate from "early" to "late" mode
    def set_early(self, isEarly):
        self.isEarly = isEarly

    # set z value from Verifier
    def set_z(self):
        """Initialize accum_z1 from the layer's chi values for this gate."""
        self.reset()
        self.accum_z1 = self.layer.compute_z1chi[self.out]
        if self.layer.compute_z1chi_2 is not None:
            # second z point present: fold it in
            self.accum_z1 += self.layer.compute_z1chi_2[self.out]
            self.accum_z1 %= Defs.prime
            if self.rec:
                self.rec.did_add()

    # update output of this gate prover
    def compute_outputs(self, *args):
        """Dispatch to early (needs a copy index) or late (no args) mode."""
        if self.isEarly:
            assert len(args) == 1
            self.compute_outputs_early(args[0])
        else:
            assert not args
            self.compute_outputs_late()

    def compute_outputs_early(self, copy):
        """Evaluate z1 * gatefn at four points for an even copy index."""
        assert self.roundNum < self.layer.circuit.nCopyBits
        assert (copy % 2) == 0
        # evaluate gatefn for copy and copy+1 simultaneously
        out = [0, 0, 0, 0]
        out[0] = self.gatefn(self.layer.compute_v[self.in0].outputs[copy],
                             self.layer.compute_v[self.in1].outputs[copy])
        out[0] *= self.accum_z1
        out[0] %= Defs.prime
        out[1] = self.gatefn(self.layer.compute_v[self.in0].outputs[copy+1],
                             self.layer.compute_v[self.in1].outputs[copy+1])
        out[1] *= self.accum_z1
        out[1] %= Defs.prime
        # evaluate gatefn at 3rd and 4th points
        # note that we use [copy >> 1] because compute_v has expand_outputs = False
        # note that we don't multiply by p or (1-p) because we're summing x*p + x*(1-p), which is just x
        out[2] = self.gatefn(self.layer.compute_v[self.in0].outputs_fact[0][copy >> 1],
                             self.layer.compute_v[self.in1].outputs_fact[0][copy >> 1])
        out[2] *= self.accum_z1
        out[2] %= Defs.prime
        out[3] = self.gatefn(self.layer.compute_v[self.in0].outputs_fact[1][copy >> 1],
                             self.layer.compute_v[self.in1].outputs_fact[1][copy >> 1])
        out[3] *= self.accum_z1
        out[3] %= Defs.prime
        if self.rec:
            self.rec.did_mul(4)
        self.output = out

    def compute_outputs_late(self):
        """Evaluate the round polynomial at points 0, 1 and 2 (late mode)."""
        assert self.roundNum < 2 * self.layer.nInBits
        # evaluate gatefn at third point (-1)
        if self.roundNum < self.layer.nInBits:
            # still binding omega_1 (left-input) bits
            isOneVal = util.bit_is_set(self.in0, self.roundNum)
            leftVal = self.layer.compute_v_final.outputs_fact[0][self.in0]
            valForTwo = self.gatefn(leftVal, self.accum_in1)
        else:
            # binding omega_2 (right-input) bits
            isOneVal = util.bit_is_set(self.in1, self.roundNum - self.layer.nInBits)
            rightVal = self.layer.compute_v_final.outputs_fact[0][self.in1]
            valForTwo = self.gatefn(self.accum_in0, rightVal)
        # evaluate addmul at third point
        valForTwo *= util.third_eval_point(self.accum_z1, isOneVal)
        valForTwo %= Defs.prime
        # produce outputs for 0, 1, 2
        out = [0, 0, valForTwo]
        valForZeroOne = self.accum_z1 * self.gatefn(self.accum_in0, self.accum_in1)
        valForZeroOne %= Defs.prime
        # the wiring predicate is nonzero at exactly one of {0, 1}
        if isOneVal:
            out[1] = valForZeroOne
        else:
            out[0] = valForZeroOne
        if self.rec:
            self.rec.did_mul(3)
            if not isOneVal:
                self.rec.did_add()
        self.output = out

    # update values internal to this gate prover upon receiving a new tau value from V
    def next_round(self, val):
        # early rounds: no gate-internal state
        if self.isEarly:
            return
        if self.roundNum >= 2 * self.layer.nInBits:
            # no changes after the first 2 * g' rounds
            return
        # figure out how to update GateProver's state this round
        isOneVal = False
        if self.roundNum < self.layer.nInBits:
            ### updating omega_1 value
            # first, figure out how to update wiring predicate
            isOneVal = util.bit_is_set(self.in0, self.roundNum)
            # second, update appropriate V value
            if self.roundNum < self.layer.nInBits - 1:
                self.accum_in0 = self.layer.compute_v_final.outputs[self.in0]
            else:
                # last omega_1 round: left input is fully bound
                self.accum_in0 = self.layer.compute_v_final.prevPassValue
        else:
            ### updating omega_2 value
            # first, figure out how to update wiring predicate
            isOneVal = util.bit_is_set(self.in1, self.roundNum - self.layer.nInBits)
            # second, update appropriate V value
            if self.roundNum < 2 * self.layer.nInBits - 1:
                self.accum_in1 = self.layer.compute_v_final.outputs[self.in1]
            else:
                # last omega_2 round: right input is fully bound
                self.accum_in1 = self.layer.compute_v_final.prevPassValue
        # fold tau into the wiring predicate: multiply by val or (1 - val)
        self.accum_z1 *= val if isOneVal else (1 - val)
        self.accum_z1 %= Defs.prime
        if self.rec:
            self.rec.did_mul()
            if not isOneVal:
                self.rec.did_add()
        self.roundNum += 1

    def gatefn(self, x, y):
        """Record this gate's cost, then evaluate the gate function."""
        self.costfn(self.rec)
        return self.gatefn_(x, y)

    @staticmethod
    def costfn_(_):  # pylint: disable=unused-argument
        # default: the gate records no arithmetic cost
        pass

    @staticmethod
    def gatefn_(_, __):  # pylint: disable=unused-argument
        # subclasses must override with the gate's arithmetization
        assert False
class _FirstOrderGateProver(_GateProver):
    """Marker base for gates whose arithmetization is degree 1 in each input."""
    pass
class _SecondOrderGateProver(_GateProver):
    """Marker base for gates whose arithmetization includes an x*y term."""
    pass
class MulGateProver(_SecondOrderGateProver):
    """Multiplication gate: out = x * y (mod p)."""
    gate_type = "mul"
    gate_type_idx = 0
    cgate = ac.CMulGate

    @staticmethod
    def gatefn_(x, y):
        return (x * y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        # one field multiplication per evaluation
        rec.did_mul()

    @staticmethod
    def v_com_gatefn_(_, __, ___, xy, ____):  # pylint: disable=unused-argument
        # the precomputed xy term is the whole gate value
        return xy

    @staticmethod
    def p_com_gatefn_(_, __, xy, ___):  # pylint: disable=unused-argument
        return xy

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # accumulate j*val onto the xy coefficient bucket (index 2)
        xyzvals[2] += val * j
        xyzvals[2] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
class AddGateProver(_FirstOrderGateProver):
    """Addition gate: out = x + y (mod p)."""
    gate_type = "add"
    gate_type_idx = 1
    cgate = ac.CAddGate

    @staticmethod
    def gatefn_(x, y):
        return (x + y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()

    @staticmethod
    def v_com_gatefn_(gops, x, y, _, rec):
        # addition in the exponent is a group multiplication
        if rec is not None:
            rec.did_mul()
        return gops.mul(x, y)

    @staticmethod
    def p_com_gatefn_(x, y, _, rec):  # pylint: disable=unused-argument
        if rec is not None:
            AddGateProver.costfn_(rec)
        return AddGateProver.gatefn_(x, y)

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # accumulate j*val onto both the x (0) and y (1) coefficient buckets
        jval = (j * val) % Defs.prime
        xyzvals[0] += jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] += jval
        xyzvals[1] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add(2)
class SubGateProver(_FirstOrderGateProver):
    """Subtraction gate: out = x - y (mod p)."""
    gate_type = "sub"
    gate_type_idx = 2
    cgate = ac.CSubGate

    @staticmethod
    def gatefn_(x, y):
        return (x - y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_sub()

    @staticmethod
    def v_com_gatefn_(gops, x, y, _, rec):
        # subtraction in the exponent is a group division
        if rec:
            rec.did_inv()
            rec.did_mul()
        return gops.div(x, y)

    @staticmethod
    def p_com_gatefn_(x, y, _, rec):  # pylint: disable=unused-argument
        if rec is not None:
            SubGateProver.costfn_(rec)
        return SubGateProver.gatefn_(x, y)

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # +j*val onto the x bucket (0), -j*val onto the y bucket (1)
        jval = (j * val) % Defs.prime
        xyzvals[0] += jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] -= jval
        xyzvals[1] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
            rec.did_sub()
class MuxGateProver(_FirstOrderGateProver):
    """Multiplexer gate: selects the left or right input based on a muxbit.

    NOTE: gate_type_idx 3 and 4 are muxL and muxR, respectively; the
    left/right helper functions below are what appear in the dispatch
    tables.
    """
    gate_type = "mux"
    # NOTE 3 and 4 are muxL and muxR, respectively
    gate_type_idx = 3
    cgate = ac.CMuxGate

    @staticmethod
    def gatefn_(x, y, bit):
        # select y when the mux control bit is set, otherwise x
        if bit:
            return y
        return x

    def gatefn(self, x, y):
        # look up this gate's control bit from the circuit's muxbits
        bit = self.layer.circuit.muxbits[self.muxbit]
        # BUGFIX: the arguments were previously passed as (bit, x, y),
        # which made gatefn_ treat the *right input* y as the select bit
        # (and could even return the bit itself).  Pass them in declaration
        # order (x, y, bit).
        return self.gatefn_(x, y, bit)

    @staticmethod
    def gatefn_left_(x, _):  # pylint: disable=unused-argument
        # muxL: pass the left input through
        return x

    @staticmethod
    def v_com_gatefn_left_(_, x, __, ___, ____):  # pylint: disable=unused-argument
        return x

    @staticmethod
    def p_com_gatefn_left_(x, _, __, ___):  # pylint: disable=unused-argument
        return x

    @staticmethod
    def gatefn_right_(_, y):  # pylint: disable=unused-argument
        # muxR: pass the right input through
        return y

    @staticmethod
    def v_com_gatefn_right_(_, __, y, ___, ____):  # pylint: disable=unused-argument
        return y

    @staticmethod
    def p_com_gatefn_right_(_, y, __, ___):  # pylint: disable=unused-argument
        return y

    @staticmethod
    def pv_com_gatefn_left_(val, j, xyzvals, rec):
        # accumulate j*val onto the x coefficient bucket (index 0)
        xyzvals[0] += j * val
        xyzvals[0] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()

    @staticmethod
    def pv_com_gatefn_right_(val, j, xyzvals, rec):
        # accumulate j*val onto the y coefficient bucket (index 1)
        xyzvals[1] += j * val
        xyzvals[1] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
class OrGateProver(_SecondOrderGateProver):
    """Logical OR gate, arithmetized as x + y - x*y (mod p) for 0/1 inputs."""
    gate_type = "or"
    gate_type_idx = 5
    cgate = ac.COrGate

    @staticmethod
    def gatefn_(x, y):
        return (x + y - x * y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()
        rec.did_mul()
        rec.did_sub()

    @staticmethod
    def v_com_gatefn_(gops, x, y, xy, rec):
        # group-operation analogue: mul(x, y) / xy
        if rec:
            rec.did_mul(2)
            rec.did_inv()
        return gops.div(gops.mul(x, y), xy)

    @staticmethod
    def p_com_gatefn_(x, y, xy, rec):
        # evaluation given the precomputed product term xy
        if rec is not None:
            rec.did_add()
            rec.did_sub()
        return (x + y - xy) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # +j*val onto x (0) and y (1), -j*val onto the xy bucket (2)
        jval = (j * val) % Defs.prime
        xyzvals[0] += jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] += jval
        xyzvals[1] %= Defs.prime
        xyzvals[2] -= jval
        xyzvals[2] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add(2)
            rec.did_sub()
class XorGateProver(_SecondOrderGateProver):
    """Logical XOR gate, arithmetized as x + y - 2*x*y (mod p)."""
    gate_type = "xor"
    gate_type_idx = 6
    cgate = ac.CXorGate

    @staticmethod
    def gatefn_(x, y):
        return (x + y - 2 * x * y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()
        rec.did_sub()
        rec.did_mul()

    @staticmethod
    def v_com_gatefn_(gops, x, y, xy, rec):
        # group-operation analogue: mul(x, y) / xy^2
        if rec:
            rec.did_mul(3)
            rec.did_inv()
        return gops.div(gops.mul(x, y), gops.sqr(xy))

    @staticmethod
    def p_com_gatefn_(x, y, xy, rec):
        if rec is not None:
            rec.did_add(2)
            rec.did_sub()
        return (x + y - 2 * xy) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # +j*val onto x (0) and y (1), -2*j*val onto the xy bucket (2)
        jval = (j * val) % Defs.prime
        xyzvals[0] += jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] += jval
        xyzvals[1] %= Defs.prime
        xyzvals[2] -= 2 * jval
        xyzvals[2] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add(3)
            rec.did_sub()
class NotGateProver(_FirstOrderGateProver):
    """Logical NOT gate, arithmetized as 1 - x (mod p); ignores y."""
    gate_type = "not"
    gate_type_idx = 7
    cgate = ac.CNotGate

    @staticmethod
    def gatefn_(x, _):
        return (1 - x) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()

    @staticmethod
    def v_com_gatefn_(gops, x, __, ___, rec):  # pylint: disable=unused-argument
        # group-operation analogue: gh / x
        if rec:
            rec.did_mul()
            rec.did_inv()
        return gops.div(gops.gh, x)

    @staticmethod
    def p_com_gatefn_(x, _, __, rec):  # pylint: disable=unused-argument
        if rec is not None:
            NotGateProver.costfn_(rec)
        return NotGateProver.gatefn_(x, None)

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # -j*val onto the x bucket (0), +j*val onto the constant bucket (3)
        jval = (j * val) % Defs.prime
        xyzvals[0] -= jval
        xyzvals[0] %= Defs.prime
        xyzvals[3] += jval
        xyzvals[3] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
            rec.did_sub()
class NandGateProver(_SecondOrderGateProver):
    """Logical NAND gate, arithmetized as 1 - x*y (mod p)."""
    gate_type = "nand"
    gate_type_idx = 8
    cgate = ac.CNandGate

    @staticmethod
    def gatefn_(x, y):
        return (1 - x * y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()
        rec.did_mul()

    @staticmethod
    def v_com_gatefn_(gops, _, __, xy, rec):  # pylint: disable=unused-argument
        # group-operation analogue: gh / xy
        if rec:
            rec.did_mul()
            rec.did_inv()
        return gops.div(gops.gh, xy)

    @staticmethod
    def p_com_gatefn_(_, __, xy, rec):  # pylint: disable=unused-argument
        if rec is not None:
            rec.did_add()
        return (1 - xy) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # -j*val onto the xy bucket (2), +j*val onto the constant bucket (3)
        jval = (j * val) % Defs.prime
        xyzvals[2] -= jval
        xyzvals[2] %= Defs.prime
        xyzvals[3] += jval
        xyzvals[3] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
            rec.did_sub()
class NorGateProver(_SecondOrderGateProver):
    """Logical NOR gate, arithmetized as 1 + x*y - x - y (mod p)."""
    gate_type = "nor"
    gate_type_idx = 9
    cgate = ac.CNorGate

    @staticmethod
    def gatefn_(x, y):
        return (1 + x * y - x - y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add(2)
        rec.did_sub()
        rec.did_mul()

    @staticmethod
    def v_com_gatefn_(gops, x, y, xy, rec):
        # group-operation analogue: mul(gh, xy) / mul(x, y)
        if rec:
            rec.did_mul(3)
            rec.did_inv()
        return gops.div(gops.mul(gops.gh, xy), gops.mul(x, y))

    @staticmethod
    def p_com_gatefn_(x, y, xy, rec):
        if rec is not None:
            rec.did_add(2)
            rec.did_sub()
        return (1 + xy - x - y) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # -j*val onto x (0) and y (1); +j*val onto xy (2) and constant (3)
        jval = (j * val) % Defs.prime
        xyzvals[0] -= jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] -= jval
        xyzvals[1] %= Defs.prime
        xyzvals[2] += jval
        xyzvals[2] %= Defs.prime
        xyzvals[3] += jval
        xyzvals[3] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add(2)
            rec.did_sub(2)
class NxorGateProver(_SecondOrderGateProver):
    """Logical XNOR gate, arithmetized as 1 + 2*x*y - x - y (mod p)."""
    gate_type = "nxor"
    gate_type_idx = 10
    cgate = ac.CNxorGate

    @staticmethod
    def gatefn_(x, y):
        return (1 + 2 * x * y - x - y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add(3)
        rec.did_mul()
        rec.did_sub()

    @staticmethod
    def v_com_gatefn_(gops, x, y, xy, rec):
        # group-operation analogue: mul(gh, xy^2) / mul(x, y)
        if rec:
            rec.did_mul(4)
            rec.did_inv()
        return gops.div(gops.mul(gops.gh, gops.sqr(xy)), gops.mul(x, y))

    @staticmethod
    def p_com_gatefn_(x, y, xy, rec):
        if rec is not None:
            rec.did_add(3)
            rec.did_sub()
        return (1 + 2 * xy - x - y) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # -j*val onto x (0) and y (1); +2*j*val onto xy (2); +j*val onto (3)
        jval = (j * val) % Defs.prime
        xyzvals[0] -= jval
        xyzvals[0] %= Defs.prime
        xyzvals[1] -= jval
        xyzvals[1] %= Defs.prime
        xyzvals[2] += 2 * jval
        xyzvals[2] %= Defs.prime
        xyzvals[3] += jval
        xyzvals[3] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add(3)
            rec.did_sub(2)
class NaabGateProver(_SecondOrderGateProver):
    """NAAB ("not-a-and-b") gate, arithmetized as (1 - x) * y (mod p)."""
    gate_type = "naab"
    gate_type_idx = 11
    cgate = ac.CNaabGate

    @staticmethod
    def gatefn_(x, y):
        return ((1 - x) * y) % Defs.prime

    @staticmethod
    def costfn_(rec):
        rec.did_add()
        rec.did_mul()

    @staticmethod
    def v_com_gatefn_(gops, _, y, xy, rec):  # pylint: disable=unused-argument
        # group-operation analogue: y / xy
        if rec:
            rec.did_mul()
            rec.did_inv()
        return gops.div(y, xy)

    @staticmethod
    def p_com_gatefn_(_, y, xy, rec):  # pylint: disable=unused-argument
        if rec is not None:
            rec.did_sub()
        return (y - xy) % Defs.prime

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # +j*val onto the y bucket (1), -j*val onto the xy bucket (2)
        jval = (j * val) % Defs.prime
        xyzvals[1] += jval
        xyzvals[1] %= Defs.prime
        xyzvals[2] -= jval
        xyzvals[2] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
            rec.did_sub()
class PassGateProver(_FirstOrderGateProver):
    """Pass-through gate: out = x; the right input is ignored."""
    gate_type = "pass"
    gate_type_idx = 12
    cgate = ac.CPassGate

    @staticmethod
    def gatefn_(x, _):  # pylint: disable=unused-argument
        return x

    @staticmethod
    def v_com_gatefn_(_, x, __, ___, ____):  # pylint: disable=unused-argument
        return x

    @staticmethod
    def p_com_gatefn_(x, _, __, ___):  # pylint: disable=unused-argument
        return x

    @staticmethod
    def pv_com_gatefn_(val, j, xyzvals, rec):
        # accumulate j*val onto the x coefficient bucket (index 0)
        xyzvals[0] += val * j
        # BUGFIX: the reduction previously targeted xyzvals[2] (copy-paste
        # from the mul gate), leaving xyzvals[0] unreduced and spuriously
        # reducing the untouched xy bucket.  Reduce the bucket we added to,
        # matching pv_com_gatefn_left_ in MuxGateProver.
        xyzvals[0] %= Defs.prime
        if rec is not None:
            rec.did_mul()
            rec.did_add()
# magic so that GateFunction is statically indexable
# magic so that GateFunction is statically indexable
class GateFunctionsMeta(type):
    """Metaclass making GateFunctions* classes indexable by gate_type_idx."""
    def __getitem__(cls, idx):
        return cls._gatemethods[idx]

    def __len__(cls):
        return len(cls._gatemethods)
class GateFunctions(object):
    """Dispatch table of gatefn_ implementations, indexed by gate_type_idx.

    Order must match each prover's gate_type_idx (0=mul .. 12=pass;
    indices 3 and 4 are the mux left/right halves).
    """
    __metaclass__ = GateFunctionsMeta
    _gatemethods = [ MulGateProver.gatefn_
                   , AddGateProver.gatefn_
                   , SubGateProver.gatefn_
                   , MuxGateProver.gatefn_left_
                   , MuxGateProver.gatefn_right_
                   , OrGateProver.gatefn_
                   , XorGateProver.gatefn_
                   , NotGateProver.gatefn_
                   , NandGateProver.gatefn_
                   , NorGateProver.gatefn_
                   , NxorGateProver.gatefn_
                   , NaabGateProver.gatefn_
                   , PassGateProver.gatefn_
                   ]
class GateFunctionsPC(object):
    """Dispatch table of p_com_gatefn_ implementations (same index order
    as GateFunctions: 0=mul .. 12=pass, 3/4 = mux left/right)."""
    __metaclass__ = GateFunctionsMeta
    _gatemethods = [ MulGateProver.p_com_gatefn_
                   , AddGateProver.p_com_gatefn_
                   , SubGateProver.p_com_gatefn_
                   , MuxGateProver.p_com_gatefn_left_
                   , MuxGateProver.p_com_gatefn_right_
                   , OrGateProver.p_com_gatefn_
                   , XorGateProver.p_com_gatefn_
                   , NotGateProver.p_com_gatefn_
                   , NandGateProver.p_com_gatefn_
                   , NorGateProver.p_com_gatefn_
                   , NxorGateProver.p_com_gatefn_
                   , NaabGateProver.p_com_gatefn_
                   , PassGateProver.p_com_gatefn_
                   ]
class GateFunctionsPVC(object):
    """Dispatch table of pv_com_gatefn_ implementations (same index order
    as GateFunctions: 0=mul .. 12=pass, 3/4 = mux left/right)."""
    __metaclass__ = GateFunctionsMeta
    _gatemethods = [ MulGateProver.pv_com_gatefn_
                   , AddGateProver.pv_com_gatefn_
                   , SubGateProver.pv_com_gatefn_
                   , MuxGateProver.pv_com_gatefn_left_
                   , MuxGateProver.pv_com_gatefn_right_
                   , OrGateProver.pv_com_gatefn_
                   , XorGateProver.pv_com_gatefn_
                   , NotGateProver.pv_com_gatefn_
                   , NandGateProver.pv_com_gatefn_
                   , NorGateProver.pv_com_gatefn_
                   , NxorGateProver.pv_com_gatefn_
                   , NaabGateProver.pv_com_gatefn_
                   , PassGateProver.pv_com_gatefn_
                   ]
class GateFunctionsVC(object):
    """Dispatch table of v_com_gatefn_ implementations (same index order
    as GateFunctions: 0=mul .. 12=pass, 3/4 = mux left/right)."""
    __metaclass__ = GateFunctionsMeta
    _gatemethods = [ MulGateProver.v_com_gatefn_
                   , AddGateProver.v_com_gatefn_
                   , SubGateProver.v_com_gatefn_
                   , MuxGateProver.v_com_gatefn_left_
                   , MuxGateProver.v_com_gatefn_right_
                   , OrGateProver.v_com_gatefn_
                   , XorGateProver.v_com_gatefn_
                   , NotGateProver.v_com_gatefn_
                   , NandGateProver.v_com_gatefn_
                   , NorGateProver.v_com_gatefn_
                   , NxorGateProver.v_com_gatefn_
                   , NaabGateProver.v_com_gatefn_
                   , PassGateProver.v_com_gatefn_
                   ]
| StarcoderdataPython |
1697834 | <filename>tests/test_envtool.py<gh_stars>1-10
import os.path
# from sys import version_info
# if version_info[0] == 2:
# from mock import patch, mock_open, call
# else:
# from unittest.mock import patch, mock_open, call
from pytest import fail
from py.path import local
from click.testing import CliRunner
import envtool
def _fixture(name):
return os.path.join(os.path.dirname(__file__), 'fixtures', name)
def test_envfile_to_dict():
    # An envfile (with comments) parses into a plain {key: value} dict.
    assert envtool.envfile_to_dict(_fixture('basic_envfile')) == {'A': 'abcde', 'B': 'def'}
def test_parse_envfile_contents():
    # Blank lines and '#' comments are ignored; 'a=b' lines become entries.
    assert envtool.parse_envfile_contents("""
# Comment
a=b
""") == {'a': 'b'}
def test_parse_invalid_envfile_contents():
    # A non-comment line with no '=' separator must be rejected with IOError.
    try:
        envtool.parse_envfile_contents("""
a
""")
        fail()
    except IOError:
        assert True
def test_envdir_to_dict():
    # An envdir (one file per key) loads into a {filename: contents} dict.
    res = envtool.envdir_to_dict(_fixture('basic_envdir'))
    assert res == {'A': 'abcde', 'B': 'def'}
def test_dict_to_envdir(tmpdir):
    # Writing a dict to a fresh envdir creates one file per key.
    output = tmpdir.join('saved_envdir')
    envtool.dict_to_envdir({'A': 'secret', 'B': 'plethora'}, str(output))
    assert output.join('A').read() == 'secret'
    assert output.join('B').read() == 'plethora'
def test_dict_to_envdir_preexisting(tmpdir):
    # Writing into an envdir that already exists must not fail.
    output = tmpdir.join('saved_envdir')
    output.mkdir()
    envtool.dict_to_envdir({'A': 'secret', 'B': 'plethora'}, str(output))
    assert output.join('A').read() == 'secret'
    assert output.join('B').read() == 'plethora'
def test_dict_to_envfile(tmpdir):
    # Writing a dict to an envfile emits newline-terminated KEY=value lines.
    output = tmpdir.join('saved_envfile')
    envtool.dict_to_envfile({'A': 'secret', 'B': 'plethora'}, str(output))
    assert output.read() == """A=secret
B=plethora
"""
def test_convert_to_envfile(tmpdir):
    # Converting an envdir source produces the equivalent envfile.
    output = tmpdir.join('converted_envfile')
    envtool.convert_to_envfile(_fixture('basic_envdir'), str(output))
    assert output.read() == """A=abcde
B=def
"""
def test_convert_to_envdir(tmpdir):
    # Converting an envfile source produces the equivalent envdir.
    output = tmpdir.join('converted_envdir')
    envtool.convert_to_envdir(_fixture('basic_envfile'), str(output))
    assert output.join('A').read() == 'abcde'
    assert output.join('B').read() == 'def'
def test_parse_missing_file(tmpdir):
    # Parsing a path that does not exist must raise IOError.
    output = tmpdir.join('non-existent')
    assert not output.check()
    try:
        envtool.parse_env(str(output))
        assert False, "Parsing a missing env should fail with IOError"
    except IOError:
        assert True
def test_cli_envdir():
    # CLI: converting an envfile source writes an envdir (one file per key).
    runner = CliRunner()
    with runner.isolated_filesystem():
        output = local('cli_output')
        result = runner.invoke(envtool.main, ['convert', _fixture('basic_envfile'), str('cli_output')])
        assert result.exit_code == 0
        assert output.join('A').read() == 'abcde'
        assert output.join('B').read() == 'def'
def test_cli_envfile():
    # CLI: converting an envdir source writes an envfile.
    runner = CliRunner()
    with runner.isolated_filesystem():
        output = local('cli_output')
        result = runner.invoke(envtool.main, ['convert', _fixture('basic_envdir'), str('cli_output')])
        assert result.exit_code == 0
        assert output.read() == """A=abcde
B=def
"""
def test_cli_missing_source():
    # CLI: a missing source path exits with click's usage-error code (2).
    runner = CliRunner()
    with runner.isolated_filesystem():
        non_existent = local('non-existent')
        assert not non_existent.check()
        result = runner.invoke(envtool.main, ['convert', str(non_existent), 'cli_output'])
        assert result.exit_code == 2
def test_cli_incorrect_param():
    # CLI: a directory-to-directory conversion is invalid and exits with -1
    # (an exception escaped the command).
    runner = CliRunner()
    with runner.isolated_filesystem():
        src = local('src')
        dest = local('dest')
        src.mkdir()
        dest.mkdir()
        result = runner.invoke(envtool.main, ['convert', str(src), str(dest)])
        assert result.exit_code == -1
if __name__ == '__main__':
    # Allow running this test module directly.  BUGFIX: only `fail` was
    # imported from pytest at module scope, so `pytest.main()` raised
    # NameError; import the module locally before invoking it.
    import pytest
    pytest.main()
| StarcoderdataPython |
3345740 | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Explore hparams on a single machine."""
import time
from typing import Any, Callable, Mapping, MutableMapping, Sequence, Tuple
import gin
from learned_optimization.population import population
import numpy as onp
# Bookkeeping state passed between init()/update() of BranchingSingleMachine.
BranchingState = Mapping[str, Any]
@gin.configurable
class BranchingSingleMachine(population.Mutate):
r"""Explore hparams on a single machine!
This is a simple statemachine based mutator.
First, we perturb a hparam in some direction (governed by `mutate_fn`) and try
training in this direction for `explore_steps`.
Once done, we reset, and explore the opposite direction (also governed by
`mutate_fn`) for explore_steps`.
Once done, we select the best direction, reset to the end of that
corresponding explore phase, and continue training for `exploit\_steps`.
This process then repeats.
"""
# this is a simple state machine.
  def __init__(self, mutate_fn: Callable[[Any, str, int], Any],
               exploit_steps: int, explore_steps: int):
    """Initializer.

    Args:
      mutate_fn: A deterministic function mapping from hyper parameters, phase
        (either "pos" or "neg"), and phase_indx -- or the number of previous
        branchings. This should return a new hyper-parameter value.
      exploit_steps: number of steps in exploit phase
      explore_steps: number of steps in each explore phase
    """
    self._mutate_fn = mutate_fn
    self._exploit_steps = exploit_steps
    self._explore_steps = explore_steps
def init(self) -> BranchingState:
return {
"neg": None,
"pos": None,
"center": None,
"center_meta_params": None,
"branch_checkpoint": None,
"start_params": None,
"start_exploit": 0,
"phase": "explore_center",
"phase_idx": 0,
}
def update(
self, state: BranchingState,
current_workers: Sequence[population.ActiveWorker],
cache: MutableMapping[population.GenerationID,
MutableMapping[int, population.Checkpoint]]
) -> Tuple[BranchingState, Sequence[population.ActiveWorker]]:
# copy dict to make pytype happy
state = {**state} # type: MutableMapping[str, Any]
assert len(current_workers) == 1
worker = current_workers[0]
steps = cache[worker.generation_id]
if not steps:
return state, current_workers
def add_worker_to_cache(from_checkpoint: population.Checkpoint,
worker: population.ActiveWorker):
"""Helper function to add a new checkpoint to the cache."""
checkpoint = population.Checkpoint(
generation_id=worker.generation_id,
params=worker.params,
meta_params=worker.meta_params,
parent=(from_checkpoint.generation_id, from_checkpoint.step),
step=worker.step,
value=None,
time=time.time(),
)
if worker.generation_id not in cache:
cache[worker.generation_id] = population.IntKeyDict()
cache[worker.generation_id][worker.step] = checkpoint
if state["branch_checkpoint"] is None:
state["branch_checkpoint"] = steps[0]
state["center"] = steps[0].generation_id
last_checkpoint = steps.values()[-1]
if state["phase"] == "exploit":
# switch to center.
if last_checkpoint.step - state["start_exploit"] >= self._exploit_steps:
meta_params = last_checkpoint.meta_params
genid = population.make_gen_id()
next_workers = [
population.ActiveWorker(last_checkpoint.params, meta_params, genid,
last_checkpoint.step)
]
state["branch_checkpoint"] = last_checkpoint
state["center"] = genid
state["phase"] = "explore_center"
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
else:
return state, current_workers
else:
should_switch = last_checkpoint.step - state[
"branch_checkpoint"].step >= self._explore_steps
if should_switch:
segment = state["phase"].split("_")[-1]
if segment == "center":
# next state is neg
genid = population.make_gen_id()
state["neg"] = genid
state["phase"] = "explore_neg"
meta_params = state["branch_checkpoint"].meta_params
meta_params = self._mutate_fn(meta_params, "pos", state["phase_idx"])
next_workers = [
population.ActiveWorker(state["branch_checkpoint"].params,
meta_params, genid,
state["branch_checkpoint"].step)
]
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
elif segment == "neg":
# next state is pos
genid = population.make_gen_id()
state["pos"] = genid
state["phase"] = "explore_pos"
meta_params = state["branch_checkpoint"].meta_params
meta_params = self._mutate_fn(meta_params, "neg", state["phase_idx"])
next_workers = [
population.ActiveWorker(state["branch_checkpoint"].params,
meta_params, genid,
state["branch_checkpoint"].step)
]
add_worker_to_cache(state["branch_checkpoint"], next_workers[0])
return state, next_workers
# next state is exploit
elif segment == "pos":
take_values_from = state[
"branch_checkpoint"].step + self._explore_steps
center_steps = cache[state["center"]]
neg_steps = cache[state["neg"]]
pos_steps = cache[state["pos"]]
state["center"] = None
state["neg"] = None
state["pos"] = None
state["start_exploit"] = last_checkpoint.step
state["phase"] = "exploit"
state["phase_idx"] += 1
if take_values_from not in center_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for center? \n {center_steps}"
)
if take_values_from not in neg_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for neg? \n {neg_steps}"
)
if take_values_from not in pos_steps:
raise ValueError(
f"The eval @ step {take_values_from} not there for pos? \n {pos_steps}"
)
center_score = center_steps[take_values_from].value
neg_score = neg_steps[take_values_from].value
pos_score = pos_steps[take_values_from].value
scores = [center_score, neg_score, pos_score]
idx = onp.nanargmin(scores)
best_checkpoint = [center_steps, neg_steps,
pos_steps][idx].values()[-1]
meta_params = best_checkpoint.meta_params
genid = population.make_gen_id()
next_workers = [
population.ActiveWorker(best_checkpoint.params, meta_params,
genid, best_checkpoint.step)
]
add_worker_to_cache(best_checkpoint, next_workers[0])
return state, next_workers
else:
raise ValueError(f"unknown phase {state['phase']}")
else:
return state, current_workers
| StarcoderdataPython |
8032680 | import sys
# name -> {'w': own weight, 'child': child names, 'sumW': memoized subtree weight}
weights = {}
def wRec(name):
    """Return the total weight of the tower rooted at *name* (memoized).

    If the children's subtree weights disagree, print each child's own and
    subtree weight and abort the program.
    """
    node = weights[name]
    if node['sumW'] == 0:
        child_totals = set()
        node['sumW'] = node['w']
        for child in node['child']:
            total = wRec(child)
            child_totals.add(total)
            node['sumW'] += total
        if len(child_totals) > 1:
            # Unbalanced tower found: report every child, then stop.
            for child in node['child']:
                print(child, weights[child]['w'], weights[child]['sumW'])
            sys.exit()
    return node['sumW']
# Parse the AoC day-7 input: each line is "name (weight) -> child, child, ...".
with open('Day7_input') as f:
    l = [x.strip() for x in f.readlines()]
for x in l:
    a = x.split()
    # a[1] is "(NN)": strip the parentheses to get the node's own weight.
    weights[a[0]] = {'w': int(a[1][1:a[1].index(')')]), 'child': [], 'sumW': 0}
    if (len(a) > 2):
        # Tokens after "->" are the children (trailing commas stripped).
        weights[a[0]]['child'] = [s.strip(',') for s in a[3:]]
# Evaluate every node; wRec prints the culprits and exits on imbalance.
for d in weights:
    wRec(d)
| StarcoderdataPython |
3368804 | import os
import tensorflow as tf
import math
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
from matplotlib.lines import Line2D
import matplotlib.patheffects as path_effects
import matplotlib as mpl
import cv2
import glob
from scipy.interpolate import CubicSpline
import scipy.interpolate
from scipy import signal
import scipy.stats as stats
import seaborn as sns
from sklearn.linear_model import LinearRegression
from gekko import GEKKO
import pywt
# import waymo dataset related modules
from waymo_open_dataset import dataset_pb2 as open_dataset
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
from tqdm import tqdm
from waymo_open_dataset.protos.scenario_pb2 import Scenario
import time
def veh_trj_collect(scenario, file_index, scenario_label):
    """Flatten one Waymo scenario into a list of per-object, per-frame dicts.

    Also appends to the module-global ``turn_left_scnerio_veh_list`` every
    track whose heading span within the scenario exceeds 80 degrees
    (treated here as a turning vehicle).
    """
    global turn_left_scnerio_veh_list
    single_segment_all_scenario = []
    time_stamp_all = scenario.timestamps_seconds  # timestamps of the whole clip
    single_scenario_AV_index = scenario.sdc_track_index  # track id of the AV in this scenario
    tracks_all = scenario.tracks
    tracks_label = 0
    object_of_interest = scenario.objects_of_interest
    # print(scenario_label)
    for single_track in tracks_all:  # one track per object
        turn_left_scnerio_veh_dict = {}
        state_index = 0
        tracks_label += 1
        heading_one_veh_one_scenario = []
        for single_state in single_track.states:  # one state per time step of this track
            single_scenario_single_track = {}  # per-frame record of one object in one scenario (whole 9s)
            single_scenario_single_track['segment_index'] = file_index
            single_scenario_single_track['scenario_label'] = scenario_label
            single_scenario_single_track['tracks_label'] = tracks_label
            single_scenario_single_track['obj_id'] = single_track.id
            single_scenario_single_track['obj_type'] = single_track.object_type
            # TYPE_UNSET = 0;
            # TYPE_VEHICLE = 1;
            # TYPE_PEDESTRIAN = 2;
            # TYPE_CYCLIST = 3;
            # TYPE_OTHER = 4;
            if single_track.id == single_scenario_AV_index:  # is this the AV?
                single_scenario_single_track['is_AV'] = 1
            else:
                single_scenario_single_track['is_AV'] = 0
            if single_track.id in object_of_interest:
                single_scenario_single_track['is_interest'] = 1
            else:
                single_scenario_single_track['is_interest'] = 0
            try:
                single_scenario_single_track['time_stamp'] = time_stamp_all[state_index]
            except:
                continue
            # single_scenario_single_track['dynamic_map_states'] = scenario.dynamic_map_states[state_index]  # dynamic map state, mostly empty
            single_scenario_single_track['frame_label'] = state_index + 1
            single_scenario_single_track['valid'] = single_state.valid
            if single_state.valid == True:
                single_scenario_single_track['center_x'] = single_state.center_x
                single_scenario_single_track['center_y'] = single_state.center_y
                single_scenario_single_track['center_z'] = single_state.center_z
                single_scenario_single_track['length'] = single_state.length
                single_scenario_single_track['width'] = single_state.width
                single_scenario_single_track['height'] = single_state.height
                single_scenario_single_track['heading'] = single_state.heading
                single_scenario_single_track['velocity_x'] = single_state.velocity_x
                single_scenario_single_track['velocity_y'] = single_state.velocity_y
                heading_one_veh_one_scenario.append(float(single_state.heading) * 180 / np.pi)
            state_index += 1
            single_segment_all_scenario.append(single_scenario_single_track)
        try:
            # Heading span over the scenario; > 80 deg is flagged as a turn.
            range_heading = max(heading_one_veh_one_scenario) - min(heading_one_veh_one_scenario)
            if range_heading > 80:
                turn_left_scnerio_veh_dict['file_index'] = file_index
                turn_left_scnerio_veh_dict['scenario_index'] = scenario_label
                turn_left_scnerio_veh_dict['obj_id'] = single_track.id
                turn_left_scnerio_veh_dict['obj_type'] = single_track.object_type
                turn_left_scnerio_veh_dict['heading_range'] = range_heading
                turn_left_scnerio_veh_list.append(turn_left_scnerio_veh_dict)
        except:
            continue
    return single_segment_all_scenario
# Generate visualization images.
def create_figure_and_axes(size_pixels):
    """Initializes a unique figure and axes for plotting.

    Args:
      size_pixels: desired output image width/height in pixels (at 100 dpi).

    Returns:
      (fig, ax): a white-background matplotlib figure and its single axes.
    """
    # `uuid` is not imported at module level in this file; import it here so
    # the unique figure number does not raise a NameError.
    import uuid
    fig, ax = plt.subplots(1, 1, num=uuid.uuid4())
    # Sets output image to pixel resolution.
    dpi = 100
    size_inches = size_pixels / dpi
    fig.set_size_inches([size_inches, size_inches])
    fig.set_dpi(dpi)
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.xaxis.label.set_color('black')
    ax.tick_params(axis='x', colors='black')
    ax.yaxis.label.set_color('black')
    ax.tick_params(axis='y', colors='black')
    fig.set_tight_layout(True)
    ax.grid(False)
    return fig, ax
def plot_top_view_single_pic_map(trj_in, scenario_id_in, frame_id_in,scenario):
    """Render one frame of a scenario (map features + all objects) to a jpg.

    trj_in: DataFrame of per-frame object records for the scenario.
    scenario: the parsed Scenario protobuf (used for its map features).
    """
    plt.figure(figsize=(10, 7))
    # NOTE(review): this second figure() call discards the figsize above —
    # confirm whether both calls are intended.
    plt.figure()
    plt.xlabel('global center x (m)', fontsize=10)
    plt.ylabel('global center y (m)', fontsize=10)
    plt.axis('square')
    plt.xlim([trj_in['center_x'].min() - 1, trj_in['center_x'].max() + 1])
    plt.ylim([trj_in['center_y'].min() - 1, trj_in['center_y'].max() + 1])
    title_name = 'Scenario ' + str(scenario_id_in)
    plt.title(title_name, loc='left')
    plt.xticks(
        np.arange(round(float(trj_in['center_x'].min())), round(float(trj_in['center_x'].max())), 20),
        fontsize=5)
    plt.yticks(
        np.arange(round(float(trj_in['center_y'].min())), round(float(trj_in['center_y'].max())), 20),
        fontsize=5)
    ax = plt.gca()
    map_features = scenario.map_features
    for single_feature in map_features:
        id_ = single_feature.id
        if single_feature.road_edge:
            single_line_x = []
            single_line_y = []
            # print("road_edge id is %d"%single_feature.id)
            for polyline in single_feature.road_edge.polyline:
                single_line_x.append(polyline.x)
                single_line_y.append(polyline.y)
            ax.plot(single_line_x, single_line_y, color='black', linewidth=1)  # road edges in black
        if single_feature.lane:
            single_line_x = []
            single_line_y = []
            for polyline in single_feature.lane.polyline:
                single_line_x.append(polyline.x)
                single_line_y.append(polyline.y)
            ax.plot(single_line_x, single_line_y, color='blue', linewidth=0.5)  # lane centerlines in blue
        if single_feature.road_line:
            single_line_x = []
            single_line_y = []
            for polyline in single_feature.road_line.polyline:
                single_line_x.append(polyline.x)
                single_line_y.append(polyline.y)
            ax.plot(single_line_x, single_line_y, color='black', linestyle='-', linewidth=0.3)  # road markings as thin lines
    #trj_in['center_x'] = trj_in['center_x'] - trj_in['center_x'].min()
    #trj_in['center_y'] = trj_in['center_y'] - trj_in['center_y'].min()
    unique_veh_id = pd.unique(trj_in['obj_id'])
    for single_veh_id in unique_veh_id:
        single_veh_trj = trj_in[trj_in['obj_id'] == single_veh_id]
        single_veh_trj = single_veh_trj[single_veh_trj['frame_label'] == frame_id_in]
        # print(single_veh_trj)
        if len(single_veh_trj) > 0 and single_veh_trj['valid'].iloc[0] == True:
            ts = ax.transData
            coords = [single_veh_trj['center_x'].iloc[0], single_veh_trj['center_y'].iloc[0]]
            if single_veh_trj['is_AV'].iloc[0] == 1:
                temp_facecolor = 'black'
                temp_alpha = 0.99
                heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
                tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle)
            else:
                if single_veh_trj['is_interest'].iloc[0] == 1:
                    temp_facecolor = 'red'  # interacting vehicles drawn in red
                else:
                    if single_veh_trj['obj_type'].iloc[0] == 1:
                        temp_facecolor = 'blue'
                    elif single_veh_trj['obj_type'].iloc[0] == 2:
                        temp_facecolor = 'green'
                    else:
                        temp_facecolor = 'magenta'
                temp_alpha = 0.5
                heading_angle = single_veh_trj['heading'].iloc[0] * 180 / np.pi
                # transform for other vehicles, note that the ego global heading should be added to current local heading
                tr = mpl.transforms.Affine2D().rotate_deg_around(coords[0], coords[1], heading_angle)
            t = tr + ts
            # note that exact xy needs to to calculated
            veh_length = single_veh_trj['length'].iloc[0]
            veh_width = single_veh_trj['width'].iloc[0]
            ax.add_patch(patches.Rectangle(
                xy=(single_veh_trj['center_x'].iloc[0] - 0.5 * veh_length,
                    single_veh_trj['center_y'].iloc[0] - 0.5 * veh_width),
                width=veh_length,
                height=veh_width,
                linewidth=0.1,
                facecolor=temp_facecolor,
                edgecolor='black',
                alpha=temp_alpha,
                transform=t))
            # add vehicle local id for only vehicle object
            if single_veh_trj['obj_type'].iloc[0] == 1:
                temp_text = plt.text(single_veh_trj['center_x'].iloc[0],
                                     single_veh_trj['center_y'].iloc[0], str(single_veh_id), style='italic',
                                     weight='heavy', ha='center', va='center', color='white', rotation=heading_angle,
                                     size=2.5)
                temp_text.set_path_effects([path_effects.Stroke(linewidth=0.7, foreground='black'), path_effects.Normal()])
    #plt.show()
    fig_save_name = '../Result_save/figure_save/temp_top_view_figure/top_view_segment_' + '__' + 'scenario_' + str(
        scenario_id_in) + '_frame_' + str(
        frame_id_in) + '_trajectory.jpg'
    plt.savefig(fig_save_name, dpi=300)
    plt.close('all')
def top_view_video_generation(path_2, scenario_id_in):
    """Assemble one scenario's per-frame jpgs into an .avi, then delete the jpgs."""
    # this function generates one top view video based on top view figures from one segment
    img_array = []
    for num in range(1, len(os.listdir('../Result_save/figure_save/temp_top_view_figure/')) + 1):
        image_filename = '../Result_save/figure_save/temp_top_view_figure/' + 'top_view_segment_' + '__' + 'scenario_' + str(
            scenario_id_in) + '_frame_' + str(num) + '_trajectory.jpg'
        img = cv2.imread(image_filename)
        height, width, layers = img.shape
        size = (width, height)
        img_array.append(img)
    # 10 fps AVI with DIVX codec; `size` is taken from the last frame read.
    video_save_name = '../Result_save/figure_save/top_view_video/' + path_2 + '_scenario_' + str(scenario_id_in) + '.avi'
    out = cv2.VideoWriter(video_save_name, cv2.VideoWriter_fourcc(*'DIVX'), 10, size)
    for i in range(len(img_array)):
        out.write(img_array[i])
    out.release()
    print('No %d top view video made success' % scenario_id_in)
    # after making the video, delete all the frame jpgs
    filelist = glob.glob(os.path.join('../Result_save/figure_save/temp_top_view_figure/', "*.jpg"))
    for f in filelist:
        os.remove(f)
def get_file_list(filepath):
    """Expand *filepath* (a glob pattern) into sorted paths plus shard indices.

    Each index is characters [-14:-9] of the file's basename — the 5-digit
    shard number of a ``training_20s.tfrecord-NNNNN-of-01000`` name.
    Returns (paths, indices).
    """
    paths = sorted(glob.glob(filepath))
    indices = [os.path.basename(p)[-14:-9] for p in paths]
    #print(paths)
    print(indices)
    return paths, indices
if __name__ == '__main__':
    # test_state == 1 limits the loops to a few iterations for quick debugging.
    test_state = 0
    # *********************** Stage 1 : collect tracks information from original dataset
    # path_2_list = ['00043','00044','00045','00046','00047','00048','00049']
    file_path_1 = 'D:/LJQ/'
    files_path_2 = ['WaymoData_motion_3']
    file_path_3 = '/training_20s.tfrecord-*-of-01000'
    for file_path_2 in files_path_2:
        filepath = file_path_1 + file_path_2 + file_path_3
        all_file_list,file_index_list = get_file_list(filepath)
        turn_left_scnerio_veh_list = []  # every left-turn case (and turning-vehicle id) across all segments/scenarios of this file
        for i in range(len(file_index_list)):
            file_index = file_index_list[i]
            segment_file = all_file_list[i]
            print('Now is the file:%s' % file_index)
            segment_name_list = []
            interactive_behavior = pd.DataFrame(
                columns=['id', 'time_segment', 'veh_1', 'veh_2'])  # every interaction: its time range plus the two vehicle ids
            segment_dataset = tf.data.TFRecordDataset(segment_file)
            segment_dataset = segment_dataset.apply(tf.data.experimental.ignore_errors())
            scenario_label = 0  # count of all scenarios seen
            scenario_valid_label = 0  # count of scenarios containing interactions
            single_segment_scenario_valid_dict = {}  # valid-scenario record for the current segment
            all_segment_scenario_valid_dict = []  # accumulated valid-scenario records
            all_segment_all_scenario_all_object_info = []
            for one_record in segment_dataset:  # each record is one scenario
                single_segment_all_scenario = []  # all scenario info of this segment
                scenario_label += 1
                if test_state == 1:
                    if scenario_label == 3:
                        break
                scenario = Scenario()
                scenario.ParseFromString(one_record.numpy())  # parse the protobuf
                # print(scenario.timestamps_seconds)
                # data = open("D:/Data/WaymoData/test.txt", 'w+')
                # print(scenario, file=data)
                # data.close()
                # break
                if scenario.objects_of_interest != []:  # record scenarios that contain interactions
                    scenario_valid_label += 1
                    single_segment_scenario_valid_dict['file_id'] = file_index
                    single_segment_scenario_valid_dict['scenario_valid_label'] = scenario_valid_label
                    single_segment_scenario_valid_dict['scenario_label'] = scenario_label
                    single_segment_scenario_valid_dict['scenario_id'] = scenario.scenario_id
                    single_segment_scenario_valid_dict['objects_of_interest'] = scenario.objects_of_interest  # list holding the ids of the two interacting objects
                    single_valid_scenario_time_range = str(scenario.timestamps_seconds[0]) + '-' + str(
                        scenario.timestamps_seconds[-1])
                    single_segment_scenario_valid_dict['time_range'] = single_valid_scenario_time_range
                    all_segment_scenario_valid_dict.append(single_segment_scenario_valid_dict)
                    single_segment_scenario_valid_dict = {}
                    # print(scenario.objects_of_interest)
                    # print(scenario.timestamps_seconds)
                #print(scenario_label)
                # -----------------trajectory extraction -----------------
                single_segment_all_scenario = veh_trj_collect(scenario, file_index, scenario_label)  # all per-frame info of this scenario
                all_segment_all_scenario_all_object_info += single_segment_all_scenario
                # ********************Stage 2: visulization of information **********************
                '''
                filelist = glob.glob(os.path.join('../Result_save/figure_save/temp_top_view_figure/', '*.jpg'))
                for f in filelist:
                    os.remove(f)
                seg_trj = pd.DataFrame(single_segment_all_scenario) #不包含地图信息
                single_seg_all_scenario_id = pd.unique(seg_trj['scenario_label'])
                for i in tqdm(range(len(single_seg_all_scenario_id))): # 一个scenario 生成一个video
                    single_scenario_id = single_seg_all_scenario_id[i]
                    scenario_trj = seg_trj[seg_trj['scenario_label'] == single_scenario_id]
                    scenario_print = 'Top view video now in scenario: ' + str(single_scenario_id)
                    print(scenario_print)
                    top_view_trj = scenario_trj
                    total_frame_num = scenario_trj['frame_label'].max()
                    for frame_id in range(1, total_frame_num + 1):
                        if test_state == 1:
                            if frame_id == 5:
                                break
                        plot_top_view_single_pic_map(top_view_trj, single_scenario_id, frame_id, scenario)
                    print('No.%d scenario fig has been made,now begin to generate top view viedo.' % single_scenario_id)
                    # ----------video generation------------
                    top_view_video_generation(file_index, single_scenario_id)
                '''
                # --------------- save data ----------------------
                save_step = 100
                if scenario_label % save_step == 0:
                    print('print')
                    filename = '../Result_save/data_save/all_scenario_all_objects_info/' + file_index + '_all_scenario_all_object_info' + '_' + str(
                        (scenario_label // save_step)) + '.csv'
                    all_segment_all_scenario_all_object_info_pd = pd.DataFrame(all_segment_all_scenario_all_object_info)
                    all_segment_all_scenario_all_object_info_pd.to_csv(filename, index=False)
                    all_segment_all_scenario_all_object_info = []
            filename = '../Result_save/data_save/all_scenario_all_objects_info/' + file_index + '_all_scenario_all_object_info' + '_' + str(
                (scenario_label // save_step) + 1) + '.csv'
            all_segment_all_scenario_all_object_info_pd = pd.DataFrame(all_segment_all_scenario_all_object_info)
            all_segment_all_scenario_all_object_info_pd.to_csv(filename, index=False)
            all_segment_all_scenario_all_object_info = []
            filename_2 = '../Result_save/data_save/objects_of_interest_info/' + file_index + '_all_scenario_objects_of_interest_info' + '.csv'
            all_segment_scenario_valid_dict_pd = pd.DataFrame(all_segment_scenario_valid_dict)
            all_segment_scenario_valid_dict_pd.to_csv(filename_2, index=False)
        filename_3 = '../Result_save/data_save/' + file_path_2 + '_all_segment_all_scenario_turn_left_info' + '.csv'
        turn_left_scnerio_veh_list_pd = pd.DataFrame(turn_left_scnerio_veh_list)
        print(turn_left_scnerio_veh_list_pd)
        turn_left_scnerio_veh_list_pd.to_csv(filename_3, index=False)
| StarcoderdataPython |
5009693 | <reponame>bjackman/millhouse
#!/usr/bin/env python
# Copyright 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup, find_packages
# Runtime dependencies. Versions are deliberately unpinned for now.
REQUIRES = [
    'trappy', # TODO version?
    'wrapt'
]
# Placeholder long description shown on package indexes.
LONG_DESCRIPTION = "todo"
setup(name='millhouse',
      version='0.0.1',
      description='todo',
      long_description=LONG_DESCRIPTION,
      author='ARM-MILLHOUSE',
      url='https://github.com/ARM-Software/millhouse',
      packages=find_packages(),
      classifiers=[
          "Environment :: Console",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: POSIX :: Linux",
          "Programming Language :: Python :: 2.7",
          # As we depend on trace data from the Linux Kernel/FTrace
          "Topic :: System :: Operating System Kernels :: Linux",
      ],
      install_requires=REQUIRES)
| StarcoderdataPython |
9773877 | ##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import sys
from rubbos_collector import RubbosCollector
from uploader import Uploader
def printUsage():
    # Print CLI usage. NOTE: this module uses Python 2 print statements.
    print ("Usage: python process_data.py required_params(**)"
           " optional_params([])")
    print " ** -i|--input input_data_dir"
    print " ** -s|--suite suite_name"
    print " ** -c|--conf conf_file"
    print " [] -o|--output output_file"
    print " [] -u|--upload yes|no"
def process(input_dir, suite_name):
    """Collect benchmark results for *suite_name* from *input_dir*.

    Returns an empty dict for unknown suite names.
    """
    if suite_name == "rubbos":
        return RubbosCollector().collect_data(input_dir)
    return dict()
def writeResult(output_file, result):
    """Write each element of *result* (a list) to *output_file*, one per line.

    The file is created (and truncated) even when *result* is not a list.
    Using a with-block guarantees the handle is closed even if a write fails
    (the original leaked the handle on exceptions).
    """
    with open(output_file, "w") as f:
        if isinstance(result, list):
            for elem in result:
                f.write(str(elem) + "\n")
def uploadResult(conf, suite_name, result):
    # Push the collected results to the reporting target described by *conf*.
    Uploader(conf).upload_result(suite_name, result)
def main():
    """Parse CLI args, collect suite results, then optionally write/upload them."""
    # Options come in "-flag value" pairs, so argc must be odd and >= 7
    # (program name + the three required pairs).
    if len(sys.argv) < 7 or len(sys.argv) % 2 == 0:
        printUsage()
        exit(1)
    i = 1
    params = dict()
    # Walk argv two tokens at a time: flag, then its value.
    while (i < len(sys.argv)):
        if sys.argv[i] == "-i" or sys.argv[i] == "--input":
            params["input"] = sys.argv[i + 1]
        if sys.argv[i] == "-s" or sys.argv[i] == "--suite":
            params["suite"] = sys.argv[i + 1]
        if sys.argv[i] == "-c" or sys.argv[i] == "--conf":
            params["conf"] = sys.argv[i + 1]
        if sys.argv[i] == "-o" or sys.argv[i] == "--output":
            params["output"] = sys.argv[i + 1]
        if sys.argv[i] == "-u" or sys.argv[i] == "--upload":
            params["upload"] = sys.argv[i + 1]
        i = i + 2
    if not("input" in params and "suite" in params and "conf" in params):
        print "Lack some required parameters."
        exit(1)
    result = process(params["input"], params["suite"])
    print "Results:"
    for elem in result:
        print elem
    if "output" in params:
        writeResult(params["output"], result)
    if "upload" in params and params["upload"].lower() == "yes":
        uploadResult(params["conf"], params["suite"], result)
if __name__ == "__main__":
main()
| StarcoderdataPython |
9741271 | <gh_stars>0
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.collections import PolyCollection
import numpy as np
# Per-component timeline files; each line of a file is "start,end".
filelist = [
    "audio",
    "hologram",
]
data = []
components = {}  # component name -> integer row (y position on the chart)
comp_tag = 1
colormapping = {}  # component name -> matplotlib color-cycle entry ('C1', 'C2', ...)
for file in filelist:
    with open(file, 'r') as timelineFile:
        name = file
        line = timelineFile.readline()
        while line:
            start, end = line.split(',')
            data.append((name, float(start), float(end)))
            if name not in components:
                components[name] = comp_tag
                colormapping[name] = 'C'+str(comp_tag)
                comp_tag += 1
            line = timelineFile.readline()
# Build one closed 5-point rectangle per interval, 0.8 units tall, on its row.
verts = []
colors = []
for d in data:
    v = [(d[1], components[d[0]]-.4),
         (d[1], components[d[0]]+.4),
         (d[2], components[d[0]]+.4),
         (d[2], components[d[0]]-.4),
         (d[1], components[d[0]]-.4)]
    verts.append(v)
    colors.append(colormapping[d[0]])
bars = PolyCollection(verts, facecolors=colors)
fig, ax = plt.subplots()
ax.add_collection(bars)
ax.autoscale()
#loc = mdates.MinuteLocator(byminute=[0,15,30,45])
#ax.xaxis.set_major_locator(loc)
#ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
ax.set_yticks(range(1, comp_tag, 1))
ax.set_yticklabels(components.keys())
# for ddl in np.arange(0, 3*1000.0, 1024.0/48000.0*1000.0):
#     ax.axvline(ddl, color='grey')
plt.show()
12842234 | import os
import index.MovieDAO as movieDAO
from pprint import pprint
from index import SysConst
import shutil
def getAllMovies(path):
    """Recursively collect movie and txt files under *path*.

    A file qualifies when it is not hidden (no leading dot), its name is
    longer than 4 characters, and its real extension (text after the last
    dot) is one of the known types. The previous ``filename[-3:]`` check
    made the listed "rmvb" entry unreachable and matched dot-less names
    ending in e.g. "mp4"; ``os.path.splitext`` fixes both.

    Returns a list of {"fullpath": ..., "filename": ...} dicts.
    """
    movieTypes = {"avi", "mp4", "mkv", "rmvb", "wmv", "txt"}
    results = []
    for fpath, dirs, fs in os.walk(path):
        for filename in fs:
            if filename.startswith(".") or len(filename) <= 4:
                continue
            suffix = os.path.splitext(filename)[1][1:]
            if suffix in movieTypes:
                results.append({"fullpath": os.path.join(fpath, filename),
                                "filename": filename})
    return results
def getAllImages(path):
    """Recursively collect jpg files under *path*.

    A file qualifies when it is not hidden (no leading dot), its name is
    longer than 4 characters, and its real extension (text after the last
    dot) is "jpg". Uses ``os.path.splitext`` instead of ``filename[-3:]``
    so dot-less names ending in "jpg" no longer match, consistent with the
    sibling collectors.

    Returns a list of {"fullpath": ..., "filename": ...} dicts.
    """
    movieTypes = {"jpg"}
    results = []
    for fpath, dirs, fs in os.walk(path):
        for filename in fs:
            if filename.startswith(".") or len(filename) <= 4:
                continue
            suffix = os.path.splitext(filename)[1][1:]
            if suffix in movieTypes:
                results.append({"fullpath": os.path.join(fpath, filename),
                                "filename": filename})
    return results
def getMovies(path):
    """Recursively collect movie files under *path* (no txt, unlike getAllMovies).

    A file qualifies when it is not hidden (no leading dot), its name is
    longer than 4 characters, and its real extension (text after the last
    dot) is a known movie type. The previous ``filename[-3:]`` check made
    the listed "rmvb" entry unreachable and matched dot-less names ending
    in e.g. "mp4"; ``os.path.splitext`` fixes both.

    Returns a list of {"fullpath": ..., "filename": ...} dicts.
    """
    movieTypes = {"avi", "mp4", "mkv", "rmvb", "wmv"}
    results = []
    for fpath, dirs, fs in os.walk(path):
        for filename in fs:
            if filename.startswith(".") or len(filename) <= 4:
                continue
            suffix = os.path.splitext(filename)[1][1:]
            if suffix in movieTypes:
                results.append({"fullpath": os.path.join(fpath, filename),
                                "filename": filename})
    return results
def getTxts(path):
    """Recursively collect txt files under *path*.

    A file qualifies when it is not hidden (no leading dot), its name is
    longer than 4 characters, and its real extension (text after the last
    dot) is "txt". Uses ``os.path.splitext`` instead of ``filename[-3:]``
    so dot-less names ending in "txt" no longer match, consistent with the
    sibling collectors.

    Returns a list of {"fullpath": ..., "filename": ...} dicts.
    """
    movieTypes = {"txt"}
    results = []
    for fpath, dirs, fs in os.walk(path):
        for filename in fs:
            if filename.startswith(".") or len(filename) <= 4:
                continue
            suffix = os.path.splitext(filename)[1][1:]
            if suffix in movieTypes:
                results.append({"fullpath": os.path.join(fpath, filename),
                                "filename": filename})
    return results
def findLocalMovies(avList, allFiles):
    """Attach a 'local_movie' full path to each av dict whose av_number
    appears (case-insensitively) in some local file name.

    Only the first matching file is recorded. Mutates and returns *avList*.
    """
    # allFiles = getAllMovies(path)
    for av in avList:
        needle = av["av_number"].lower()
        for entry in allFiles:
            if needle in entry["filename"].lower():
                av["local_movie"] = entry["fullpath"]
                break
    return avList
def deleteSmallImages(path):
    """Delete every jpg under *path* smaller than 5000 bytes (likely broken)."""
    for image in getAllImages(path):
        full = image["fullpath"]
        if os.path.getsize(full) < 5000:
            print("delete " + full)
            os.remove(full)
def copyImageToTemp(movieNumbers):
    """Copy the first cached image matching each movie number into the temp dir,
    creating the temp dir on first use."""
    temp_dir = SysConst.getImageTempPath()
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    cached = getAllImages(SysConst.getImageCachePath())
    for num in movieNumbers:
        for image in cached:
            if num in image["fullpath"]:
                shutil.copy(image["fullpath"], temp_dir + image["filename"])
                break
def copyOneImageToTemp(actor, avNumber):
    """Copy one cached image (<cache>/<actor>//<avNumber>.jpg) into the temp dir,
    creating the temp dir on first use."""
    if not os.path.exists(SysConst.getImageTempPath()):
        os.mkdir(SysConst.getImageTempPath())
    src = "{}{}//{}.jpg".format(SysConst.getImageCachePath(), actor, avNumber)
    dst = "{}{}.jpg".format(SysConst.getImageTempPath(), avNumber)
    shutil.copy(src, dst)
# avList = [{"av_number": "ABS-072"}]
# pprint(findLocalMovies(avList=avList, path="G://Game//File//"))
#deleteSmallImages(SysConst.getImageCachePath())
#copyImageToTemp(["ABS-072"])
#copyOneImageToTemp("阿部乃美久", "ARMG-274") | StarcoderdataPython |
11375482 | <reponame>ertis-research/reliable-iot
from my_db.restful_API.modular_views import web_login_register_view
from my_db.restful_API.modular_views import docker_commands_view
from my_db.restful_API.modular_views import app_resource_usage
from my_db.restful_API.modular_views import application_view
from my_db.restful_API.modular_views import register_view
from my_db.restful_API.modular_views import endpoint_view
from my_db.restful_API.modular_views import resource_view
from my_db.restful_API.modular_views import token_view
from my_db.restful_API.modular_views import shadow_view
from my_db.restful_API.modular_views import user_view
from django.urls import path
# REST API routes for the my_db service, grouped by resource kind below.
urlpatterns = [
    # THESE ROUTES ARE FOR REAL DEVICES
    path('register/', register_view.register_device),
    path('getPhysicalDevice/<str:dev_id>/', register_view.get_physical_device),
    path('deletePhysicalDevice/<str:dev_id>/', register_view.delete_device),
    path('updatePhysicalDevice/<str:dev_id>/', register_view.update_device),
    path('getDeviceStatus/<str:dev_id>/', register_view.device_status),
    # THESE ROUTES ARE FOR ENDPOINTS
    path('storeEndpoint/', endpoint_view.store_endpoint),
    path('getEndpointById/<str:ep_id>/', endpoint_view.get_endpoint_by_id),
    path('getEndpointByLeshanId/<str:leshan_id>/', endpoint_view.get_endpoint_by_leshanid),
    path('updateEndpoint/<str:ep_id>/', endpoint_view.update_endpoint),
    # THESE ROUTES ARE FOR RESOURCES
    path('storeResource/', resource_view.store_resource),
    path('getResource/<str:res_id>/', resource_view.get_resource),
    path('deleteResource/<str:res_id>/', resource_view.delete_resource),
    path('updateResource/<str:endpoint_id>/', resource_view.update_resource),
    path('getShadowResources/<str:shdw_id>/', resource_view.get_shadow_resources),
    path('getDeviceResources/<str:dev_id>/', resource_view.get_device_resources),
    path('getResourceStatus/<str:res_id>/', resource_view.resource_status),
    path('getSimilarResource/<str:res_code>/', resource_view.get_similar_resource),
    path('getSimilarResource/<str:res_code>/<str:shdw_id>/', resource_view.get_similar_resource),
    # THESE ROUTES ARE FOR DOCKER COMMANDS
    path('storeType/', docker_commands_view.store_type),
    path('getTypeCommand/<str:d_type>/', docker_commands_view.get_connector_by_type),
    path('getAllConnectors/', docker_commands_view.get_all),
    # THESE ROUTES ARE FOR TOKEN CRUD
    path('getTokenById/<str:token_id>/', token_view.get_token_by_id),
    path('getTokenByUser/<str:user_id>/', token_view.get_tokens_by_user_id),
    path('getTokenByShadow/<str:shadow_id>/', token_view.get_tokens_by_shadow),
    path('generateToken/', token_view.generate_token),
    # THESE ROUTES ARE FOR TOKEN AUTH
    path('validateToken/', token_view.validate_token),
    path('revokeToken/', token_view.revoke_token),
    # THESE ROUTES ARE FOR SHADOW CRUD
    path('createShadow/', shadow_view.create_shadow),
    path('updateShadow/<str:shdw_id>/', shadow_view.update_shadow),
    path('deleteShadow/<str:shdw_id>/', shadow_view.delete_shadow),
    path('getShadowsByUser/<str:user_id>/', shadow_view.get_shadows_by_user_id),
    path('getShadowById/<str:shdw_id>/', shadow_view.get_shadow_by_id),
    path('getShadowTokens/<str:shdw_id>/', shadow_view.get_shadow_tokens),
    path('getShadowDevices/<str:shdw_id>/', shadow_view.get_shadow_devices),
    # THESE ROUTES ARE FOR APPS
    path('getApp/<str:app_id>/', application_view.get_app),
    path('getAllApps/', application_view.get_all),
    path('storeOrUpdateApp/<str:name>/', application_view.store_or_update_app),
    path('deleteApp/<str:app_id>/', application_view.delete_app),
    # THESE ROUTES ARE FOR RESOURCE USAGE
    path('getUsageByEpShadow/<str:ep_id>/<str:shdw_id>/', app_resource_usage.get_resource_use_by_epid_shdwid),
    path('getCreatedLogic/<str:res_code>/<str:operation>/<str:shdw_id>/', app_resource_usage.get_similar_logic),
    path('getCreatedLogic/<str:res_code>/<str:operation>/', app_resource_usage.get_similar_logic),
    path('createUsageResource/', app_resource_usage.create),
    path('deleteUsageResource/<str:usage_id>/', app_resource_usage.delete),
    path('updateUsageResource/<str:usage_id>/', app_resource_usage.update),
    # THESE ROUTES ARE FOR USER CRUD
    path('updateUser/<str:usr_id>/', user_view.update_user),
    # THESE ROUTES ARE FOR LOGIN / LOG OUT AND REGISTER
    path('login/', web_login_register_view.login),
    path('registerUser/', web_login_register_view.register),
]
| StarcoderdataPython |
3507269 | <reponame>mrichar1/alchemyjsonschema
# -*- coding:utf-8 -*-
def _callFUT(*args, **kwargs):
    """Invoke the function under test (``alchemyjsonschema.dictify.objectify``)."""
    from alchemyjsonschema import dictify
    return dictify.objectify(*args, **kwargs)
def test_it__simple():
    """A flat dict with a plain foreign key becomes a fully populated User."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, ForeignKeyWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(ForeignKeyWalker)(models.User)
    ts = datetime(2000, 1, 1)
    payload = {"pk": 1, "name": "foo", "created_at": ts, "group_id": 10}
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup)

    assert isinstance(obj, models.User)
    assert obj.pk == 1
    assert obj.name == "foo"
    assert obj.created_at == datetime(2000, 1, 1)
    assert obj.group_id == 10
def test_it__strict_true__then__required_are_notfound__error_raised():
    """With strict=True a missing required field (pk) raises InvalidStatus."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, ForeignKeyWalker, InvalidStatus
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models
    import pytest

    schema = SchemaFactory(ForeignKeyWalker)(models.User)
    payload = {"name": "foo", "created_at": datetime(2000, 1, 1)}  # pk omitted
    lookup = ModelLookup(models)

    with pytest.raises(InvalidStatus):
        _callFUT(payload, schema, lookup, strict=True)
def test_it__strict_false__then__required_are_notfound__ok():
    """With strict=False, missing required fields are simply left as None."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, ForeignKeyWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(ForeignKeyWalker)(models.User)
    payload = {"name": "foo", "created_at": datetime(2000, 1, 1)}  # pk omitted
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.User)
    assert obj.pk is None
    assert obj.name == "foo"
    assert obj.created_at == datetime(2000, 1, 1)
    assert obj.group_id is None
def test_it_complex__relation_decision():
    """StructuralWalker + RelationDesicion builds the nested Group object."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker, RelationDesicion
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    factory = SchemaFactory(StructuralWalker, relation_decision=RelationDesicion())
    schema = factory(models.User)
    user_ts = datetime(2000, 1, 1)
    group_ts = datetime(2001, 1, 1)
    group_payload = {"name": "ravenclaw", "color": "blue", "created_at": group_ts}
    payload = {"name": "foo", "created_at": user_ts, "group": group_payload}  # pk omitted
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.User)
    assert obj.pk is None
    assert obj.name == "foo"
    assert obj.created_at == datetime(2000, 1, 1)
    assert obj.group_id is None
    assert isinstance(obj.group, models.Group)
    assert obj.group.name == "ravenclaw"
    assert obj.group.color == "blue"
    assert obj.group.created_at == group_ts
    assert lookup.name_stack == []
def test_it_complex__fullset_decision():
    """UseForeignKeyIfPossibleDecision keeps the raw group_id instead of
    materializing a nested Group object.

    Fix: the original asserted ``result.group_id is 1`` -- an identity
    comparison against an int literal, which only passes via CPython's
    small-int caching and emits a SyntaxWarning on Python >= 3.8.  Value
    equality (``== 1``) is what the test actually means.
    """
    from alchemyjsonschema import (
        SchemaFactory,
        StructuralWalker,
        UseForeignKeyIfPossibleDecision,
    )
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models
    from datetime import datetime

    factory = SchemaFactory(
        StructuralWalker, relation_decision=UseForeignKeyIfPossibleDecision()
    )
    user_schema = factory(models.User)
    created_at = datetime(2000, 1, 1)
    user_dict = dict(name="foo", created_at=created_at, group_id=1)  # pk is not found
    modellookup = ModelLookup(models)
    result = _callFUT(user_dict, user_schema, modellookup, strict=False)
    assert isinstance(result, models.User)
    assert result.pk is None
    assert result.name == "foo"
    assert result.created_at == datetime(2000, 1, 1)
    assert result.group_id == 1
    assert modellookup.name_stack == []
def test_it_complex2():
    """A Group payload with a nested list of users builds child User objects."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(StructuralWalker)(models.Group)
    user_ts = datetime(2000, 1, 1)
    group_ts = datetime(2001, 1, 1)
    user_payload = {"name": "foo", "created_at": user_ts}  # pk omitted
    group_payload = {
        "name": "ravenclaw",
        "color": "blue",
        "created_at": group_ts,
        "users": [user_payload],
    }
    lookup = ModelLookup(models)

    obj = _callFUT(group_payload, schema, lookup, strict=False)

    assert isinstance(obj, models.Group)
    assert obj.pk is None
    assert obj.name == "ravenclaw"
    assert obj.color == "blue"
    assert obj.created_at == datetime(2001, 1, 1)
    assert isinstance(obj.users[0], models.User)
    assert obj.users[0].name == "foo"
    assert obj.users[0].created_at == user_ts
    assert lookup.name_stack == []
def test_it_complex__partial():
    """StructuralWalker tolerates a User payload without the optional group."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(StructuralWalker)(models.User)
    payload = {"name": "foo", "created_at": datetime(2000, 1, 1)}
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.User)
    assert obj.pk is None
    assert obj.name == "foo"
    assert obj.created_at == datetime(2000, 1, 1)
    assert obj.group_id is None
    assert lookup.name_stack == []
def test_it_complex__partial2():
    """A Group payload without its users collection still objectifies."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(StructuralWalker)(models.Group)
    payload = {"name": "ravenclaw", "color": "blue", "created_at": datetime(2001, 1, 1)}
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.Group)
    assert obj.pk is None
    assert obj.name == "ravenclaw"
    assert obj.color == "blue"
    assert obj.created_at == datetime(2001, 1, 1)
    assert lookup.name_stack == []
def test_it_complex__partia3():
    """An explicitly empty group sub-dict is treated like no group at all.

    NOTE(review): the name looks like a typo for ``...__partial3`` but is
    kept unchanged so external references to the test keep working.
    """
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(StructuralWalker)(models.User)
    payload = {"name": "foo", "created_at": datetime(2000, 1, 1), "group": {}}
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.User)
    assert obj.pk is None
    assert obj.name == "foo"
    assert obj.created_at == datetime(2000, 1, 1)
    assert obj.group_id is None
    assert lookup.name_stack == []
def test_it_complex__partial4():
    """An explicitly empty users list is accepted for a Group payload."""
    from datetime import datetime
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup
    import alchemyjsonschema.tests.models as models

    schema = SchemaFactory(StructuralWalker)(models.Group)
    payload = {
        "name": "ravenclaw",
        "color": "blue",
        "created_at": datetime(2001, 1, 1),
        "users": [],
    }
    lookup = ModelLookup(models)

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert isinstance(obj, models.Group)
    assert obj.pk is None
    assert obj.name == "ravenclaw"
    assert obj.color == "blue"
    assert obj.created_at == datetime(2001, 1, 1)
    assert lookup.name_stack == []
def test_it_nested():
    """Recursively nested A0 payloads build the full parent/child tree."""
    from alchemyjsonschema.tests import models
    from alchemyjsonschema import SchemaFactory, StructuralWalker
    from alchemyjsonschema.dictify import ModelLookup

    schema = SchemaFactory(StructuralWalker)(models.A0)
    lookup = ModelLookup(models)
    payload = {
        "name": "a0",
        "children": [
            {
                "name": "a00",
                "children": [{"name": "a000"}, {"name": "a001"}, {"name": "a002"}],
            },
            {"name": "a10", "children": [{"name": "a010"}]},
        ],
    }

    obj = _callFUT(payload, schema, lookup, strict=False)

    assert len(obj.children) == 2
    assert len(obj.children[0].children) == 3
    assert len(obj.children[1].children) == 1
| StarcoderdataPython |
11226777 | #!/usr/bin/env python3
import os
import time
import yaml
import lights
def wait():
    """Daemon loop: poll once per second for an args.yml job file.

    When the file appears it is parsed (keys: scene, duration, steps) and
    handed to lights.runner.main; the file is removed afterwards whether or
    not it was found, so each job runs at most once.
    """
    args_path = "args.yml"
    print("Daemon started")
    while True:
        print("Waiting...")
        try:
            with open(args_path) as handle:
                job = yaml.safe_load(handle)
            lights.runner.main(job["scene"], job["duration"], job["steps"])
        except FileNotFoundError:
            # no job pending yet
            pass
        finally:
            try:
                os.remove(args_path)
            except FileNotFoundError:
                pass
        time.sleep(1)


if __name__ == "__main__":
    wait()
| StarcoderdataPython |
6643563 | import logging
import os
import random
from collections import OrderedDict
from itertools import chain
from typing import Callable, Dict, List, Tuple
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from rationale_benchmark.utils import Annotation
from rationale_benchmark.models.pipeline.pipeline_utils import (
SentenceEvidence,
annotations_to_evidence_identification,
make_preds_epoch,
score_rationales,
)
def _get_sampling_method(training_pars: dict) -> Callable[
[List[SentenceEvidence], Dict[str, List[SentenceEvidence]]], List[SentenceEvidence]]:
"""Generates a sampler that produces (positive, negative) sentence-level examples
Returns a function that takes a document converted to sentence level
annotations and a dictionary of docid -> sentence level annotations, and
returns a set of sentence level annotations.
This sampling method is necessary as we can have far too many negative
examples in our training data (almost nothing is actually evidence).
n.b. this factory is clearly crying for modularization, again into
something that would call for dependency injection, but for the duration of
this project, this will be fine.
"""
# TODO implement sampling for nearby sentences (within the document)
if training_pars['sampling_method'] == 'random':
sampling_ratio = training_pars['sampling_ratio']
logging.info(f'Setting up random sampling with negative/positive ratio = {sampling_ratio}')
def random_sampler(document: List[SentenceEvidence], _: Dict[str, List[SentenceEvidence]]) -> \
List[SentenceEvidence]:
"""Takes all the positives from a document, and a random choice over negatives"""
positives = list(filter(lambda s: s.kls == 1 and len(s.sentence) > 0, document))
if any(map(lambda s: len(s.sentence) == 0, positives)):
raise ValueError("Some positive sentences are of zero length!")
all_negatives = list(filter(lambda s: s.kls == 0 and len(s.sentence) > 0, document))
# handle an edge case where a document can be only or mostly evidence for a statement
num_negatives = min(len(all_negatives), round(len(positives) * sampling_ratio))
random_negatives = random.choices(all_negatives, k=num_negatives)
# sort the results so the next step is deterministic,
results = sorted(positives + random_negatives)
# this is an inplace shuffle.
random.shuffle(results)
return results
return random_sampler
elif training_pars['sampling_method'] == 'everything':
def everything_sampler(document: List[SentenceEvidence],
_: Dict[str, List[SentenceEvidence]]) -> List[SentenceEvidence]:
return document
return everything_sampler
else:
raise ValueError(f"Unknown sampling method for training: {training_pars['sampling_method']}")
def train_evidence_identifier(evidence_identifier: nn.Module,
                              save_dir: str,
                              train: List[Annotation],
                              val: List[Annotation],
                              documents: Dict[str, List[List[int]]],
                              model_pars: dict,
                              optimizer=None,
                              scheduler=None,
                              tensorize_model_inputs: bool = True) -> Tuple[nn.Module, dict]:
    """Trains a module for rationale identification.

    This method tracks loss on the entire validation set, saves intermediate
    models, and supports restoring from an unfinished state. The best model on
    the validation set is maintained, and the model stops training if a patience
    (see below) number of epochs with no improvement is exceeded.

    As there are likely too many negative examples to reasonably train a
    classifier on everything, every epoch we subsample the negatives.

    Args:
        evidence_identifier: a module like the AttentiveClassifier
        save_dir: a place to save intermediate and final results and models.
        train: a List of interned Annotation objects.
        val: a List of interned Annotation objects.
        documents: a Dict of interned sentences
        model_pars: Arbitrary parameters directory, assumed to contain an "evidence_identifier" sub-dict with:
            lr: learning rate
            batch_size: an int
            sampling_method: a string, plus additional params in the dict to define creation of a sampler
            epochs: the number of epochs to train for
            patience: how long to wait for an improvement before giving up.
            max_grad_norm: optional, clip gradients.
        optimizer: what pytorch optimizer to use, if none, initialize Adam
        scheduler: optional, do we want a scheduler involved in learning?
        tensorize_model_inputs: should we convert our data to tensors before passing it to the model?
                                Useful if we have a model that performs its own tokenization (e.g. BERT as a Service)
    Returns:
        the trained evidence identifier and a dictionary of intermediate results.
    """

    def _prep_data_for_epoch(evidence_data: Dict[str, Dict[str, List[SentenceEvidence]]],
                             sampler: Callable[
                                 [List[SentenceEvidence], Dict[str, List[SentenceEvidence]]], List[SentenceEvidence]]
                             ) -> List[SentenceEvidence]:
        """Flatten ann_id -> docid -> sentences into one sampled example list."""
        output_sentences = []
        ann_ids = sorted(evidence_data.keys())
        # in place shuffle so we get a different per-epoch ordering
        random.shuffle(ann_ids)
        for ann_id in ann_ids:
            for docid, sentences in evidence_data[ann_id].items():
                data = sampler(sentences, None)
                output_sentences.extend(data)
        return output_sentences

    logging.info(f'Beginning training with {len(train)} annotations, {len(val)} for validation')
    evidence_identifier_output_dir = os.path.join(save_dir, 'evidence_identifier')
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(evidence_identifier_output_dir, exist_ok=True)

    model_save_file = os.path.join(evidence_identifier_output_dir, 'evidence_identifier.pt')
    epoch_save_file = os.path.join(evidence_identifier_output_dir, 'evidence_identifier_epoch_data.pt')

    identifier_pars = model_pars['evidence_identifier']
    if optimizer is None:
        optimizer = torch.optim.Adam(evidence_identifier.parameters(), lr=identifier_pars['lr'])
    # per-element losses so we can sum per batch and average per epoch ourselves
    criterion = nn.CrossEntropyLoss(reduction='none')
    sampling_method = _get_sampling_method(identifier_pars)
    batch_size = identifier_pars['batch_size']
    epochs = identifier_pars['epochs']
    patience = identifier_pars['patience']
    # BUGFIX: this previously read model_pars['evidence_classifier'] -- a
    # copy-paste slip that silently picked up the *classifier's* clipping
    # setting (or raised KeyError when absent).  Prefer the identifier's own
    # setting, but fall back to the classifier key so old configs keep working.
    max_grad_norm = identifier_pars.get(
        'max_grad_norm',
        model_pars.get('evidence_classifier', {}).get('max_grad_norm', None))

    evidence_train_data = annotations_to_evidence_identification(train, documents)
    evidence_val_data = annotations_to_evidence_identification(val, documents)

    device = next(evidence_identifier.parameters()).device

    results = {
        # "sampled" losses do not represent the true data distribution, but do represent training data
        'sampled_epoch_train_losses': [],
        'sampled_epoch_val_losses': [],
        # "full" losses do represent the true data distribution
        'full_epoch_val_losses': [],
        'full_epoch_val_acc': [],
        'full_epoch_val_rationale_scores': [],
    }
    # allow restoring an existing training run
    start_epoch = 0
    best_epoch = -1
    best_val_loss = float('inf')
    best_model_state_dict = None
    epoch_data = {}
    if os.path.exists(epoch_save_file):
        evidence_identifier.load_state_dict(torch.load(model_save_file))
        epoch_data = torch.load(epoch_save_file)
        start_epoch = epoch_data['epoch'] + 1
        # handle finishing because patience was exceeded or we didn't get the best final epoch
        if bool(epoch_data.get('done', 0)):
            start_epoch = epochs
        results = epoch_data['results']
        best_epoch = start_epoch
        best_model_state_dict = OrderedDict({k: v.cpu() for k, v in evidence_identifier.state_dict().items()})
    logging.info(f'Training evidence identifier from epoch {start_epoch} until epoch {epochs}')
    optimizer.zero_grad()
    for epoch in range(start_epoch, epochs):
        # fresh negative subsample every epoch
        epoch_train_data = _prep_data_for_epoch(evidence_train_data, sampling_method)
        epoch_val_data = _prep_data_for_epoch(evidence_val_data, sampling_method)
        sampled_epoch_train_loss = 0
        evidence_identifier.train()
        logging.info(
            f'Training with {len(epoch_train_data) // batch_size} batches with {len(epoch_train_data)} examples')
        for batch_start in range(0, len(epoch_train_data), batch_size):
            batch_elements = epoch_train_data[batch_start:min(batch_start + batch_size, len(epoch_train_data))]
            # we sample every time to thereoretically get a better representation of instances over the corpus.
            # this might just take more time than doing so in advance.
            targets, queries, sentences = zip(*[(s.kls, s.query, s.sentence) for s in batch_elements])
            ids = [(s.ann_id, s.docid, s.index) for s in batch_elements]
            targets = torch.tensor(targets, dtype=torch.long, device=device)
            if tensorize_model_inputs:
                queries = [torch.tensor(q, dtype=torch.long) for q in queries]
                sentences = [torch.tensor(s, dtype=torch.long) for s in sentences]
            preds = evidence_identifier(queries, ids, sentences)
            loss = criterion(preds, targets.to(device=preds.device)).sum()
            sampled_epoch_train_loss += loss.item()
            # normalize by batch size before backprop; the running total above
            # stays a per-example sum so the epoch average below is correct
            loss = loss / len(preds)
            loss.backward()
            if max_grad_norm:
                torch.nn.utils.clip_grad_norm_(evidence_identifier.parameters(), max_grad_norm)
            optimizer.step()
            if scheduler:
                scheduler.step()
            optimizer.zero_grad()
        sampled_epoch_train_loss /= len(epoch_train_data)
        results['sampled_epoch_train_losses'].append(sampled_epoch_train_loss)
        logging.info(f'Epoch {epoch} sampled training loss {sampled_epoch_train_loss}')

        with torch.no_grad():
            evidence_identifier.eval()
            # validation on the *sampled* distribution (used for early stopping)
            sampled_epoch_val_loss, _, sampled_epoch_val_hard_pred, sampled_epoch_val_truth = \
                make_preds_epoch(evidence_identifier,
                                 epoch_val_data,
                                 batch_size,
                                 device,
                                 criterion,
                                 tensorize_model_inputs)
            results['sampled_epoch_val_losses'].append(sampled_epoch_val_loss)
            sampled_epoch_val_acc = accuracy_score(sampled_epoch_val_truth, sampled_epoch_val_hard_pred)
            logging.info(f'Epoch {epoch} sampled val loss {sampled_epoch_val_loss}, acc {sampled_epoch_val_acc}')

            # evaluate over *all* of the validation data
            all_val_data = list(filter(lambda se: len(se.sentence) > 0, chain.from_iterable(
                chain.from_iterable(x.values() for x in evidence_val_data.values()))))
            epoch_val_loss, epoch_val_soft_pred, epoch_val_hard_pred, epoch_val_truth = \
                make_preds_epoch(evidence_identifier,
                                 all_val_data,
                                 batch_size,
                                 device,
                                 criterion,
                                 tensorize_model_inputs)
            results['full_epoch_val_losses'].append(epoch_val_loss)
            results['full_epoch_val_acc'].append(accuracy_score(epoch_val_truth, epoch_val_hard_pred))
            results['full_epoch_val_rationale_scores'].append(
                score_rationales(val, documents, epoch_val_data, epoch_val_soft_pred))
            logging.info(
                f'Epoch {epoch} full val loss {epoch_val_loss}, accuracy: {results["full_epoch_val_acc"][-1]}, rationale scores: {results["full_epoch_val_rationale_scores"][-1]}')

            # early stopping is keyed on the *sampled* val loss by design
            if sampled_epoch_val_loss < best_val_loss:
                logging.debug(f'Epoch {epoch} new best model with sampled val loss {sampled_epoch_val_loss}')
                best_model_state_dict = OrderedDict({k: v.cpu() for k, v in evidence_identifier.state_dict().items()})
                best_epoch = epoch
                best_val_loss = sampled_epoch_val_loss
                torch.save(evidence_identifier.state_dict(), model_save_file)
                epoch_data = {
                    'epoch': epoch,
                    'results': results,
                    'best_val_loss': best_val_loss,
                    'done': 0
                }
                torch.save(epoch_data, epoch_save_file)
        if epoch - best_epoch > patience:
            epoch_data['done'] = 1
            torch.save(epoch_data, epoch_save_file)
            break

    epoch_data['done'] = 1
    epoch_data['results'] = results
    torch.save(epoch_data, epoch_save_file)
    # restore and return the best (by sampled val loss) checkpoint
    evidence_identifier.load_state_dict(best_model_state_dict)
    evidence_identifier = evidence_identifier.to(device=device)
    evidence_identifier.eval()
    return evidence_identifier, results
| StarcoderdataPython |
312623 | from mininet.topo import Topo
class SingleSwitchTopo(Topo):
    """One switch (s1) with n hosts h1..hn attached on ports 1..n."""

    def __init__(self, n, **opts):
        Topo.__init__(self, **opts)
        sw = self.addSwitch('s1')
        for idx in xrange(1, n + 1):
            # host hN gets 10.0.0.N and a MAC whose last octet is N (hex)
            h = self.addHost('h%d' % idx,
                             ip="10.0.0.%d" % idx,
                             mac='00:00:00:00:00:%02x' % idx)
            self.addLink(h, sw, port2=idx)
class DoubleSwitchTopo(Topo):
    """Two edge switches (s1a, s2a) joined via a core switch (s3a).

    Each switch has a 'cpu' host on port 1; two data hosts hang off each
    edge switch on ports 4 and 5.  The *n* parameter is accepted for
    signature compatibility but unused.
    """

    def __init__(self, n, **opts):
        Topo.__init__(self, **opts)
        edge_a = self.addSwitch('s1a')
        edge_b = self.addSwitch('s2a')
        core = self.addSwitch('s3a')
        cpu_a = self.addHost('cpu1')
        cpu_b = self.addHost('cpu2')
        cpu_core = self.addHost('cpu3')
        # (name, ip, mac) in the original creation order
        host_specs = [
            ('h2', '10.0.0.2', '00:00:00:00:00:02'),
            ('h4', '10.0.0.3', '00:00:00:00:00:03'),
            ('h3', '10.0.1.3', '00:00:00:00:01:03'),
            ('h5', '10.0.1.2', '00:00:00:00:01:02'),
        ]
        hosts = [self.addHost(name, ip=ip, mac=mac)
                 for name, ip, mac in host_specs]
        self.addLink(hosts[0], edge_a, port2=4)
        self.addLink(hosts[1], edge_a, port2=5)
        self.addLink(hosts[2], edge_b, port2=4)
        self.addLink(hosts[3], edge_b, port2=5)
        self.addLink(cpu_a, edge_a, port2=1)
        self.addLink(cpu_b, edge_b, port2=1)
        self.addLink(cpu_core, core, port2=1)
        self.addLink(edge_a, core, port1=2, port2=2)
        self.addLink(core, edge_b, port1=3, port2=3)
class FiveSwitchTopo(Topo):
    """Five switches s1a..s5a in a line plus a shortcut link s2a-s5a.

    Every switch gets a 'cpuN' host on port 1 and two data hosts.  Host hN
    is addressed 10.0.S.N with subnet S = (N-1)//2 and a MAC whose last
    octet is N written as two decimal digits.
    """

    def __init__(self, **opts):
        Topo.__init__(self, **opts)
        switches = [self.addSwitch('s%da' % i) for i in range(1, 6)]
        cpus = [self.addHost('cpu%d' % i) for i in range(1, 6)]
        for cpu, sw in zip(cpus, switches):
            self.addLink(cpu, sw, port2=1)
        # (switch index a, switch index b, port on a, port on b)
        backbone = [(0, 1, 2, 2), (1, 2, 3, 3), (2, 3, 2, 2),
                    (3, 4, 3, 3), (1, 4, 4, 2)]
        for a, b, port_a, port_b in backbone:
            self.addLink(switches[a], switches[b], port1=port_a, port2=port_b)
        hosts = [self.addHost('h%d' % i,
                              ip="10.0.%d.%d" % ((i - 1) // 2, i),
                              mac='00:00:00:00:00:%02d' % i)
                 for i in range(1, 11)]
        # (switch index, switch port) for h1..h10; note s2a uses ports 5/6.
        attach = [(0, 3), (0, 4), (1, 5), (1, 6), (2, 4),
                  (2, 5), (3, 4), (3, 5), (4, 4), (4, 5)]
        for host, (sw_idx, port) in zip(hosts, attach):
            self.addLink(host, switches[sw_idx], port2=port)
| StarcoderdataPython |
3387880 | # -*- coding:utf-8 -*-
# !/usr/bin/env python3
# Power by zuosc
import json
import urllib.parse
import urllib.request
# Fetch current conditions from the ThinkPage weather widget API.
# NOTE(review): the response is assumed to be JSON shaped like
# {"weather": {"now": {...}, "alarms": [...]}} -- confirm against the API.
weatherApiAdder = 'http://widget.thinkpage.cn/api/weather?flavor=slim&location=WX4FBXXFKE4F&geolocation=enabled&language=zh-chs&unit=c&theme=chameleon&container=weather-widget&bubble=enabled&alarmType=circle&uid=UE4B455F4F&hash=cfc949b4b932fff04040f9c9f776018b'
# Use context managers so the HTTP responses are closed (the original
# leaked both connections).
with urllib.request.urlopen(weatherApiAdder) as response:
    jsonData = response.read().decode('utf8')
data = json.loads(jsonData)

title = '今日天气'
content = ('今日天气:' + data["weather"]["now"]["text"]
           + ' \r\n当前温度:' + data["weather"]["now"]["temperature"] + '℃')
if data["weather"]["alarms"]:
    content = content + ' \r\n天气报警:' + data["weather"]["alarms"][0]["type"] + data["weather"]["alarms"][0]["level"] + '预警'

# SECURITY: the ServerChan push key is hard-coded below; anyone with this
# file can push to the account.  Consider loading it from an env var.
sendurl = 'http://sc.ftqq.com/SCU5209T50ff781c69372d9b370387f5c079be01587ae52428055.send?'
params = urllib.parse.urlencode({'text': title, 'desp': content})
with urllib.request.urlopen(sendurl + params) as push_response:
    push_response.read()
336327 | """
Evaluate a python string as code
"""
import importlib.machinery
import importlib.util
import tempfile
import os
def eval_code(code):
    """Compile *code* (a Python source string) and import it as a module.

    The source is written to a temporary file, loaded under the module name
    'main', and the resulting module object is returned.

    Fixes over the original: the temporary file is always removed (it was
    leaked before), and the source is encoded as UTF-8 so non-ASCII code no
    longer raises UnicodeEncodeError.
    """
    fd, filename = tempfile.mkstemp(suffix=".py")
    try:
        os.write(fd, code.encode("utf-8"))
    finally:
        os.close(fd)
    try:
        loader = importlib.machinery.SourceFileLoader("main", filename)
        spec = importlib.util.spec_from_loader(loader.name, loader)
        mod = importlib.util.module_from_spec(spec)
        loader.exec_module(mod)
    finally:
        # the module is fully executed in memory; the backing file can go
        os.remove(filename)
    return mod
| StarcoderdataPython |
1948019 | <gh_stars>10-100
from pyedbglib.hidtransport.hidtransportfactory import hid_transport
from pyedbglib.protocols import housekeepingprotocol
from pyedbglib.protocols import housekeepingprotocol
from pyedbglib.protocols import avr8protocol
from pyedbglib.protocols import avr8protocolerrors
# Retrieve device info
from pymcuprog.deviceinfo import deviceinfo
# Construct an NVM provider
from pymcuprog.nvmupdi import NvmAccessProviderCmsisDapUpdi
from pyedbglib.protocols.avrcmsisdap import AvrCommand, AvrCommandError
from pyedbglib.protocols.jtagice3protocol import Jtagice3Command
import logging
import threading
import time
import asyncio
logging.basicConfig(level=logging.INFO,handlers=[logging.StreamHandler()])
class Debugger():
def __init__(self, DeviceName):
# Make a connection
self.transport = hid_transport()
self.transport.disconnect()
# Connect
self.transport.connect()
self.deviceInf = deviceinfo.getdeviceinfo(DeviceName)
self.memoryinfo = deviceinfo.DeviceMemoryInfo(self.deviceInf)
self.housekeeper = housekeepingprotocol.Jtagice3HousekeepingProtocol(self.transport)
self.housekeeper.start_session()
self.device = NvmAccessProviderCmsisDapUpdi(self.transport, self.deviceInf)
#self.device.avr.deactivate_physical()
self.device.avr.activate_physical()
# Start debug by attaching (live)
self.device.avr.protocol.attach()
#threading.Thread(target=pollingThread, args=(self.eventReciver,)).start()
def pollEvent(self):
#eventRegister = self.eventReciver.poll_events()
eventRegister = self.device.avr.protocol.poll_events()
#logging.info(eventRegister)
if eventRegister[0] == AvrCommand.AVR_EVENT: # Verifying data is an event
size = int.from_bytes(eventRegister[1:3], byteorder='big')
if size != 0:
#event recived
logging.info("Event recived")
eventarray = eventRegister[3:(size+1+3)]
SOF = eventarray[0]
protocol_version = eventarray[1:2]
sequence_id = eventarray[2:4]
protocol_handler_id = eventarray[4:5]
payload = eventarray[5:]
#logging.info(eventarray)
if payload[0] == avr8protocol.Avr8Protocol.EVT_AVR8_BREAK:
event_id = payload[0]
#event_version = payload[1]
pc = payload[1:5]
break_cause = payload[5]
extended_info = payload[6:]
print("PC: ", end="")
print(int.from_bytes(pc, byteorder='little'))
logging.info("Recived break event")
return (avr8protocol.Avr8Protocol.EVT_AVR8_BREAK, int.from_bytes(pc, byteorder='little'), break_cause)
else:
logging.info("Unknown event: " + payload[0])
return None
else:
logging.info("No event")
return None
# Memory interaction
def writeSRAM(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('internal_sram'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('internal_sram'), address-offset, data)
def readSRAM(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('internal_sram'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('internal_sram'), address-offset, numBytes)
def readFlash(self, address, numBytes):
logging.info("Reading "+str(numBytes)+" bytes from flash at " + str(address))
offset = (self.memoryinfo.memory_info_by_name('flash'))['address']
# See programmer.py:265 in pymcuprog, maybe flashread fails due to page alignement?
return self.device.read(self.memoryinfo.memory_info_by_name('flash'), address, numBytes)
def writeEEPROM(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('eeprom'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('eeprom'), address-offset, data)
def readEEPROM(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('eeprom'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('eeprom'), address-offset, numBytes)
def writeFuse(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('fuses'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('fuses'), address-offset, data)
def readFuse(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('fuses'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('fuses'), address-offset, numBytes)
def writeLock(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('lockbits'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('lockbits'), address-offset, data)
def readLock(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('lockbits'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('lockbits'), address-offset, numBytes)
def writeSignature(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('signatures'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('signatures'), address-offset, data)
def readSignature(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('signatures'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('signatures'), address-offset, numBytes)
def writeUserSignature(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('user_row'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('user_row'), address-offset, data)
def readUserSignature(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('user_row'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('user_row'), address-offset, numBytes)
# General debugging
def attach(self, do_break=False):
self.device.avr.protocol.attach(do_break)
def detach(self):
self.device.avr.protocol.detach()
# Flow controll
def reset(self):
self.device.avr.protocol.reset()
def step(self):
self.device.avr.protocol.step()
def stop(self):
self.device.avr.protocol.stop()
def run(self):
self.device.avr.protocol.run()
def runTo(self, address):
wordAddress = int(address/2)
self.device.avr.protocol.run_to(wordAddress)
def readStackPointer(self):
return self.device.avr.stack_pointer_read()
def readSREG(self):
return self.device.avr.protocol.memory_read(avr8protocol.Avr8Protocol.AVR8_MEMTYPE_OCD, 0x1C, 0x01)
def readRunningState(self):
# Debug interface to see what state the avr is in.
AVR8_CTXT_TEST = 0x80
AVR8_TEST_TGT_RUNNING = 0x00
running = bool(self.device.avr.protocol.get_byte(AVR8_CTXT_TEST, AVR8_TEST_TGT_RUNNING))
logging.info("AVR running state " + str(running))
return running
# Register and programcounter
def readRegs(self):
return self.device.avr.protocol.regfile_read()
def writeRegs(self, regs):
return self.device.avr.protocol.regile_write(regs)
def readProgramCounter(self):
# Returned as a word not a byte
return self.device.avr.protocol.program_counter_read()
def writeProgramCounter(self, programCounter):
self.device.avr.protocol.program_counter_write(programCounter)
# SoftwareBreakpoints EDBG expects these addresses in bytes
# Multiple SW breakpoints can be defined by shifting 4 bytes to the left
def breakpointSWSet(self, address):
self.device.avr.protocol.software_breakpoint_set(address)
def breakpointSWClear(self, address):
self.device.avr.protocol.software_breakpoint_clear(address)
def breakpointSWClearAll(self):
self.device.avr.protocol.software_breakpoint_clear_all()
    # HardwareBreakpoints EDBG expects these addresses in words
    def breakpointHWSet(self, address):
        """Set the hardware breakpoint; input is a byte address, converted to a word address."""
        wordAddress = int(address/2)
        self.device.avr.breakpoint_set(wordAddress)
    def breakpointHWClear(self):
        """Clear the hardware breakpoint."""
        self.device.avr.breakpoint_clear()
    # Cleanup code for detaching target
    def cleanup(self):
        """Stop the target, clear all breakpoints and tear the debug session down.

        Order matters: halt first, then clear SW/HW breakpoints, detach the
        debugger, deactivate the physical interface and finally unwind the
        housekeeping/transport layers.
        """
        # and end debug
        self.device.avr.protocol.stop()
        self.device.avr.protocol.software_breakpoint_clear_all()
        self.device.avr.breakpoint_clear()
        self.device.avr.protocol.detach()
        # Stop session
        #avr.stop()
        self.device.avr.deactivate_physical()
        # Unwind the stack
        self.housekeeper.end_session()
        self.transport.disconnect()
    def __exit__(self, exc_type, exc_value, traceback):
        """Context-manager exit: always tear down the debug session (exceptions propagate)."""
        self.cleanup()
| StarcoderdataPython |
3581777 | from unittest.test.test_case import Test
def format_poem(poem):
    """Return *poem* with each '.'-terminated sentence on its own line.

    Sentences are split on '.', stripped of surrounding whitespace and
    re-joined with '.\\n'.  Any final non-'.' punctuation ('?', '!', ...)
    survives because str.split leaves it inside the last fragment.

    Fix: the original else-branch chopped the last character of the joined
    text and re-appended ``poem[-1:]`` -- a no-op in the normal case, but it
    corrupted the output whenever the poem ended in whitespace.  Both
    branches collapse to a single join.
    """
    return ".\n".join(fragment.strip() for fragment in poem.split('.'))
# Demo values; NOTE(review): `a` (the mean of x) is computed but never used.
x=[1, 2, 3]
a = sum(x)/len(x)
# Print the Zen-of-Python excerpt with one sentence per line.
print(format_poem('Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated?'))
# 'Beautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.')
# Read integers from stdin until a 0 sentinel, then print them in reverse order.
a = int(input())
arr = []
while a != 0:
    arr.append(a)
    a = int(input())
# The 0 sentinel itself is not stored; emit the collected values last-in, first-out.
for item in arr[::-1]:
    print(item)
| StarcoderdataPython |
11250933 | # -*- coding: utf-8 -*-
"""
TopGun Backtest Class
@author: David
"""
# %% IMPORTs CELL
# Default Imports
import numpy as np
import pandas as pd
# Plotly for charting
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
# %% CLASS MODULE
class BacktestAnalytics(object):
""" Backtest Analytics & Reporting for Timeseries Data
Here we take one or more portfolios (strategies) timeseries returns and
run various tests vs. against a specified becnhmark timeseries. There is
an option to provide multiple benchmark returns in the bmkrtns dataframe;
at the moment only one benchmark is used as a direct comparitor but those
other indices will be used as part of a correlation analysis.
NB/ THIS DOES NOT MANAGE STOCK LEVEL ANALYSIS - ONLY TIMESERIES
INPUTS:
portrtns: pd.DataFrame (or pd.Series) of timeseries returns or prices;
if using prices must add ports_as_rtns=False
bmkrtns: same conditions as portrtns (& using bmks_as_rtns)
benchmark (OPTIONAL): str() as column name in bmkrtns dataframe. If
not provided a vector of zeros is used as the benchmark returns
eom: True(default)|False will converts all input dates to end-of-month
freq: 12(default) is the periods per annum. Currently only works monthly
MAIN FUNCTIONS:
run_backtest():
* builds data from portrtns, bmkrtns & benchmark
* creates full period summary table
* builds and stores rolling vol, TE, IR, etc..
* builds & saves drawdown series and analyses individual drawdowns
* creates wider correlation matrix inc. xs_rtns
plot_master():
* creates dictionary of useful plots
pretty_panda():
* applies basic styling to a pandas table - this could move
* bespoke sub funcs extend this; these funcs start "pretty_panda_xxx"
REPORTING:
In all cases we produce a templated markdown script with Plotly plots
already embedded as HTML - these can be fed to report_writer or anything
which turns markdown to a static html/pdf.
- markdown_doc() is primary function for generating markdown. REQUIRES
plot_master() to have been run but will prettify dataframe itself.
DEVELOPMENT:
- dynamic plots for correlation wrt time
- more work on hit-rates
- PCA based analysis
- basic checking that input benchmarks or Rf in bmkrtns columns
Author: <NAME>
"""
    def __init__(self, portrtns, bmkrtns,
                 benchmark=None, Rf=None,
                 eom = True, freq=12,
                 ports_as_rtns=True, bmks_as_rtns=True):
        """Store inputs, normalise dates to month-end and register the plotly template."""
        # ingest portfolio (strategy) and benchmark returns
        # check if supplied data is as returns or prices
        # if prices convert to returns
        self.portrtns = portrtns if ports_as_rtns else portrtns.pct_change()
        self.bmkrtns = bmkrtns if bmks_as_rtns else bmkrtns.pct_change()
        # convert to end-of-month dates if required
        # if we do this at the initialisation stage we know all else is eom
        if eom:
            self.portrtns = self._eom(self.portrtns)
            self.bmkrtns = self._eom(self.bmkrtns)
        # Name of benchmark - should match column name in bmkrtns
        # Similarly the "Risk-Free" component if being provided
        self.Rb = benchmark
        self.Rf = Rf
        # Other options
        self.freq = freq # Assume 12 for monthly
        # Other setup things
        self.rolling = dict() # blank dictionary for rolling window frames
        # Plotly template - registered below as 'multi_strat' and referenced
        # by every plot_* method in this class
        colourmap = ['black', 'teal', 'purple', 'grey', 'deeppink', 'skyblue', 'lime', 'green','darkorange', 'gold', 'navy', 'darkred',]
        fig = go.Figure(layout=dict(
                font={'family':'Garamond', 'size':14},
                plot_bgcolor= 'white',
                colorway=colourmap,
                showlegend=True,
                legend={'orientation':'v'},
                margin = {'l':75, 'r':50, 'b':25, 't':50},
                xaxis= {'anchor': 'y1', 'title': '', 'hoverformat':'.1f', 'tickformat':'.0f',
                        'showline':True, 'linecolor': 'gray',
                        'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
                        'showgrid': True, 'gridcolor': 'whitesmoke',
                        },
                yaxis= {'anchor': 'x1', 'title': '', 'hoverformat':'.1f', 'tickformat':'.0f',
                        'showline':True, 'linecolor':'gray',
                        'zeroline':True, 'zerolinewidth':1 , 'zerolinecolor':'whitesmoke',
                        'showgrid': True, 'gridcolor': 'whitesmoke'
                        },
                updatemenus= [dict(type='buttons',
                                   active=-1, showactive = True,
                                   direction='down',
                                   y=0.5, x=1.1,
                                   pad = {'l':0, 'r':0, 't':0, 'b':0},
                                   buttons=[])],
                annotations=[],))
        # Save template (global plotly registry side effect)
        pio.templates['multi_strat'] = pio.to_templated(fig).layout.template
        return
# %% CLASS PROPERTIES
# Portfolio or Strategy Returns - should be a pd.DataFrame
@property
def portrtns(self): return self.__portrtns
@portrtns.getter
def portrtns(self): return self.__portrtns
@portrtns.setter
def portrtns(self, x):
if isinstance(x, pd.Series):
self.__portrtns = x.to_frame()
elif isinstance(x, pd.DataFrame):
self.__portrtns = x
else:
raise ValueError('portrtns must be a pandas df or series: {} given'
.format(type(x)))
# Benchmark Returns - should be a pd.DataFrame
@property
def bmkrtns(self): return self.__bmkrtns
@bmkrtns.getter
def bmkrtns(self): return self.__bmkrtns
@bmkrtns.setter
def bmkrtns(self, x):
if isinstance(x, pd.Series):
self.__bmkrtns = x.to_frame()
elif isinstance(x, pd.DataFrame):
self.__bmkrtns = x
else:
raise ValueError('bmkrtns must be a pandas df or series: {} given'
.format(type(x)))
# %% BIG BANG
def big_bang(self, title=""):
""" End-to-End Control Function """
# Run Basic Backtest
self.run_backtest()
# Generate Plots
self.plot_master()
# Generate Markdown
md = self.markdown_doc(title=title)
self.md = md
return md
# %% HELPER FUNCTIONS
def _eom(self, x):
""" Trivial function to ensure End-of-Month Dates in Pandas """
x.index = x.index + pd.offsets.MonthEnd(0)
return x
# %% BASIC BACKTESTING
    def run_backtest(self):
        """ MAIN FUNCTION
        Function will splice port returns with benchmark & Rf returns so we have
        a common time history, then do a series of things:
            - Cumulative Returns
            - Excess Returns (to benchmark)
            - Drawdown and Excess Drawdown
            - Rolling 12m metrics
            - Summary Table - Full Sample Period
            - Summary Table - per_annum except most recent year which is YTD
            - Build "wide" correlation matrix with port rtns, xs returns and
              all benchmarks specified in self.bmkrtns
        INPUTS not required but the following must have been set:
            - self.portrtns
            - self.bmkrtns
            - self.benchmark
            - self.Rf
        Side effects: populates self.rtns, self.cum_rtn, self.xsrtns,
        self.cum_xs_rtn, drawdown frames/tables, self.rolling[12],
        self.summary, self.summary_pa, self.rtns_wide and self.corr.
        """
        # Benchmark returns:
        # pull the named column from bmkrtns if provided,
        # otherwise use a vector of zeros (absolute-return comparison)
        if self.Rb == None:
            bmk = pd.Series(data=0, index=self.portrtns.index, name='BMK')
        else:
            bmk = self.bmkrtns.loc[:,self.Rb]
            bmk.name = 'BMK'
        # Risk Free Rate Stuff
        # Pull from bmkrtns if index provided; set as vector of 0 otherwise
        # Be careful about the alignment of dates (indices)
        if self.Rf == None:
            Rf = pd.Series(data=0, index=self.portrtns.index, name='Rf')
        else:
            Rf = self.bmkrtns.loc[:, self.Rf]
            Rf.name = 'Rf'
        # Consolidated dataframe for risk-free, benchmarks & returns
        # Also set up cumulative returns
        # Rf always at 0, Benchmark always at 1 in dataframe
        self.rtns = pd.concat([Rf, bmk, self.portrtns], axis=1).dropna()
        cr = (1 + self.rtns).cumprod() * 100    # cumulative returns
        self.cum_rtn = cr
        # Excess Returns
        # Remember again Rb at 1
        self.xsrtns = self.rtns.subtract(self.rtns.iloc[:, 1], axis='rows')
        self.cum_xs_rtn = cr.subtract(cr.iloc[:,1], axis='rows') + 100
        # drawdown analysis
        self.drawdown = self.rtns2drawdown(alpha=False)
        self.xs_drawdown = self.rtns2drawdown(alpha=True)
        self.drawdown_table = self.drawdown_breakdown(alpha=False)
        self.xs_drawdown_table = self.drawdown_breakdown(alpha=True)
        # rolling period analysis (currently 12m windows only)
        for t in [12]:
            # 12m returns for data & risk free index
            irtn = cr.pct_change(t)
            # excess return taken by subtracting the benchmark
            irtn_xs = irtn.subtract(irtn.iloc[:, 1], axis='rows')
            # rolling volatility
            iVol = self.rtns.rolling(window=t).std() * np.sqrt(self.freq)
            # Ex-Post Tracking Error [std(Rp-Rb)]
            iTE = self.xsrtns.rolling(t).std() * np.sqrt(self.freq)
            # Sharpe Ratio [(Rp-Rb)/vol]
            # Remember Rf at position 0
            iSharpe = irtn.subtract(irtn.iloc[:, 0], axis='rows').divide(iVol, axis='rows')
            # save ith data to dictionary
            self.rolling[t] = dict(vol=iVol,
                                   rtn=irtn,
                                   xsrtn=irtn_xs,
                                   te=iTE,
                                   sharpe=iSharpe)
        # Run summary table & annualised summary and ingest
        self.summary = self.backtest_summary()
        self.summary_pa = self.per_annum()
        # Extended Correlation Matrix
        # Use BMK, PORT, PORT_XS_RTNS & the bmkrtns indices to form corr matrix
        # Some minor adjustments to remove Rf from 1st column
        rtns_wide = pd.concat([self.rtns.iloc[:,1:], self.xsrtns.iloc[:, 2:]], axis=1)
        rtns_wide.columns = list(self.xsrtns.columns)[1:] + list(self.xsrtns.columns + '_XS')[2:]
        rtns_wide = pd.concat([rtns_wide, self.bmkrtns], axis=1).dropna()
        self.rtns_wide = rtns_wide
        self.corr = rtns_wide.corr()
        return
def rtns2drawdown(self, alpha=True):
""" Returns-to-Drawdown Timeseries
NB/ Rebased to 0 not 100
"""
# Need to select a method for drawdown
# if alpha is True use excess returns, otherwise returns
# Remove risk free column
rtns = self.xsrtns if alpha else self.rtns
rtns = rtns.iloc[:,1:]
dd = 1 + rtns # add 1 to monthly rtns
dd.iloc[0,:] = 100 # rebase to 100
# iterate through each time period
# create an index series with a max of 100
for i, d in enumerate(dd.index):
# ignore 0th because we need the i-1 time period
if i == 0:
continue
ix = dd.iloc[i-1] * dd.iloc[i,:] # index level for i
ix[ix > 100] = 100 # reset to 100 if > that
dd.iloc[i,:] = ix # populate drawdown dataframe
return (dd - 100) / 100 # set to zero & percentages
    def drawdown_breakdown(self, alpha=True, dd_threshold=0):
        """ Drawdowns Details by Individual Drawdown
        Builds table which breaks out each individual drawdown period from a
        timeseries of drawdowns (set with base at zero NOT 100). Table data
        currently shows the date drawdown starts, throughs & ends as well as
        the number of months in total, top to bottom & recovery as well as the
        max drawdown itself.
        INPUTS:
            alpha: True(default)|False the function takes the drawdown ts from
                self.xs_drawdown or self.drawdown depending on if alpha.
                NB/ MUST RUN self.rtns2drawdown FIRST OR THIS WILL FAIL
            dd_threshold: +ve number; excludes drawdowns less than this level.
        RETURNS: pd.DataFrame with one row per (port, drawdown) episode.
        """
        # determine if we need table of excess drawdowns or just drawdowns
        # the table determines if we need to start on the 0th or 1st column
        # (no point doing excess returns on a benchmark)
        if alpha:
            dd = self.xs_drawdown
            n_start = 1
        else:
            dd = self.drawdown
            n_start = 0
        # dummy output dataframe
        df = pd.DataFrame()
        # iterate through strategies in port
        # start at n_start so the benchmark is skipped for alpha tables
        for p in dd.columns[n_start:]:
            ix = dd[p]
            # find index of series where value == 0 (i.e. at the high-water mark)
            # can use this to to create sub-series of drawdowns
            idx = np.argwhere(ix.values == 0)
            idx = list(idx.flatten()) # flatten to list (makes life much easier)
            # because we are searching for 0s we won't get an index if the end
            # of the current timeseries is still in drawdown
            # fudge by checking if the last index == index of final obs
            # if not add the index of final obs to the list
            # NOTE(review): idx positions are < len(ix), so this append always
            # fires; a trailing zero makes the final segment empty & skipped.
            if idx[-1] != int(len(ix)):
                idx.append(len(ix))
            for i, v in enumerate(idx):
                # relative index means we start by looking back
                # thus ignore the first zero we find
                if i == 0:
                    continue
                z = ix.iloc[(idx[i-1]+1):(idx[i]+1)]
                # ignore blanks (which will be periods of positive performance)
                if len(z) > 1:
                    # create dictionary with info from drawdown period
                    start=z.index[0]
                    end=z.index[-1]
                    trough=z.idxmin()
                    # subset the series to just the ith drawdown
                    idd = dict(start=start, end=end, trough=trough,
                               length=z.count(),
                               fall=ix.loc[start:trough].count(),
                               recovery=ix.loc[trough:end].count(),
                               drawdown=z.min(),)
                    # This is a wrinkly
                    # We forced an index if the series is still in drawdown
                    # need to set the exit & the recovery to nan
                    if v == idx[-1] and ix[-1] != 0:
                        idd['recovery'] = np.nan
                        idd['end'] = np.nan
                    # Thresholds - skip shallow drawdowns
                    if abs(idd['drawdown']) < dd_threshold:
                        continue
                    # add to output dataframe
                    df = pd.concat([df, pd.Series(idd, name=p)], axis=1)
        return df.T
def backtest_summary(self):
""" Summary Table for the Whole Sample Period
"""
df = pd.DataFrame()
# Annualised Total Return, Vol & Risk-adjusted-return
df['TR'] = (self.cum_rtn.iloc[-1,:]/100)**(self.freq/len(self.cum_rtn)) - 1
df['Vol'] = self.rtns.std() * np.sqrt(self.freq)
# Beta, Ex-Post Tracking Error & Information Ratio
df['Beta'] = self.rtns.cov().iloc[:,1] / self.rtns.iloc[:,1].var()
df['TE'] = self.xsrtns.std() * np.sqrt(self.freq)
df['IR'] = (df.TR - df.TR[1]) / df.TE
# Sharpe Ratio & Risk-Adjusted-Return
#Rf = (self.Rf_cum_rtn[-1]/100)**(self.freq/len(self.cum_rtn)) - 1
df['Sharpe'] = (df.TR - df.TR[0]) / df.Vol
df['RaR'] = df.TR / df.Vol
# Drawdown Analysis
df['Max_Drawdown'] = self.drawdown.min(axis=0)
df['Max_XS_DD'] = self.xs_drawdown.min(axis=0)
df['Hitrate'] = self.xsrtns[self.xsrtns > 0].count() / self.rtns.count()
df['xs_mean'] = self.xsrtns.mean()
df['xs_worst'] = self.xsrtns.min()
df['xs_best'] = self.xsrtns.max()
# Remove Risk Free Rate from summary table
self.Rf_obs_rtn = df.loc['Rf', 'TR']
self.summary = df.T.iloc[:, 1:]
return self.summary
    def per_annum(self):
        """ Convert Return Stream to Per Annum Metrics
        NB/ for current year we calculate YTD rather than annualised or 12m
        Returns (and caches on self.summary_pa) a dict keyed by
        'rtn', 'alpha', 'xscash', 'vol', 'te', 'sharpe', 'ir', each a
        DataFrame indexed by calendar year.
        """
        # Requires only the returns dataframe
        # Rf in [0] and Rb in [1]
        x = self.rtns
        pa = dict.fromkeys(['rtn', 'alpha', 'xscash', 'vol', 'te', 'sharpe', 'ir'])
        # create group object which has years as keys
        # find index of last month; can't just do annual or we miss YTD
        grp = x.index.groupby(x.index.year)
        idx = [v[-1] for v in grp.values()]    # index of last month of each year
        yrs = grp.keys()    # list of years
        # Return - Annual & current YTD
        # 1st ret2px, then subset dates and calc return
        rtn = (1 + x).cumprod().loc[idx, :].pct_change()
        rtn.index = yrs    # relabel indices to years (from timestamp)
        pa['rtn'] = rtn
        # Volatility - fairly simple
        pa['vol'] = x.groupby(x.index.year).std() * np.sqrt(12)
        # Alpha & Excess-Cash Return
        # Remember Rf in posn 0 & Rb in posn 1
        pa['xscash'] = rtn.subtract(rtn.iloc[:,0], axis='rows')
        pa['alpha'] = rtn.subtract(rtn.iloc[:,1], axis='rows')
        # Tracking Error
        # Can't use rtn above because that is annualised
        # Need to create monthly series of alpha stream
        xsrtn = x.subtract(x.iloc[:,1], axis='rows')
        pa['te'] = xsrtn.groupby(x.index.year).std() * np.sqrt(12)
        # Sharpe & IR therefore easy to calculate
        pa['sharpe'] = pa['xscash'] / pa['vol']
        pa['ir'] = pa['alpha'] / pa['te']
        self.summary_pa = pa
        return pa
# %% PLOTLY PLOTS
def _px_addsource(self, fig, x=1, y=-0.125, align='right'):
return fig.add_annotation(
text="Source: STANLIB Multi-Strategy".format(),
xref='paper', yref='paper',
x=x, y=y, ax=0, ay=0,
align=align)
    def plot_index(self, df, title="", benchmark=True, risk_free=False,
                   yfmt=['.0f', '.2f'], ytitle='Port', height=0,
                   source=False, y_src=-0.15):
        """ Basic Line Plot in Backtester
        Column 0 of ``df`` is treated as the risk-free series, column 1 as
        the benchmark; yfmt is [tickformat, hoverformat].
        NOTE(review): yfmt is a mutable default argument - safe only because
        it is never mutated in this method.
        """
        # Remember the 1st column is a Risk-Free rate
        Rf = df.iloc[:,0] # Risk Free
        df = df.iloc[:,1:] # Benchmark & Simulations
        # Plot basic line
        fig = px.line(df, title=title, labels={'variable':'Port:'}, template='multi_strat', )
        # Append Risk-Free Line if Required
        if risk_free:
            fig.add_scatter(x=Rf.index, y=Rf, name="Rf",
                            line={'color':'black', 'dash':'dot','width': 0.75})
        # Hide benchmark if required (benchmark is the first trace)
        if not benchmark:
            fig.data[0]['visible'] = 'legendonly' # hide bmk
        fig.update_layout(
            yaxis= {'anchor':'x1','title':ytitle, 'tickformat':yfmt[0], 'hoverformat':yfmt[1], },
            xaxis= {'anchor':'y1','title':'', 'hoverformat':'%b-%y', 'tickformat':'%b-%y',},)
        if height != 0:
            fig.update_layout(height=height)
        if source:
            fig = self._px_addsource(fig, y=y_src)
        return fig
    def plot_ridgeline(self, df, title='Ridgeline KDE Distributions',
                       side='positive', meanline=True, box=False, width=3,
                       template='multi_strat',
                       source=False, y_src=-0.15,
                       **kwargs):
        """ Simplified KDE from bootstrapper
        Draws one horizontal violin (KDE ridgeline) per column; column 0
        (risk-free) is dropped.  Colours blend teal -> purple across ports.
        """
        # Remember the 1st column is a Risk-Free rate
        #Rf = df.iloc[:,0] # Risk Free
        df = df.iloc[:,1:] # Benchmark & Simulations
        n = len(df.columns)
        # create a blended colours list- here is teal to purple
        if n > 1:
            from plotly.colors import n_colors
            colors = n_colors('rgb(0, 128, 128)', 'rgb(128, 0, 128)', n, colortype='rgb')
        else:
            colors = ['rgb(0, 128, 128)']
        # blank plotly express template
        fig = px.scatter(title=title, template=template)
        for i, v in enumerate(df): # add violin plots as traces
            fig.add_trace(go.Violin(x=df.iloc[:,i],
                                    line_color=colors[i],
                                    line_width=1,
                                    name=v,
                                    spanmode='soft',))
        # convert from violins to horizontal kde charts
        fig.update_traces(orientation='h',
                          side=side,
                          meanline_visible=meanline,
                          width=width,
                          box_visible=box)
        # update layouts
        fig.update_layout(
            yaxis= {'anchor':'x1', 'title':'Simulation', 'hoverformat':'.1%', 'tickformat':'.0%',},
            xaxis= {'anchor':'y1', 'title':'Annualised Return', 'hoverformat':'.1%', 'tickformat':'.0%',})
        if source:
            fig = self._px_addsource(fig, y=y_src)
        return fig
def plot_histo(self, df, title='', opacity=0.5, benchmark=False,
source=False, y_src=-0.15):
""" Basic Histogram """
# Remember the 1st column is a Risk-Free rate
#Rf = df.iloc[:,0] # Risk Free
df = df.iloc[:,1:] # Benchmark & Simulations
fig = px.histogram(df, title=title, histnorm='probability',
opacity=opacity, template='multi_strat')
if benchmark != True:
fig.data[0]['visible'] = 'legendonly' # hide bmk from histogram
fig.update_layout(barmode='overlay')
fig.update_layout(
yaxis= {'anchor':'x1','title':'Probability', 'tickformat':'.0%', 'hoverformat':'.2%', },
xaxis= {'anchor':'y1','title':'Excess Return', 'tickformat':'.1%', 'hoverformat':'.2%', },)
if source:
fig = self._px_addsource(fig, y=y_src)
return fig
    def plot_regression(self, title='', alpha=True,
                        source=False, y_src=-0.15):
        """ CAPM Style Regression Plot
        Plots the benchmark on the x-axis & port(s) on the y-axis;
        OLS regression line plotted through
        IMPORTANT:
            function takes input dataframe from self. Therefore in order to
            run you need to have already run a backtest function which stores
            self.xsrtns or self.rtns
        INPUTS:
            alpha: True(default)|False decides between xsrtns or rtns dfs
        NOTE(review): both branches set benchmark=False, so the first trace
        is always legend-only regardless of ``alpha``.
        """
        # stack either the returns or excess returns
        # rename columns as required
        # Also remember to remove risk free column
        if alpha:
            y = self.xsrtns.iloc[:,1:].stack().reset_index()
            ytitle='Alpha'
            benchmark=False
        else:
            y = self.rtns.iloc[:,1:].stack().reset_index()
            ytitle='Port Return'
            benchmark=False
        y.columns = ['Dates', 'Port', 'Returns'] # rename columns
        # Repmat benchmark returns & Match columns
        # This is so we can stack - so we can then concat
        x = pd.concat([self.rtns['BMK']] * (len(self.xsrtns.columns)-1), axis=1)
        x.columns = self.xsrtns.columns[1:] # [1:] excludes Rf
        x = x.stack().reset_index()
        x.columns = ['Dates', 'Port', 'Mkt']
        # Merge things together so we have an x, y & colour column
        z = pd.concat([x,y['Returns']], axis=1)
        # plot scatter with OLS
        fig = px.scatter(z, title=title,
                         x='Mkt', y='Returns', color='Port',
                         trendline="ols",
                         template='multi_strat')
        fig.update_layout(
            yaxis= {'anchor':'x1','title':ytitle, 'tickformat':'.1%', 'hoverformat':'.2%', },
            xaxis= {'anchor':'y1','title':'Benchmark Return', 'tickformat':'.1%', 'hoverformat':'.2%', },)
        if not benchmark:
            fig.data[0]['visible'] = 'legendonly' # hide bmk
        if source:
            fig = self._px_addsource(fig, y=y_src)
        return fig
def plot_hitrate(self, df, title='', binary=True,
source=False, y_src=-0.15):
""" Hitrate Heatmap
Plots Months x Years Heatmap, either as returns or binary outcome
INPUT:
df: pd.DataFrame with each columns a series of returns
binary: True(default)|False map the returns or switch to 1/0
depending on if the monthly return was positive or negative
"""
# Use crosstab to break pd.Series to pd.DataFrame with months x years
# Cols will be done alphabetically so we manually reorder dataframe
plots = pd.crosstab(df.index.year,
df.index.strftime("%b"),
df.values,
aggfunc='sum',
rownames=['years'],
colnames=['months'])
# Re0order because crosstab will spit out in alphabetical order
plots = plots.loc[:,['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']]
# Convert excess returns to hit/miss
if binary:
plots = plots.applymap(lambda x: 1 if x >= 0 else x)
plots = plots.applymap(lambda x: 0 if x <= 0 else x)
# Plot
fig = px.imshow(plots, x=plots.columns.to_list(), y=plots.index.to_list(),
title=title,
labels=dict(x='Month', y='Year', color='Hit-or-Miss'),
color_continuous_midpoint=0,
aspect='auto', template='multi_strat')
# Colourscale stuff
fig.update_traces(dict(colorscale='Tealrose', reversescale=True, showscale=False, coloraxis=None),)
if source:
fig = self._px_addsource(fig, y=y_src)
return fig
def plot_hitrates(self,
min_count=3,
show=False,
plotly2html=True, plotlyjs='cdn',
plot_height=450, plot_width=850):
""" Combined Function Charts & Annual Table
Charts are the Year x Month, binary, hit-or-miss heatmap and
Table is the annualised hit rate per year - in a styled dataframe
IMPORTANT:
requires bactest functions have to be run because needs
self.xsrtns & self.rtns dataframes to exist
uses self.plot_hitrates() function
INPUTS:
min_count: 3(default) is min. months req to get a per-year number
OUTPUT:
dict() with key 'annual' for styled df & others will be port name
i.e. {'annual':styled_df, 'PORT1':plotly_fig}
"""
plots = dict() # dummy dictionary for plotly & dataframe
df = pd.DataFrame() # dummy dataframe for table
# iterate through each portfolios alpha
# could be done as a matrix but complexity isn't worth the speed
# remember to remove risk-free column zero
for i, p in enumerate(self.xsrtns.iloc[:,1:]):
if i == 0:
continue
## PLOTLY
# Get the Year x Month Hitrate Plot
plots[p] = self.plot_hitrate(self.xsrtns[p],
title="Hit Rate Heatmap: {}".format(p),
binary=True,
source=False, y_src=-0.15)
## TABLE
# Calc the average annual across backtest things
# use crosstab again to break down by year and month
# then we can sum across the months
ix = self.xsrtns.loc[:, p]
ix = pd.crosstab(index=ix.index.year,
columns=ix.index.strftime("%b"),
values=ix.values,
aggfunc='sum',
rownames=['years'],
colnames=['months'])
# Map excess returns to hit or miss
ix = ix.applymap(lambda x: 1 if x >= 0 else x)
ix = ix.applymap(lambda x: -1 if x <= 0 else x)
# Hit rate by month & rename result series
ihr = ix[ix==1].sum(axis=1) / ix.count(axis=1)
# May not want to distort data if we don't have a minimum no obs
# by default we use 3-months
if min_count > 0:
ihr[~(ix.count(axis=1) >= min_count)] = np.nan
ihr.name = p
df = pd.concat([df, ihr.to_frame()], axis=1)
if show:
for k in plots:
plots[k].show()
if plotly2html:
for k, v in plots.items():
plots[k] = v.to_html(full_html=False,
include_plotlyjs=plotlyjs,
default_height=plot_height,
default_width=plot_width,
)
plots['annual'] = self.pretty_panda(df.reset_index())\
.format(formatter="{:.0f}", subset=pd.IndexSlice[:, df.columns[0]])\
.format(formatter="{:.1%}", subset=pd.IndexSlice[:, df.columns[0:]])\
.background_gradient('RdYlGn', vmin=0.2, vmax=0.8, subset=pd.IndexSlice[:, df.columns[0:]])\
.highlight_null(null_color='white')\
return plots
    def plot_correl(self, cor=None, title='Correlation Matrix', aspect='auto',
                    colorscale='Tealrose', reversescale=False, **kwargs):
        """ Plotly Heatmap with Overlay annotations
        NB/ DIRECT RIP FROM BOOTSTRAP - need to consolidate these in Overplot
        Defaults to self.corr (built by run_backtest) when ``cor`` is None.
        """
        # Pull correlation matrix from Bootsrap class
        if cor is None:
            cor = self.corr
        ## Basic plotly express imshow heatmap
        fig = px.imshow(cor,
                        x=cor.index, y=cor.index,
                        labels={'color':'Correlation'},
                        title=title,
                        color_continuous_midpoint=0, # change for VCV
                        aspect=aspect,
                        **kwargs)
        ## Formatting
        fig.update_layout(margin = {'l':25, 'r':50, 'b':0, 't':50},)
        # format to 2dp by editing the z axis data
        fig['data'][0]['z'] = np.round(fig['data'][0]['z'], 2)
        # Heatmap colour - I rather like Tealrose
        fig.update_traces(dict(colorscale=colorscale, reversescale=reversescale,
                               showscale=False, coloraxis=None),)
        # By default plotly imshow doesn't give values, so we append
        # Each value is a unique annotation
        # iterate through columns & rows (which is a bit silly)
        N = cor.shape[0]
        for i in range(N):
            for j in range(N):
                fig.add_annotation(text="{:.2f}".format(cor.iloc[i,j]),
                                   font={'color':'black', 'size':9},
                                   xref='x',yref='y',x=i,y=j,ax=0,ay=0)
        return fig
    def plot_correl_animation(self, title="Rolling Correlation", subset=True, years=1):
        """ Animated Correlation Matrix
        Creates plotly animation with rolling period correlation matrix
        INPUTS:
            years: 1 as default; gets multiplied by self.freq for data range
            subset: True trims the wide frame to ports + their XS columns only
        DEVELOPMENT:
            Having problems with the axis labels overlapping the title
        """
        wide = self.rtns_wide # pull wide returns & xs return series
        if subset:
            wide = wide.iloc[:,:(len(self.rtns.columns[1:])*2-1)]
        corr = wide.corr() # basic correlation matrix
        n = self.freq * years # data rolling window
        # Initial Correlation plot
        fig = px.imshow(corr, x=corr.columns, y=corr.index,
                        title=title, aspect='auto',
                        color_continuous_midpoint=0, zmin=-1, zmax=1)
        # Update Main Plot
        fig.update_traces(dict(colorscale='Tealrose', reversescale=False,
                               showscale=True, coloraxis=None),)
        fig.update_layout(margin = {'l':25, 'r':50, 'b':10, 't':75}, font_size=10)
        fig.update_xaxes(side="top") # sort of struggling with overalapping
        # Add play & pause buttons
        fig["layout"]["updatemenus"] = [
            {"buttons":[{"args":[None,{"frame": {"duration": 500, "redraw": True},
                                       "fromcurrent": True,
                                       "transition": {"duration": 300, "easing": "quadratic-in-out"}}],
                         "label": "Play",
                         "method": "animate"},
                        {"args":[[None],{"frame": {"duration": 0, "redraw": True},
                                         "mode": "immediate",
                                         "transition": {"duration": 0}}],
                         "label": "Stop",
                         "method": "animate"}],
             "direction": "left", "pad": {"r": 0, "t": 20},
             "showactive": False, "type": "buttons",
             "x": 0.1, "xanchor": "right", "y": 0, "yanchor": "top"}]
        ### Animation Stuff: one frame + one slider step per rolling window
        frames = []
        sliders = {'yanchor': 'top',
                   'xanchor': 'left',
                   'currentvalue': {'prefix':'{}m Correlation: '.format(n),
                                    'font':{'size': 10}, 'xanchor': 'left',},
                   'transition': {'duration': 10, 'easing': 'linear'},
                   'pad': {'b': 0, 't': 0}, 'len': 0.88, 'x': 0.12, 'y': 0,
                   'steps':[]}
        for i, v in enumerate(wide.index[n:], n):
            c = wide.iloc[i-n:i,:] # subset ith data
            c = c.corr() # subset correlation matrix
            label = '{:%m/%y}'.format(v) # string label - used to link slider and frame (data things)
            # Add ith correlation matrix to frames list
            frames.append({'name':i, 'layout':{},
                           'data': [dict(type='heatmap',
                                         x=c.index, y=c.index,
                                         z=c.values.tolist(),
                                         zmin=-1, zmax=1)]})
            # Add ith thing to sliders
            sliders['steps'].append({'label':label, 'method': 'animate',
                                     'args':[[i], {'frame':{'duration': 0, 'easing':'linear', 'redraw': True},
                                                   'transition':{'duration': 0, 'easing': 'linear'}}],})
        # Append Frames & Sliders to the figure
        fig['frames'] = frames
        fig['layout']['sliders'] = [sliders]
        return fig
    def plot_master(self, plotly2html=True, plotlyjs='cdn',
                    plot_height=450, plot_width=850):
        """ Aggregation Function that runs ALL plots
        These are saved in a big dictionary self.plots
        INPUTS:
            all related to if we want to save as HTML for output
            this is required for markdown but less useful if used in an app
        NB/ requires run_backtest() to have populated cum_rtn, xsrtns,
        drawdown frames, rolling[12] and corr before being called.
        """
        plots = dict() # dummy dictionary to hold plots
        # Total Return & Excess Return
        plots['tr'] = self.plot_index(self.cum_rtn,
                                      title='Cumulative Returns',
                                      ytitle='Index Level',
                                      risk_free=True,
                                      source=True, y_src=-0.125)
        plots['xsrtn'] = self.plot_index(self.cum_xs_rtn,
                                         title='Excess Returns',
                                         ytitle='Excess Returns',
                                         benchmark=False,
                                         source=True, y_src=-0.125)
        # Return Distributions
        plots['kde_rtns'] = self.plot_ridgeline(
                self.rtns,
                title='Ridgeline KDE Distributions: Returns',
                source=True, y_src=-0.125)
        plots['kde_alpha'] = self.plot_ridgeline(
                self.xsrtns.iloc[:, 1:],
                title='Ridgeline KDE Distributions: Excess Returns',
                source=True, y_src=-0.125)
        # Regression Charts
        plots['regression_rtn'] = self.plot_regression(
                alpha=False,
                title='Return Regression: Port Returns',
                source=True, y_src=-0.125)
        plots['regression_alpha'] = self.plot_regression(
                alpha=True,
                title='Return Regression: Excess Returns',
                source=True, y_src=-0.125)
        plots['histogram'] = self.plot_histo(self.xsrtns,
                                             title='Excess Return Distribution',
                                             source=True, y_src=-0.125)
        # Drawdown Charts
        plots['drawdown'] = self.plot_index(self.drawdown,
                                            title='Drawdown of Returns',
                                            yfmt=['.1%', '.2%'], ytitle='Drawdown',
                                            benchmark=True,
                                            source=True, y_src=-0.125)
        plots['xs_drawdown'] = self.plot_index(self.xs_drawdown,
                                               title='Drawdown of Excess Returns',
                                               yfmt=['.1%', '.2%'], ytitle='Drawdown',
                                               benchmark=False,
                                               source=True, y_src=-0.125)
        # Rolling Plots
        # Rolling Period Charts (12m window built by run_backtest)
        plots['roll_rtn'] = self.plot_index(self.rolling[12]['rtn'],
                                            title='Rolling Return: 12m',
                                            yfmt=['.0%', '.2%'],
                                            ytitle='Return',
                                            height=350,
                                            source=True, y_src=-0.15)
        plots['roll_xsrtn'] = self.plot_index(self.rolling[12]['xsrtn'],
                                              title='Rolling Excess Return: 12m',
                                              yfmt=['.0%', '.2%'],
                                              ytitle='Alpha',
                                              benchmark=False, height=350,
                                              source=True, y_src=-0.15)
        plots['roll_vol'] = self.plot_index(self.rolling[12]['vol'],
                                            title='Rolling Volatility: 12m',
                                            yfmt=['.0%', '.2%'],
                                            ytitle='Volatility',
                                            height=350,
                                            source=True, y_src=-0.15)
        plots['roll_te'] = self.plot_index(
                self.rolling[12]['te'],
                title='Rolling ex-Post TE: 12m',
                yfmt=['.1%', '.2%'], ytitle='Tracking Error',
                benchmark=False, height=350,
                source=True, y_src=-0.15)
        plots['roll_sharpe'] = self.plot_index(
                self.rolling[12]['sharpe'],
                title='Sharpe Ratio: 12m',
                yfmt=['.1f', '.2f'], ytitle='Sharpe Ratio',
                benchmark=False, height=350,
                source=True, y_src=-0.15)
        plots['roll_rar'] = self.plot_index(
                self.rolling[12]['xsrtn'] / self.rolling[12]['vol'],
                title='Risk Adjusted Return: 12m',
                yfmt=['.1f', '.2f'], ytitle='Information Ratio',
                benchmark=False, height=350,
                source=True, y_src=-0.15)
        plots['roll_ir'] = self.plot_index(
                self.rolling[12]['xsrtn'] / self.rolling[12]['te'],
                title='Rolling Information Ratio: 12m',
                yfmt=['.1f', '.2f'], ytitle='IR',
                benchmark=False, height=350,
                source=True, y_src=-0.15)
        # Correlation
        plots['correl_wide'] = self.plot_correl(self.corr)
        plots['correl_animation'] = self.plot_correl_animation(subset=True, years=1)
        # Convert to HTML
        if plotly2html:
            for k, v in plots.items():
                plots[k] = v.to_html(full_html=False,
                                     include_plotlyjs=plotlyjs,
                                     default_height=plot_height,
                                     default_width=plot_width,
                                     )
        # Hitrate Plots
        # These come after we've already converted most to plotly
        # that is because this is an embedded dictionary & we've already
        # converted the plotly
        plots['hitrate'] = self.plot_hitrates(show=False,
                                              plotly2html=plotly2html,
                                              plotlyjs=plotlyjs,
                                              plot_height=300,
                                              plot_width=750)
        self.plots = plots
        return plots
# %% REPORTING
def pretty_panda(self, df):
""" Styler for the Back-Test Summary Table
This is the basic styler which applies some default styling to a df.
This shit is tedious - look at the following links if confused
https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
https://pbpython.com/styling-pandas.html
https://towardsdatascience.com/style-pandas-dataframe-like-a-master-6b02bf6468b0
"""
# When we reset_index() the index becomes the first column
# by default we style that so need the column name as an indexor
faux_index = df.columns[0]
## DataFrame Styler Default for Headers & Captions
# Sort "td" with .set_properties because it's easer to override
styles = [dict(selector="th",
props=[("font-family", "Garamond"),
('padding', "5px 5px"),
("font-size", "15px"),
("background-color", "black"),
("color", "white"),
("text-align", "center"),
('border', '1px solid black')]),
dict(selector="caption",
props=[("text-align", "right"),
("caption-side", "bottom"),
("font-size", "85%"),
("color", 'grey')]),]
df = df.style.hide_index()\
.set_table_styles(styles)\
.set_caption('Source: STANLIB Multi-Strategy')\
.set_table_attributes('style="border-collapse:collapse"')\
.set_precision(3)\
.highlight_null(null_color='white')\
.set_properties(**{"font-family": "Garamond",
"font-size": "14px",
"text-align": "center",
"border": "1px solid black",
"padding": "5px 5px",
"min-width": "70px"})\
.applymap(lambda x: 'color: white' if x== 0 else 'color: black')\
.set_properties(subset=[faux_index],
**{'font-weight':'bold',
'color':'white',
'background-color':'teal',
"text-align": "justify",
'min-width':'115px'})\
return df
def pretty_panda_summary(self):
""" Styler for the Back-Test Summary Table
This shit is tedious - look at the following links if confused
https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
https://pbpython.com/styling-pandas.html
https://towardsdatascience.com/style-pandas-dataframe-like-a-master-6b02bf6468b0
"""
df = self.backtest_summary()
# duplicate the index in the dataframe
# we don't just hide because we want to show & reference the index
m = df.index
m.name = 'Metric'
x = pd.concat([m.to_frame(), df], axis=1).fillna(0)
x = self.pretty_panda(x)
## Generally set to 0.1%; few things as 0.02; zeros have white text
x = x.format(formatter="{:.1%}", subset=pd.IndexSlice[:, x.columns[1:]])\
.format(formatter="{:.2f}", subset=pd.IndexSlice[['RaR', 'Sharpe', 'Beta', 'IR'], x.columns[1:]])\
## Conditional Format Bits
# These Include the Benchmark
y = [['TR', 'Sharpe', 'RaR', 'Max_Drawdown'], x.columns[1:]]
x = x.highlight_max(color='lightseagreen', subset=pd.IndexSlice[y[0], y[1]], axis=1)
x = x.highlight_min(color='crimson', subset=pd.IndexSlice[y[0], y[1]], axis=1)
# These only make sense if there is more than one port being tested
if len(df.columns) > 2:
y = [['IR', 'Hitrate'], x.columns[2:]]
x = x.highlight_max(color='lightseagreen', subset=pd.IndexSlice[y[0], y[1]], axis=1)
x = x.highlight_min(color='crimson', subset=pd.IndexSlice[y[0], y[1]], axis=1)
return x
    def pretty_panda_drawdown(self, alpha=True):
        """Styler for the annualised drawdown tables.

        Picks the excess-return or total-return drawdown table, keeps the 10
        deepest drawdowns, and date-formats start/trough/end columns —
        skipping 'end' for drawdowns that have not yet recovered (NaT).

        Args:
            alpha: if True style the excess-return drawdowns, else totals.
        """
        # Pick from dataframe based on whether we want excess returns.
        # Sort by drawdown & keep only the deepest 10.
        x = self.xs_drawdown_table if alpha else self.drawdown_table
        x = x.sort_values(by='drawdown', ascending=False).tail(10)
        # Useful for indexing — the formatting in pandas can't take NaT,
        # so find the rows whose recovery/end dates actually exist.
        idxna = ~x['recovery'].isna()
        # General styling (bold teal faux-index column etc.)
        x = self.pretty_panda(x.reset_index())
        # 'end' is only date-formatted for recovered drawdowns (idxna rows).
        return x.format(dict(start='{:%b-%y}', trough='{:%b-%y}', drawdown='{:.1%}'))\
                .format(formatter="{:%b-%y}", subset=pd.IndexSlice[x.index[idxna], ['end']])\
                .background_gradient('RdYlGn', subset='drawdown')
def pretty_panda_annual(self, key='rtn'):
""" Styling for Tables of Annualised Metrics
These are calc'd by self.per_annum() which is itself in self.run_backtest()
Tables are stored in a dict() called self.summary_pa
Each table needs stubtly different styling
INPUT:
key: refers to the key from self.summary_pa
"""
pa = self.summary_pa
# subtle differences depending on if we want Rb or not
# also subtle difference in if is .2% of .2f
if key in ['rtn']:
x = self.pretty_panda(pa[key].dropna().iloc[:,1:].reset_index())
x = x.format(formatter="{:.1%}", subset=pd.IndexSlice[:, x.columns[1:]])
x = x.background_gradient('RdYlGn', subset=pd.IndexSlice[:, x.columns[1:]],)
elif key in ['vol']:
x = self.pretty_panda(pa[key].dropna().iloc[:,1:].reset_index())
x = x.format(formatter="{:.1%}", subset=pd.IndexSlice[:, x.columns[1:]])
x = x.background_gradient('RdYlGn_r', subset=pd.IndexSlice[:, x.columns[1:]],)
elif key in ['sharpe']:
x = self.pretty_panda(pa[key].dropna().iloc[:,1:].reset_index())
x = x.format(formatter="{:.2f}", subset=pd.IndexSlice[:, x.columns[1:]])
x = x.background_gradient('RdYlGn', vmin=-2, vmax=+3, subset=pd.IndexSlice[:, x.columns[1:]],)
elif key in ['alpha']:
x = self.pretty_panda(pa[key].dropna().iloc[:,2:].reset_index())
x = x.format(formatter="{:.1%}", subset=pd.IndexSlice[:, x.columns[1:]])
x = x.background_gradient('RdYlGn', vmin=-0.05, vmax=+0.05, subset=pd.IndexSlice[:, x.columns[1:]],)
elif key in ['te']:
x = self.pretty_panda(pa[key].dropna().iloc[:,2:].reset_index())
x = x.format(formatter="{:.1%}", subset=pd.IndexSlice[:, x.columns[1:]])
x = x.background_gradient('RdYlGn_r', vmin=0, vmax=0.06, subset=pd.IndexSlice[:, x.columns[1:]],)
elif key in ['ir']:
x = self.pretty_panda(pa[key].dropna().iloc[:,3:].reset_index())
x = x.format(formatter="{:.2f}", subset=pd.IndexSlice[:, x.columns[1:]])
return x
    def markdown_doc(self, title="TEST"):
        """Assemble the full back-test report as a single Markdown string.

        Stitches together headline tables (rendered Stylers) and the
        pre-built plot HTML from ``self.plots``; sections are Summary,
        Returns, Risk & Drawdowns, Risk-Adjusted, Distributions, Hit-Rate
        and Correlation. Assumes ``self.plots`` has already been populated
        (plot values must be HTML strings, i.e. plotly2html was used).

        Args:
            title: report title shown under the main heading.

        Returns:
            The report as one Markdown/HTML string.
        """
        md = []    # accumulate sections; joined with blank lines at the end
        # Title
        md.append("# STANLIB Multi-Strategy Backtest")
        md.append("### Report: {}".format(title))
        md.append("Returns based backtest comparing portfolio(s) against the \
                  {} benchmark; risk-free return are proxied by the {} index. \
                  Data contains {} monthly observations running from \
                  {:%b-%y} to {:%b-%y}. \
                  \n \n".format(self.Rb, self.Rf,
                                len(self.rtns.index),
                                self.rtns.index[0],
                                self.rtns.index[-1],))
        md.append("## Summary")
        md.append(self.pretty_panda_summary().render())
        md.append("Annualised 'risk-free' return of the {} index over the \
                  period was {:.2%}. \n \n".format(self.Rf, self.Rf_obs_rtn))
        ## Risk & Return
        md.append("## Portfolio Returns")
        md.append(self.plots['tr'])
        md.append(self.plots['xsrtn'])
        md.append(self.plots['roll_rtn'])
        md.append(self.pretty_panda_annual('rtn').render())
        md.append("\n \n ")
        md.append(self.plots['roll_xsrtn'])
        md.append(self.pretty_panda_annual('alpha').render())
        ## Portfolio Risk & Drawdown
        md.append("## Portfolio Risk & Drawdowns")
        md.append(self.plots['roll_vol'])
        md.append(self.pretty_panda_annual('vol').render())
        md.append("\n \n ")
        md.append(self.plots['roll_te'])
        md.append(self.pretty_panda_annual('te').render())
        md.append("\n \n ")
        md.append(self.plots['xs_drawdown'])
        md.append(self.pretty_panda_drawdown(alpha=True).render())
        md.append(self.plots['drawdown'])
        md.append(self.pretty_panda_drawdown(alpha=False).render())
        ## Rolling Risk Adjusted Measures
        md.append("## Risk Adjusted Returns - Rolling")
        md.append(self.plots['roll_sharpe'])
        md.append(self.pretty_panda_annual('sharpe').render())
        md.append("\n \n ")
        md.append(self.plots['roll_ir'])
        md.append(self.plots['roll_rar'])
        ## Regression & Return Distributions
        md.append("## Return Distribution")
        md.append(self.plots['kde_rtns'])
        md.append(self.plots['kde_alpha'])
        md.append(self.plots['histogram'])
        md.append("Visualising return or alpha regressions adds colour to CAPM Beta. \
                  Steeper regression lines indicate higher Beta whilst R<sup>2</sup> gives \
                  an impression of the correlation; look for non-linearity \
                  that may be missed in headline metrics.")
        md.append(self.plots['regression_rtn'])
        md.append(self.plots['regression_alpha'])
        # Hitrate
        md.append("## Hit Rate Analysis")
        md.append("Here we aren't interested in the quantum of return, \
                  simply the binary outcome per month. Heatmaps will show \
                  month-by-month experience as either +1 or 0. \
                  For annualised analysis we look at the percentage monthly hit-rate \
                  over a calendar year; subject to a minimum of 3-observations. \n \n")
        md.append(self.plots['hitrate']['annual'].render())
        # hitrate plots are a nested dict; skip the (already appended) table
        for p in self.plots['hitrate']:
            if p == 'annual':
                continue
            md.append(self.plots['hitrate'][p])
        # Correlation Analysis
        md.append("## Correlation Review")
        md.append("We present the correlation matrix for the full sample period, \
                  showing both the Portfolio returns and the Alpha stream. \
                  Additionally we include a series of strategic asset classes \
                  relevant for multi-asset portfolios. \n ")
        md.append(self.plots['correl_wide'])
        md.append(self.plots['correl_animation'])
        md.append("\n \n")
        return "\n \n".join(md)
# %% TEST CODE
# import xlwings as xlw
# wb = xlw.Book('BACKTEST.xlsm')
# # index data from timeseries sheet
# benchmarks = wb.sheets['TIMESERIES'].range('D1').options(pd.DataFrame, expand='table').value.iloc[3:,:]
# benchmarks.index = pd.to_datetime(benchmarks.index)
# E = wb.sheets['Enhanced'].range('A1').options(pd.DataFrame, expand='table').value.iloc[:,1]
# C = wb.sheets['Core'].range('A1').options(pd.DataFrame, expand='table').value.iloc[:,1]
# E.index = E.index + pd.offsets.MonthEnd(0)
# C.index = C.index + pd.offsets.MonthEnd(0)
# E.name = 'Enhanced'
# C.name = 'Core'
# rtns = pd.concat([E, C], axis=1).dropna()
# x = 0.3
# rtns['E30'] = rtns['Enhanced'] * x + rtns['Core'] * (1 - x)
# bt = BacktestAnalytics(rtns, benchmarks, bmks_as_rtns=False, benchmark='SWIX', Rf='STEFI')
# md = bt.big_bang(title="TEST")
# from topgun.reporting import Reporting
# Reporting().md2html(md=md, title='test')
#print(df)
#x = bt.rolling | StarcoderdataPython |
1627897 | <filename>src/api2db/ingest/api2pandas.py
# -*- coding: utf-8 -*-
"""
Contains the Api2Pandas class
=============================
"""
from ..app.log import get_logger
from .api_form import ApiForm
import pandas as pd
import os
from typing import Union, Callable
class Api2Pandas(object):
    """Used to extract incoming data from an API into a pandas DataFrame"""
    def __init__(self, api_form: Callable[[], ApiForm]):
        """
        Creates a Api2Pandas object and loads its ApiForm

        Args:
            api_form: The function that generates the ApiForm for the associated collector
        """
        # api_form is a zero-arg factory; build the ApiForm once and keep it.
        self.api_form = api_form()
    def dependencies_satisfied(self) -> bool:
        """
        Checks to ensure any data-linking dependency files exist

        This feature currently only exists for :py:class:`api2db.ingest.post_process.merge_static.MergeStatic`

        Returns:
            True if all dependencies are satisfied, otherwise False
        """
        logger = get_logger()
        res = True
        for pre in self.api_form.pre_process:
            # NOTE(review): `pre.ctype in []` can never be True, so the
            # pre-process dependency check below is dead code — presumably a
            # stub where ctypes with file dependencies were to be listed.
            if pre.ctype in []:
                if not os.path.isfile(pre.path):
                    logger.warning(f"Missing PreProcess Dependency File: {pre.path}")
                    res = False
        for post in self.api_form.post_process:
            # Only merge_static post-processors carry a file dependency.
            if post.ctype in ["merge_static"]:
                if not os.path.isfile(post.path):
                    logger.warning(f"Missing PostProcess Dependency File: {post.path}")
                    res = False
        return res
    def extract(self, data: dict) -> Union[pd.DataFrame, None]:
        """
        Performs data-extraction from data arriving from an API.

        Workflow:

            1. Perform all pre-processing on data
            2. Perform all data-feature extraction
            3. Perform all post-processing on data
            4. Return a DataFrame containing the cleaned data.

        Args:
            data: The data arriving from an API to perform data extraction on.

        Returns:
            The cleaned data if it is possible to clean the data otherwise None
        """
        # Global extraction dictionary: key -> {"value": ..., "dtype": ...}
        pre_2_post = {}
        # For each pre-processor
        for pre in self.api_form.pre_process:
            # If the pre-processor is a global extraction, add the feature extracted to the global extraction dictionary
            if pre.ctype == "global_extract":
                pre_2_post[pre.key] = pre(lam_arg=data)
            else:
                # Perform the pre-processor and replace the existing data with the new data
                data = pre(lam_arg=data)
        # A pre-processor signalled failure by returning None — abort early.
        if data is None:
            return data
        rows = []
        # For each row in the data
        for data_point in data:
            row = {}
            # Extract all the features from the row
            for feat in self.api_form.data_features:
                row[feat.key] = feat(data_point)
            rows.append(row)
        # Create the DataFrame from the rows
        df = pd.DataFrame(rows)
        # Cast the DataFrame to the correct dtypes
        df = df.astype(self.api_form.pandas_typecast())
        # Broadcast all globally extracted values as constant columns.
        for k, v in pre_2_post.items():
            df[k] = v["value"]
            df[k] = df[k].astype(self.api_form.typecast(v["dtype"]))
        # For each post-processor
        for post in self.api_form.post_process:
            if post.ctype == "futures":  # FUTURES MAY REQUIRE DIFFERENT OPERATIONS
                pass
            else:
                # Perform the post-processing operation on the DataFrame
                df = post(df)
        # Drop the intermediate index so rows are numbered 0..n-1.
        df = df.reset_index(drop=True)
        # Return the clean Data Hooray!
        return df
| StarcoderdataPython |
# Tag -> matching rule. Each entry may define a 'range' (start, end) pair
# checked exclusively by ipv4_in(), and/or a 'single' exact-match address
# (none defined here, but extract_tag() supports it).
# NOTE(review): several ranges have start > end under octet-wise comparison
# (e.g. 'google'), which makes ipv4_in() always False for them — this looks
# like anonymised sample data; confirm real ranges before relying on it.
IP_INFO = {
    'facebook': {
        'range': ('192.168.3.11', '192.168.127.12'),
    },
    'google': {
        'range': ('192.168.127.12', '192.168.3.11')
    },
    'youtube': {
        'range': ('192.168.127.12', '172.16.58.3')
    },
    'amazon': {
        'range': ('172.16.17.32', '172.16.58.3')
    },
    'github': {
        'range': ('172.16.58.3', '172.16.31.10')
    }
}
def convert_ipv4(ip):
    """Parse a dotted-quad IPv4 string into a tuple of four ints."""
    return tuple(map(int, ip.split('.')))


def ipv4_in(addr, ip_range):
    """Return True if *addr* lies strictly between ip_range = (start, end).

    Comparison is octet-wise (tuple comparison); both endpoints excluded.
    """
    lo, hi = ip_range
    return convert_ipv4(lo) < convert_ipv4(addr) < convert_ipv4(hi)
def extract_tag(ip_address):
    """Return the tag whose 'single' IP or 'range' matches, else None.

    Exact 'single' matches are checked before ranges for each tag.
    """
    for tag, info in IP_INFO.items():
        single_ip = info.get('single')
        ip_range = info.get('range')
        if single_ip and single_ip == ip_address:
            return tag
        if ip_range and ipv4_in(ip_address, ip_range):
            return tag
5141133 | <reponame>trhongbinwang/data_science_journey
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
########## environment setup ################
import os
import sys
# Make the local Spark installation's Python bindings importable:
# requires SPARK_HOME to point at the Spark install directory.
spark_home = os.environ.get('SPARK_HOME', None)
print(spark_home)
if not spark_home:
    raise ValueError('SPARK_HOME environment variable is not set')
sys.path.insert(0, os.path.join(spark_home, 'python'))
# NOTE(review): the py4j zip name is version-specific — adjust on your
# system depending on which Spark version you're using and where it lives.
sys.path.insert(0, os.path.join(spark_home, 'python/lib/py4j-0.10.4-src.zip')) ## may need to adjust on your system depending on which Spark version you're using and where you installed it.
##############################
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import Binarizer
# $example off$
def create_data(spark):
    """Build the toy continuous-feature DataFrame for the binarizer demo.

    Columns: "id" (int) and "feature" (float in [0, 1]).
    """
    rows = [(0, 0.1), (1, 0.8), (2, 0.2)]
    return spark.createDataFrame(rows, ["id", "feature"])
def pre_processing(continuousDataFrame):
    """Binarize the 'feature' column at threshold 0.5 and print the result."""
    thresholder = Binarizer(threshold=0.5, inputCol="feature", outputCol="binarized_feature")
    binarized = thresholder.transform(continuousDataFrame)
    print("Binarizer output with Threshold = %f" % thresholder.getThreshold())
    binarized.show()
if __name__ == "__main__":
# initialize spark session
spark = SparkSession\
.builder\
.appName("BinarizerExample")\
.getOrCreate()
# create data
continuousDataFrame = create_data(spark)
# binarizer
pre_processing(continuousDataFrame)
# stop
spark.stop()
| StarcoderdataPython |
13954 | import sys
import PyQt5.QtWidgets as qtw
import PyQt5.QtCore as qtc
from Image import Image
from main_layout import Ui_MainWindow
import logging
import os
# Module logger: INFO-level messages are appended to a file named 'log'
# in the working directory.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(asctime)s - %(message)s')
file_handler = logging.FileHandler('log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class MainWindow(qtw.QMainWindow):
    """Main window of an FFT image-mixer.

    Loads two equally-sized images, displays a chosen frequency component
    of each (magnitude/phase/real/imaginary), and mixes weighted component
    pairs from the two images into two output channels.

    NOTE(review): widget names come from the generated ``Ui_MainWindow``
    (main_layout.py); confirm against that file before renaming anything.
    """
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.show()
        # Per-input-channel widget handles: original view, filtered view,
        # and the component-picker combo box.
        self.images = {
            '1': {
                'original': self.ui.image_1_original,
                'filtered': self.ui.image_1_after_filter,
                'picker': self.ui.image_1_pick
            },
            '2': {
                'original': self.ui.image_2_original,
                'filtered': self.ui.image_2_after_filter,
                'picker': self.ui.image_2_pick
            }
        }
        # Loaded images: 'Image 1'/'Image 2' -> {'image': Image, 'widgets': ...}
        self.img = {}
        # Mixing mode per output channel: 'mag-phase' or 'real-imag'
        self.modes = {'Output 1': '', 'Output 2': ''}
        # Display widget per output channel
        self.output_channels = {
            'Output 1': self.ui.output_1,
            'Output 2': self.ui.output_2
        }
        # Persisted mixer-control state per output channel, so switching
        # channels restores each channel's own slider/combobox values.
        self.output_channels_controlers = {
            '': {
                'select1': '',
                'select2': '',
                'slider1': 0,
                'slider2': 0,
                'type1': '',
                'type2': '',
                'percentage1': 0,
                'percentage2': 0,
            },
            'Output 1': {
                'select1': '',
                'select2': '',
                'slider1': 0,
                'slider2': 0,
                'type1': '',
                'type2': '',
                'percentage1': 0,
                'percentage2': 0,
            },
            'Output 2': {
                'select1': '',
                'select2': '',
                'slider1': 0,
                'slider2': 0,
                'type1': '',
                'type2': '',
                'percentage1': 0,
                'percentage2': 0,
            },
        }
        # For a given component-1 type, the component-2 types that can be
        # meaningfully combined with it (e.g. Magnitude pairs with Phase).
        self.output_complementary = {
            '': ['', 'Magnitude', 'Phase', 'Real', 'Imaginary', 'Uniform Magnitude', 'Uniform Phase'],
            'Magnitude': ['Phase', 'Uniform Phase'],
            'Phase': ['Magnitude', 'Uniform Magnitude'],
            'Real': ['Imaginary'],
            'Imaginary': ['Real'],
            'Uniform Magnitude': ['Phase', 'Uniform Phase'],
            'Uniform Phase': ['Magnitude', 'Uniform Magnitude'],
        }
        # Image names selectable in the mixer combo boxes.
        self.available_images = {
            '': ''
        }
        # Widget groups enabled/disabled together, keyed by the control
        # whose value gates them.
        self.enables = {
            '': [self.ui.component_1_select, self.ui.component_2_select, self.ui.component_1_percentage,
                 self.ui.component_1_slider, self.ui.component_1_type,
                 self.ui.component_2_percentage, self.ui.component_2_slider, self.ui.component_2_type],
            'output-select': [self.ui.component_1_select, self.ui.component_2_select],
            'select1': [self.ui.component_1_percentage, self.ui.component_1_type],
            'select2': [self.ui.component_2_percentage, self.ui.component_2_type],
            'type1': [self.ui.component_1_slider],
            'type2': [self.ui.component_2_slider]
        }
        self.current_output_channel = None
        # Menu / widget signal wiring.
        self.ui.action_new.triggered.connect(self.new_instance)
        self.ui.action_exit.triggered.connect(self.close)
        self.ui.action_open_image_1.triggered.connect(lambda: self.open_image(self.images['1'], 1))
        self.ui.action_open_image_2.triggered.connect(lambda: self.open_image(self.images['2'], 2))
        self.ui.image_1_pick.currentIndexChanged.connect(lambda: self.display_component(self.img['Image 1']))
        self.ui.image_2_pick.currentIndexChanged.connect(lambda: self.display_component(self.img['Image 2']))
        self.ui.output_select.currentIndexChanged.connect(lambda: self.pick_mixer_output())
        self.ui.component_1_select.currentIndexChanged.connect(lambda: self.select_enable('select1', self.ui.component_1_select.currentText()))
        self.ui.component_2_select.currentIndexChanged.connect(lambda: self.select_enable('select2', self.ui.component_2_select.currentText()))
        self.ui.component_1_slider.sliderReleased.connect(lambda: self.mixer('slider1', str(self.ui.component_1_slider.value())))
        self.ui.component_2_slider.sliderReleased.connect(lambda: self.mixer('slider2', str(self.ui.component_2_slider.value())))
        self.ui.component_1_percentage.valueChanged.connect(lambda: self.change_image('percentage1', str(self.ui.component_1_percentage.value())))
        self.ui.component_2_percentage.valueChanged.connect(lambda: self.change_image('percentage2', str(self.ui.component_2_percentage.value())))
        self.ui.component_1_type.currentIndexChanged.connect(lambda: self.component_1_conplementary())
        self.ui.component_1_type.currentIndexChanged.connect(lambda: self.select_enable('type1', str(self.ui.component_1_type.currentText())))
        self.ui.component_2_type.currentIndexChanged.connect(lambda: self.select_enable('type2', str(self.ui.component_2_type.currentText())))
    def new_instance(self) -> None:
        """Open another independent MainWindow (File > New)."""
        self.child_window = MainWindow()
        self.child_window.show()
    def open_image(self, imageWidget: dict, channel: int) -> None:
        """Load an image into input *channel* (1 or 2).

        Enforces that both loaded images have the same size; rejects the
        new image with a warning dialog otherwise.
        """
        image = Image()
        # User cancelled the file dialog — nothing to do.
        if not image.path:
            return
        if len(self.img) == 1:
            # One image already loaded: if it's the *other* channel, the new
            # image must match its size before we accept it.
            if f'Image {2//channel}' in self.img:
                if not image.compare(self.img[f'Image {2//channel}']['image']):
                    qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
                    return
                else :
                    self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
                    if f'Image {channel}' not in self.available_images:
                        self.available_images[f'Image {channel}'] = f'Image {channel}'
                        # Both images now available for the mixer.
                        self.append_outputs(isOneChanneled=False)
            else :
                # Re-loading the same channel that was already populated.
                self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
        elif len(self.img) >= 2:
            # Both channels populated: replace this channel in place,
            # still enforcing matching sizes.
            if not image.compare(self.img[f'Image {2//channel}']['image']):
                qtw.QMessageBox.warning(self, 'failed', 'The Two Images Must be of the same size')
                return
            self.img[f'Image {channel}']["image"] = image
            self.img[f'Image {channel}']["widgets"] = imageWidget
        else :
            # First image ever loaded.
            self.img[f'Image {channel}'] = {'image': image, 'widgets': imageWidget}
            if f'Image {channel}' not in self.available_images:
                self.available_images[f'Image {channel}'] = f'Image {channel}'
                self.append_outputs(channel=self.available_images[f'Image {channel}'])
        imageWidget['original'].setPixmap(image.get_pixmap().scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
        imageWidget['picker'].setDisabled(False)
        self.ui.output_select.setDisabled(False)
    def append_outputs(self, isOneChanneled: bool=True, channel: str='') -> None:
        """Populate the mixer's image-select combo boxes.

        With one image loaded only that image is offered; once both are
        loaded (isOneChanneled=False) both appear in each combo box.
        """
        if isOneChanneled:
            self.ui.component_1_select.addItem('')
            self.ui.component_2_select.addItem('')
            self.ui.component_1_select.setItemText(0, '')
            self.ui.component_1_select.setItemText(1, channel)
            self.ui.component_2_select.setItemText(0, '')
            self.ui.component_2_select.setItemText(1, channel)
        else:
            self.ui.component_1_select.addItem('')
            self.ui.component_2_select.addItem('')
            self.ui.component_1_select.setItemText(0, '')
            self.ui.component_1_select.setItemText(1, 'Image 1')
            self.ui.component_1_select.setItemText(2, 'Image 2')
            self.ui.component_2_select.setItemText(0, '')
            self.ui.component_2_select.setItemText(1, 'Image 1')
            self.ui.component_2_select.setItemText(2, 'Image 2')
    def display_component(self, imageWidget: dict) -> None:
        """Render the currently picked FFT component in the filtered view."""
        component = imageWidget['widgets']['picker'].currentText()
        imageWidget['widgets']['filtered'].setPixmap(imageWidget['image'].get_component_pixmap(component).scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
        # Best-effort cleanup of the temp file the Image class writes;
        # ignored if it doesn't exist.
        try:
            os.remove('test.png')
        except:
            pass
    def pick_mixer_output(self) -> None:
        """Switch the active output channel, restoring its saved controls."""
        self.current_output_channel = self.ui.output_select.currentText()
        self.ui.component_1_slider.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['slider1']))
        self.ui.component_1_percentage.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['percentage1']))
        self.ui.component_1_select.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['select1'])
        self.ui.component_1_type.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['type1'])
        self.ui.component_2_slider.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['slider2']))
        self.ui.component_2_percentage.setValue(int(self.output_channels_controlers[self.ui.output_select.currentText()]['percentage2']))
        self.ui.component_2_select.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['select2'])
        self.ui.component_2_type.setCurrentText(self.output_channels_controlers[self.ui.output_select.currentText()]['type2'])
        # Mixer controls are only usable once a real channel is selected.
        if self.ui.output_select.currentText() != '':
            self.set_mixer_components_disabled(self.enables['output-select'] ,False)
        else:
            self.set_mixer_components_disabled(self.enables['output-select'], True)
    def set_mixer_components_disabled(self, components: list, logic: bool) -> None:
        """Enable (logic=False) or disable (logic=True) a group of widgets."""
        for component in components:
            component.setDisabled(logic)
    def select_enable(self, component: str, value: str):
        """Persist a control's new value and gate its dependent widgets."""
        self.change_image(component, value)
        if value != '':
            self.set_mixer_components_disabled(self.enables[component], False)
        else:
            self.set_mixer_components_disabled(self.enables[component], True)
    def change_image(self, component: str, value: str) -> None:
        """Record a control value for the currently active output channel."""
        self.output_channels_controlers[self.current_output_channel][component] = value
    def component_1_conplementary(self) -> None:
        """Restrict component-2 type choices to those compatible with type 1.

        NOTE(review): method name contains a typo ('conplementary') but is
        kept — it is wired to a signal above.
        """
        self.ui.component_2_type.clear()
        self.ui.component_2_type.addItems(self.output_complementary[self.ui.component_1_type.currentText()])
        self.ui.component_2_type.update()
        self.change_image('type1', self.ui.component_1_type.currentText())
    def mixer(self, slider: str, sliderValue: str) -> None:
        """Recompute and display the mix for the active output channel.

        Triggered on slider release; does nothing until both images and
        both component types have been chosen.
        """
        self.change_image(slider, sliderValue)
        # Slider values are stored as strings of 0..100 — convert to ratios.
        channel_1_ratio = float(self.output_channels_controlers[self.current_output_channel]['slider1']) / 100
        channel_2_ratio = float(self.output_channels_controlers[self.current_output_channel]['slider2']) / 100
        image_1 = self.output_channels_controlers[self.current_output_channel]['select1']
        image_2 = self.output_channels_controlers[self.current_output_channel]['select2']
        type1 = self.output_channels_controlers[self.current_output_channel]['type1']
        type2 = self.output_channels_controlers[self.current_output_channel]['type2']
        if image_1 == "" or image_2 == "" or type1 == "" or type2 == "":
            return
        # NOTE(review): the bare excepts below silence *all* mixing errors —
        # consider narrowing them when this code is next touched.
        try:
            # Pick the mixing mode from the chosen component pair.
            if (type1 in ['Magnitude', 'Phase', 'Uniform Magnitude', 'Uniform Phase']
                    and type2 in ['Magnitude', 'Phase', 'Uniform Magnitude', 'Uniform Phase']):
                self.modes[self.current_output_channel] = 'mag-phase'
            elif (type1 in ['Real', 'Imaginary']and type2 in ['Real', 'Imaginary']):
                self.modes[self.current_output_channel] = 'real-imag'
            else:
                print('Error')
                return
            self.outImage = self.img[image_1]['image'].mix(self.img[image_2]['image'], self.output_channels_controlers[self.current_output_channel]['type1'], self.output_channels_controlers[self.current_output_channel]['type2'], channel_1_ratio, channel_2_ratio, self.modes[self.current_output_channel])
            self.output_channels[self.current_output_channel].setPixmap(self.outImage.scaled(300,300, aspectRatioMode=qtc.Qt.KeepAspectRatio, transformMode=qtc.Qt.SmoothTransformation))
        except:
            pass
        # Best-effort cleanup of the temp file written during mixing.
        try:
            os.remove('test.png')
        except:
            pass
def main_window():
    """Create the Qt application, show the main window, and run the loop."""
    app = qtw.QApplication(sys.argv)
    app.setStyle("Fusion")
    window = MainWindow()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main_window()
374427 | <reponame>dunitian/BaseCode<gh_stars>10-100
from multiprocessing.dummy import Pool as ThreadPool, Condition
s_list = []
con = Condition()
def Shop(i):
global con
global s_list
# 加锁保护共享资源
for x in range(5):
with con:
s_list.append(x)
print(f"[生产者{i}]生产商品{x}")
con.notify_all() # 通知消费者有货了
def User(i):
global con
global s_list
while True:
with con:
if s_list:
print(f"列表商品:{s_list}")
name = s_list.pop() # 消费商品
print(f"[消费者{i}]消费商品{name}")
print(f"列表剩余:{s_list}")
else:
con.wait()
def main():
p = ThreadPool()
# 两个生产者
p.map_async(Shop, range(2))
# 五个消费者
p.map_async(User, range(5))
p.close()
p.join()
if __name__ == '__main__':
main()
| StarcoderdataPython |
# %load ../imports.py
# NOTE(review): this is a Jupyter notebook cell dump — the %-prefixed lines
# are IPython magics, so this file is not importable as plain Python.
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
# Default figure size for both matplotlib and seaborn.
width=20
height=3
plt.rcParams["figure.figsize"] = (width,height)
sns.set(rc={'figure.figsize':(width,height)})
#import seaborn as sns
import os
from collections import OrderedDict
from IPython.display import display
# Show full (wide) DataFrames in notebook output.
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.set_option("display.max_columns", None)
import folium
import plotly.express as px
import plotly.graph_objects as go
import sys
import os
from sklearn.metrics import r2_score
import scipy.integrate
import seaborn as sns
import pyarrow as pa
import pyarrow.parquet as pq
import dask.dataframe
import statsmodels.api as sm
#sys.path.append('../')
from src.visualization import visualize
import scipy.integrate
# NOTE(review): the try/except blocks below fall back to importing via a
# pipeline package and extending sys.path with a module-level `.path`
# attribute — presumably set in those packages' __init__; confirm.
try:
    import trip_statistics
except:
    import src.models.pipelines.longterm.scripts.prepdata.trip_statistics
    sys.path.insert(0, src.models.pipelines.longterm.scripts.prepdata.trip_statistics.path)
    import trip_statistics

try:
    import trip_id,prepare_dataset,trips
except:
    import src.models.pipelines.longterm.scripts.prepdata.trip
    sys.path.insert(0, src.models.pipelines.longterm.scripts.prepdata.trip.path)
    import trip_id,prepare_dataset,trips

try:
    import clean_statistics
except:
    import src.models.pipelines.longterm.scripts.prepdata.clean_statistics
    sys.path.insert(0, src.models.pipelines.longterm.scripts.prepdata.clean_statistics.path)
    import clean_statistics
6699782 | <filename>setup.py<gh_stars>0
from setuptools import setup, find_namespace_packages
from setuptools_rust import Binding, RustExtension, Strip

# Package metadata and build configuration for the fluvio Python client.
# The native core is a Rust extension compiled from Cargo.toml via
# setuptools-rust (RustCPython binding, release build).
setup(
    name='fluvio',
    version="0.10.0",
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author = "<NAME>",
    description='Python client library for Fluvio',
    python_requires='>=3.6',
    url='https://www.fluvio.io/',
    keywords=['fluvio', 'streaming', 'stream'],
    license='APACHE',
    author_email = "<EMAIL>",
    # NOTE(review): setup_requires is deprecated in favour of PEP 518
    # build-system requires in pyproject.toml — confirm packaging setup.
    setup_requires=['wheel'],
    project_urls={  # Optional
        'Bug Reports': 'https://github.com/infinyon/fluvio-client-python/issues',
        'Source': 'https://github.com/infinyon/fluvio-client-python',
    },
    # For a list of valid classifiers, see https://pypi.org/classifiers/
    classifiers=[  # Optional
        # Indicate who your project is intended for
        'Intended Audience :: Developers',

        # Pick your license as you wish
        'License :: OSI Approved :: Apache Software License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate you support Python 3. These classifiers are *not*
        # checked by 'pip install'. See instead 'python_requires' below.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3 :: Only',
    ],
    rust_extensions=[RustExtension("fluvio._fluvio_python", path="Cargo.toml", binding=Binding.RustCPython, debug=False)],
    packages=["fluvio"],
    # rust extensions are not zip safe, just like C-extensions.
    zip_safe=False,
)
| StarcoderdataPython |
8151844 | <filename>y2020/d24.py
from aoc_input import get_input
import aoc_helpers as ah
import re
DAY = 24
YEAR = 2020

# Tokenizer for a line of hex moves; two-letter directions must be listed
# after... actually before-matching works because 'e'/'w' alternatives come
# first only for single chars — regex alternation here still matches 'nw'
# etc. because 'n'/'s' alone are not alternatives.
RE_STEPS = re.compile("e|w|nw|ne|sw|se")

# Hex-grid moves in an offset coordinate scheme:
# value = ((dx_even_row, dx_odd_row), dy); the x delta to apply is picked
# by the current row's parity (y & 1) — see sol0.
STEPS = {
    "e":  (( 1,  1),  0),
    "w":  ((-1, -1),  0),
    "nw": ((-1,  0), -1),
    "ne": (( 0,  1), -1),
    "sw": ((-1,  0),  1),
    "se": (( 0,  1),  1),
}
def sol0(pzin):
    """Walk each instruction line from the origin and toggle the tile reached.

    Returns (number of black tiles, set of black-tile coordinates).
    """
    flipped = set()
    for line in pzin:
        pos_x, pos_y = 0, 0
        for step in RE_STEPS.findall(line):
            dx_pair, dy = STEPS[step]
            # x offset depends on the parity of the *current* row
            pos_x += dx_pair[pos_y & 1]
            pos_y += dy
        # symmetric difference toggles membership: flip black <-> white
        flipped ^= {(pos_x, pos_y)}
    return (len(flipped), flipped)
def get_black_neighbors(tile_config, coords):
    """Count how many of the six hex neighbours of *coords* are black."""
    x, y = coords
    count = 0
    for dx_pair, dy in STEPS.values():
        if (x + dx_pair[y & 1], y + dy) in tile_config:
            count += 1
    return count
def sol1(pzin):
    """Part 2: run the hex cellular automaton for 100 days.

    Rules per day: a black tile with 0 or >2 black neighbours flips white;
    a white tile with exactly 2 black neighbours flips black.
    Returns the final black-tile count.
    """
    # Start from the part-1 tile layout (set of black-tile coords).
    tile_config = sol0(pzin)[1]
    for _ in range(100):
        # Bounding box of the black tiles, padded by 1 so white border
        # tiles can flip black this generation.
        border_estimate = (
            (min(x for x, y in tile_config) - 1, min(y for x, y in tile_config) - 1),
            (max(x for x, y in tile_config) + 1, max(y for x, y in tile_config) + 1),
        )
        next_config = set()
        for y in range(border_estimate[0][1], border_estimate[1][1] + 1):
            for x in range(border_estimate[0][0], border_estimate[1][0] + 1):
                coords = (x, y)
                bn = get_black_neighbors(tile_config, coords)
                # black survives with 1-2 neighbours; white flips with exactly 2
                if (
                    ((coords in tile_config) and (1 <= bn <= 2)) or
                    ((coords not in tile_config) and bn == 2)
                ):
                    next_config.add(coords)
        tile_config = next_config
    return len(tile_config)
def main():
    """Fetch the puzzle input and print the results of both parts."""
    puzzle_in = get_input(YEAR, DAY).strip().split("\n")
    for i, f in enumerate((sol0, sol1)):
        res = f(puzzle_in)
        # sol0 returns (count, tiles); only the count is printed
        res = res[0] if i == 0 else res
        if res is None:
            continue
        print(
            f"===[AoC 2020, {DAY}.{i} result:]===\n{res}\n"
            f"{'='*len(str(DAY))}============================"
        )
| StarcoderdataPython |
4868311 | import pickle
from typing import Dict, Any, Optional, Type
from ..data import GraphDataset
from .task_utils import task_name_to_dataset_class
# This should rightfully live in model_utils.py, but that would lead to a circular import...
def get_model_file_path(model_path: str, target_suffix: str):
    """Swap the ``hdf5``/``pkl`` extension of *model_path* for *target_suffix*.

    Raises ValueError when the path carries neither known extension.
    """
    assert target_suffix in ("hdf5", "pkl")
    for known_suffix in ("hdf5", "pkl"):
        if model_path.endswith("." + known_suffix):
            # Keep the dot, drop the old suffix, append the requested one.
            return model_path[: -len(known_suffix)] + target_suffix
    raise ValueError(
        f"Model path has to end in hdf5/pkl, which is not the case for {model_path}!"
    )
def load_dataset_for_prediction(trained_model_file: str):
    """Load the dataset object for prediction, based on the trained model path.

    The companion ``.pkl`` file next to the model weights stores the dataset
    class together with the hyperparameters and metadata it was built with.
    """
    with open(get_model_file_path(trained_model_file, "pkl"), "rb") as in_file:
        data_to_load = pickle.load(in_file)
    dataset_class: Type[GraphDataset] = data_to_load["dataset_class"]
    return dataset_class(
        params=data_to_load.get("dataset_params", {}),
        metadata=data_to_load.get("dataset_metadata", {}),
    )
def get_dataset(
    task_name: Optional[str],
    dataset_cls: Optional[Type[GraphDataset]],
    dataset_model_optimised_default_hyperparameters: Dict[str, Any],
    loaded_data_hyperparameters: Dict[str, Any],
    cli_data_hyperparameter_overrides: Dict[str, Any],
    loaded_metadata: Dict[str, Any],
) -> GraphDataset:
    """Build a GraphDataset from the given parameters.

    Args:
        task_name: task used to look up a dataset class when ``dataset_cls``
            is not provided.
        dataset_cls: explicit dataset class; when given, the loaded
            hyperparameters are used instead of class/task defaults.
        dataset_model_optimised_default_hyperparameters: model-specific
            defaults layered on top of the task defaults.
        loaded_data_hyperparameters: hyperparameters restored from disk.
        cli_data_hyperparameter_overrides: command-line overrides; applied
            last and therefore take highest priority.
        loaded_metadata: previously computed dataset metadata, if any.
    """
    # Precedence (lowest -> highest): class defaults, task defaults,
    # model-optimised defaults, then CLI overrides.
    if not dataset_cls:
        (
            dataset_cls,
            dataset_default_hyperparameter_overrides,
        ) = task_name_to_dataset_class(task_name)
        dataset_params = dataset_cls.get_default_hyperparameters()
        print(f" Dataset default parameters: {dataset_params}")
        dataset_params.update(dataset_default_hyperparameter_overrides)
        if len(dataset_default_hyperparameter_overrides):
            print(
                f"  Dataset parameters overridden by task defaults: {dataset_default_hyperparameter_overrides}"
            )
        dataset_params.update(dataset_model_optimised_default_hyperparameters)
        if len(dataset_default_hyperparameter_overrides):
            print(
                f"  Dataset parameters overridden by task/model defaults: {dataset_model_optimised_default_hyperparameters}"
            )
    else:
        dataset_params = loaded_data_hyperparameters
    dataset_params.update(cli_data_hyperparameter_overrides)
    if len(cli_data_hyperparameter_overrides):
        print(
            f"  Dataset parameters overridden from CLI: {cli_data_hyperparameter_overrides}"
        )
    if len(loaded_metadata):
        print("  WARNING: Dataset metadata loaded from disk, not calculated from data.")
    return dataset_cls(dataset_params, loaded_metadata)
| StarcoderdataPython |
1843618 | import sys
# Refuse to run under Python 2; the rest of the code assumes Python 3.
if sys.version_info[0] < 3:
    raise Exception('Must be running Python version 3 or up')
8046192 | import numpy as np
import math
from scipy import signal
# Return a box filter of size n by n
# Requires n to be an odd integer
def boxfilter(n):
    """Return an n-by-n box (mean) filter whose entries sum to 1.

    n must be an odd integer so the filter has a well-defined centre.
    """
    assert n % 2 == 1, "Dimension must be odd"
    # Uniform weight 1/n^2 makes the kernel integrate to one.
    weight = pow(n, -2)
    return np.full((n, n), weight)
# Return a 1D Gaussian filter
# Fix length of filter array to (6 * sigma) rounded up to next odd integer
def gauss1d(sigma):
    """Return a normalized 1D Gaussian filter of std dev *sigma*.

    The support length is (6 * sigma) rounded up to the next odd integer,
    so the kernel covers +/- 3 sigma around the centre sample.
    """
    # Half-width: ceil(6*sigma) // 2 expressed as a float so arange spans
    # [-radius, radius] inclusive in unit steps.
    radius = int(np.ceil(6 * sigma)) / 2
    offsets = np.arange(-radius, radius + 1)
    kernel = np.exp(-(offsets ** 2) / (2.0 * sigma ** 2))
    # Normalize so the coefficients sum to 1.
    return kernel / kernel.sum()
# Return a 2D Gaussian filter
# Fix side lengths to (6 * sigma) rounded up to next odd integer
def gauss2d(sigma):
    """Return a normalized 2D Gaussian filter of std dev *sigma*.

    Side length is (6 * sigma) rounded up to the next odd integer; the 2D
    kernel is the separable product of the 1D kernel with its transpose,
    built here via a 2D convolution.
    """
    kernel_1d = gauss1d(sigma)
    row = kernel_1d.reshape(1, kernel_1d.size)
    kernel_2d = signal.convolve2d(row, row.transpose())
    return normalize(kernel_2d)
# Return the result of applying a 2D Gaussian filter convolution to an input array
def gaussconvolve2d(array, sigma):
    """Convolve *array* with a 2D Gaussian of std dev *sigma* (same-size output)."""
    return signal.convolve2d(array, gauss2d(sigma), 'same')
# Normalize an array so that its elements sum to approximately 1.00
def normalize(numpyArray):
    """Scale the array so that its elements sum to (approximately) 1.00."""
    total = numpyArray.sum()
    return numpyArray / total
| StarcoderdataPython |
12854305 | <filename>problem0650.py
###########################
#
# #650 Divisors of Binomial Product - Project Euler
# https://projecteuler.net/problem=650
#
# Code by <NAME>
#
###########################
| StarcoderdataPython |
1938162 |
import sys
from pylinac.version import __version__, __version_info__
# check python version
# Require Python 3.6+.  Compare the whole version tuple: the old
# major/minor check (`major < 3 or minor < 6`) would wrongly reject a
# hypothetical Python 4.0, whose minor version is 0.
if sys.version_info < (3, 6):
    raise ValueError("Pylinac is only supported on Python 3.6+. Please update your environment.")
# import shortcuts
from pylinac.ct import CatPhan504, CatPhan600, CatPhan503, CatPhan604
from pylinac.core import decorators, geometry, image, io, mask, profile, roi, utilities
from pylinac.core.utilities import clear_data_files, assign2machine
from pylinac.field_analysis import FieldAnalysis, DeviceFieldAnalysis, Protocol, Device, Edge, \
Interpolation, Normalization, Centering
from pylinac.planar_imaging import LeedsTOR, StandardImagingQC3, LasVegas, DoselabMC2kV, DoselabMC2MV, \
StandardImagingQCkV, PTWEPIDQC, SNCMV, SNCkV, StandardImagingFC2
from pylinac.log_analyzer import load_log, Dynalog, TrajectoryLog, MachineLogs
from pylinac.picketfence import PicketFence # must be after log analyzer
from pylinac.starshot import Starshot
from pylinac.vmat import DRMLC, DRGS
from pylinac.winston_lutz import WinstonLutz
from pylinac.calibration import tg51, trs398
| StarcoderdataPython |
11261489 | import secrets
from Cryptodome.Cipher import ChaCha20
# ChaCha20 key and nonce sizes in bytes.
KEY_SIZE = 32
NONCE_SIZE = 12


def random_key() -> str:
    """Return a fresh hex string: a ChaCha20 key followed by a nonce."""
    key_part = secrets.token_hex(KEY_SIZE)
    nonce_part = secrets.token_hex(NONCE_SIZE)
    return key_part + nonce_part
# Note: all our API clients depend on this, can't change for legacy reasons.
def decrypt(data: bytes, key: str) -> bytes:
    """Decrypt *data* with a combined hex key produced by :func:`random_key`.

    The key string is the ChaCha20 key (first ``KEY_SIZE*2`` hex chars)
    followed by the nonce (remaining ``NONCE_SIZE*2`` hex chars).

    Raises:
        ValueError: if the key string has the wrong length.
    """
    if len(key) != KEY_SIZE * 2 + NONCE_SIZE * 2:
        raise ValueError("invalid decryption key")
    # Split the combined hex string back into raw key and nonce bytes.
    cacha_key = b''.fromhex(key[:KEY_SIZE * 2])
    crypt = ChaCha20.new(key=cacha_key, nonce=b''.fromhex(key[KEY_SIZE * 2:]))
    return crypt.decrypt(data)
| StarcoderdataPython |
3559293 | <filename>cstar/command.py
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locate and parse cstar commands"""
import json
import os
import re
import functools
from collections import namedtuple
from stat import S_ISREG
from pkg_resources import resource_filename
from cstar.exceptions import BadEnvironmentVariable
from cstar.output import warn
_property_re = re.compile(r"^# C\*\s*([^\s:]+)\s*:\s*(.*)\s*$", re.MULTILINE)
_env_re = re.compile("[^a-zA-Z0-9_]")
Command = namedtuple("Command", "name file strategy cluster_parallel dc_parallel arguments description")
class Argument(object):
    """Represents an argument for a command.

    Attributes mirror the constructor parameters; ``name`` must be usable
    as an environment variable name.
    """
    def __init__(self, name, option, description, required=False, default=None):
        # Assign each parameter explicitly.  The previous implementation
        # iterated ``locals().items()``, which shadowed the ``name``
        # parameter with the loop variable and depended on locals()
        # returning a pre-iteration snapshot — fragile and opaque.
        self.name = name
        self.option = option
        self.description = description
        self.required = required
        self.default = default
        if _env_re.search(self.name):
            raise BadEnvironmentVariable(self.name)
def load(command_name):
    """Locate a command definition on disk and parse it into a Command."""
    path = _search(command_name)
    with open(path) as handle:
        return _parse(command_name, path, handle.read())
def _parse(command_name, filename, definition):
    """Parses a command.

    Reads the ``# C* key : value`` header comments at the top of a command
    file and returns a ``Command`` named tuple.  Parsing stops at the first
    line that is not a comment.
    """
    cluster_parallel = None
    dc_parallel = None
    description = None
    strategy = "topology"
    arguments = []
    truth_lookup = {"true": True, "false": False}
    for line in definition.split('\n'):
        # Properties may only appear in the leading comment block.
        if not line or line[0] != "#":
            break
        match = _property_re.match(line)
        if match:
            name = match.group(1)
            value = match.group(2)
            if name == "cluster-parallel":
                cluster_parallel = truth_lookup[value.lower()]
            elif name == "dc-parallel":
                dc_parallel = truth_lookup[value.lower()]
            elif name == "description":
                description = value
            elif name == "strategy":
                strategy = value
            elif name == "argument":
                # Argument properties carry a JSON object of Argument kwargs.
                arguments.append(Argument(**json.loads(value)))
            else:
                warn("Ignoring unknown property %s while parsing %s" % (name, filename))
    return Command(name=command_name, file=filename,
                   cluster_parallel=cluster_parallel,
                   dc_parallel=dc_parallel,
                   description=description,
                   strategy=strategy,
                   arguments=arguments)
def _search_path():
    """Return command directories in priority order: user, system, bundled."""
    return os.path.expanduser('~/.cstar/commands'), '/etc/cstar/commands', resource_filename('cstar.resources', 'commands')
def _stat_is_reg(stat_output):
return S_ISREG(stat_output.st_mode)
def _search(name, listdir=os.listdir, stat=os.stat, check_is_file=_stat_is_reg):
    """Return the filename for a given command.

    Bare names are resolved against the command search path; names
    containing a slash are treated as explicit file paths.

    Raises:
        FileNotFoundError: if no definition can be found.
    """
    if "/" not in name:
        listing = _list(listdir, stat, check_is_file)
        if name in listing:
            return listing[name]
    if check_is_file(stat(name)):
        return name
    raise FileNotFoundError("Failed to find definition for command %s" % (name,))
def list(listdir=os.listdir, stat=os.stat, check_is_file=_stat_is_reg):
    """Return the names of all available commands.

    NOTE(review): this intentionally shadows the ``list`` builtin within
    this module; callers use it as ``command.list()``.
    """
    return _list(listdir, stat, check_is_file).keys()
@functools.lru_cache(None)
def _list(listdir, stat, check_is_file):
    """Return a dict mapping command names to their definition file paths.

    Earlier search-path directories shadow later ones, and the first file
    seen for a given name prefix wins.  Results are memoised per argument
    combination for the lifetime of the process.
    """
    res = {}
    for dir in _search_path():
        try:
            for filename in listdir(dir):
                full_name = os.path.join(dir, filename)
                try:
                    if not check_is_file(stat(full_name)):
                        continue
                except FileNotFoundError:
                    # Broken symlink or file removed mid-listing; skip it.
                    continue
                # Skip editor backup/autosave files.
                if filename.endswith("~") or filename.startswith("#"):
                    continue
                # A command's name is the filename with any extension removed.
                if "." in filename:
                    prefix = re.sub(r"\..*", "", filename)
                else:
                    prefix = filename
                if prefix not in res:
                    res[prefix] = full_name
        except FileNotFoundError:
            # Search-path directory does not exist; ignore it.
            pass
    return res
| StarcoderdataPython |
12818759 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 16 17:45:53 2020
@author: ashwin_menon
"""
#Made By <NAME>(101703120)
import pandas as pd
import numpy as np
import sys
from sklearn.impute import SimpleImputer
def missing_values(inputfile, outputfile):
    """Report columns with nulls in *inputfile* and write an imputed CSV.

    Missing values are filled with each column's median via sklearn's
    SimpleImputer; the result is written to *outputfile*.
    """
    dataset=pd.read_csv(inputfile)
    head=dataset.columns
    columns_null=dataset.columns[dataset.isnull().any()] #looking for columns having null values
    print("Columns having null values are-",columns_null)
    for target in columns_null:
        null_cells=dataset[target].isnull()
        count=sum(null_cells)
        print(target," has ",count," missing values")
    imputer=SimpleImputer(strategy='median') #strategy can be changed
    #for learning about SimpleImputer more-https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
    # NOTE(review): the fit_transform result is discarded; only the side
    # effect of fitting the imputer is used before transform() below.
    imputer.fit_transform(dataset) #fitting and transforming
    data=pd.DataFrame(imputer.transform(dataset)) #making dataframe
    data.columns=head #giving names to the columns as in main dataset
    data.to_csv(outputfile,index=False)
    print("Success")


# Command-line entry point: python script.py <input.csv> <output.csv>
argList=sys.argv #picking values from command line
infile=argList[1] #input file
outfile=argList[2] #output file
missing_values(infile,outfile) #calling function
1815211 | <reponame>Nik6198/Advanced-DataStructure
import _fib_heap
import random
import time
import matplotlib.pyplot as plt
class graph:
    """Dense random graph (adjacency matrix) used to benchmark Dijkstra
    implemented with a plain array against a Fibonacci-heap version.

    Vertices are 1-indexed at the public interface and 0-indexed internally.
    """
    def __init__(self,n):
        # Random complete adjacency matrix with zero-cost self loops.
        self.graph=[]
        for i in range(n):
            temp=[random.randint(0,1001) for i in range(n)]
            temp[i]=0
            self.graph.append(temp)
    def accept(self):
        """Read edges interactively: a source vertex, an edge count, then
        (destination, cost) pairs per row; all values are 1-indexed."""
        for i in range(len(self.graph)):
            m=int(input())
            n=int(input())
            for j in range(n):
                dest=int(input())
                cost=int(input())
                self.graph[m-1][dest-1]=cost
    def dij_array(self,source,n):
        """O(V^2) Dijkstra using a linear scan for the closest unvisited
        vertex; plots elapsed time against n and returns the distance list."""
        start=time.time()
        dist=self.graph[source-1].copy()
        #print(dist)
        vis=[False for i in range(len(self.graph))]
        vis[source-1]=True
        for i in range(len(self.graph)-1):
            # NOTE(review): ``min`` shadows the builtin inside this method.
            min=9999999
            v=None
            for i in range(len(self.graph)):
                if not vis[i] and min>dist[i]:
                    v=i
                    min=dist[i]
            if v is None:
                break
            vis[v]=True
            min=dist[v]
            # Relax all edges out of the newly finalised vertex v.
            for i in range(len(self.graph)):
                if not vis[i] and dist[i]>dist[v]+self.graph[v][i]:
                    dist[i]=dist[v]+self.graph[v][i]
            #print(min,v,dist,vis)
        t=abs(start-time.time())
        plt.plot([n],[t],'bo')
        print("time taken for array",t,n)
        return dist
    def dij_heap(self,source,n):
        """Dijkstra driven by the project's Fibonacci heap (_fib_heap.fib);
        plots elapsed time against n and returns the distance list."""
        h1=_fib_heap.fib()
        for i in range(len(self.graph)):
            # NOTE(review): ``is not`` compares int identity; works here only
            # because small ints are cached — should be ``!=``.
            if i is not (source-1):
                h1.insert(key=self.graph[source-1][i],vertex=i)
        dist=self.graph[source-1].copy()
        dist1=dist.copy()
        #print(dist)
        #h1.print1()
        #print("**")
        #vis=[False for i in range(len(self.graph))]
        #vis[source-1]=True
        start=time.time()
        for i in range(len(self.graph)-1):
            min=h1.extract_min()
            #print(min[0],min[1])
            #if h1.min is not None:
            #print("***")
            #h1.print1()
            #print("***")
            list1=[]
            # Relax every vertex still in the heap via decrease_key.
            for k in h1.hash.keys():
                #print(h1.hash.keys(),"hi",h1.hash[k],"bye")
                #list1=list(h1.hash.values())
                #print(k,list1[0].key[1])
                if h1.hash[k].key[1] > dist[min[0]]+self.graph[min[0]][k]:
                    h1.decrease_key(k,dist1[k],dist[min[0]]+self.graph[min[0]][k])
                    dist[k]=dist[min[0]]+self.graph[min[0]][k]
        t=abs(start-time.time())
        plt.plot(n,[t],'ro')
        print("time taken is for heap",t,n)
        return dist
# Benchmark driver: grow the graph from 100 to 5000 vertices in steps of
# 100, running both Dijkstra variants so their timings are plotted together.
#h1=fib()
n=0
while n<=5000:
    n+=100
    #n=int(input())
    g=graph(n)
    #print(g.graph[0])
    #g.accept()
    #for i in g.graph:
    #    print(i)
    g.dij_heap(1,n)
    g.dij_array(1,n)
plt.show()
| StarcoderdataPython |
11333091 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from model.loss import MultiLabelSoftmaxLoss
from model.loss import cross_entropy_loss
from tools.accuracy_init import single_label_top1_accuracy, multi_label_accuracy
class Attention(nn.Module):
    """Bilinear attention: project 512-d encoder states to 256-d and score
    them against a set of 256-d query embeddings.

    forward(h, u) with h of shape (B, T, 512) and u of shape (B, K, 256)
    returns (B, T, K) attention weights softmax-normalised over T.
    """

    def __init__(self):
        super(Attention, self).__init__()
        # Projection from the 512-d BiLSTM output into the query space.
        self.w = nn.Linear(512, 256)

    def forward(self, h, u):
        projected = self.w(h)                                # (B, T, 256)
        scores = torch.bmm(projected, u.permute(0, 2, 1))    # (B, T, K)
        # Normalise over the time dimension (dim=1), as in the original.
        return torch.softmax(scores, dim=1)
class Attribute(nn.Module):
    """Multi-task legal-attribute model: a BiLSTM token encoder with
    label-embedding attention, jointly predicting attributes ("ys", 27
    binary labels), statutes ("ft", 23 classes) and charges ("zm", 20
    classes).

    NOTE(review): the model is hard-wired to CUDA via the ``.cuda()`` calls
    below.
    """
    def __init__(self, config, gpu_list, *args, **params):
        super(Attribute, self).__init__()
        # Vocabulary size = number of lines in the BERT vocab file.
        # NOTE(review): the file handle ``f`` is never closed.
        self.word_num = 0
        f = open(os.path.join(config.get("model", "bert_path"), "vocab.txt"), "r")
        for line in f:
            self.word_num += 1
        self.embedding = nn.Embedding(self.word_num, 256)
        # 2-layer BiLSTM; 256 hidden units per direction -> 512-d outputs.
        self.encoder = nn.LSTM(256, 256, batch_first=True, num_layers=2, bidirectional=True)
        # One embedding per attribute label, used as attention queries.
        self.ys_emb = nn.Embedding(27, 256)
        self.ys_token = []
        for a in range(0, 27):
            self.ys_token.append(a)
        self.ys_token = Variable(torch.LongTensor(self.ys_token)).cuda()
        self.attention = Attention()
        # Heads: 27 labels x 2 logits = 54; 23 statutes; 20 charges.
        self.ys_fc = nn.Linear(256 * 27 * 2, 54)
        self.ft_fc = nn.Linear(1024, 23)
        self.zm_fc = nn.Linear(1024, 20)
        self.criterion = {
            "ys": MultiLabelSoftmaxLoss(config),
            "ft": cross_entropy_loss,
            "zm": cross_entropy_loss
        }
        self.accuracy_function = {
            "ys": multi_label_accuracy,
            "ft": single_label_top1_accuracy,
            "zm": single_label_top1_accuracy
        }
    def init_hidden(self, bs):
        # Fresh zero (h0, c0) states; 4 = num_layers(2) * num_directions(2).
        self.hidden = (torch.autograd.Variable(torch.zeros(4, bs, 256).cuda()),
                       torch.autograd.Variable(torch.zeros(4, bs, 256).cuda()))
    def forward(self, data, config, gpu_list, acc_result, mode):
        x = data['token']
        batch_size = x.size()[0]
        x = self.embedding(x)
        self.init_hidden(batch_size)
        h, c = self.encoder(x, self.hidden)
        # Repeat the 27 label queries for every batch element.
        ys_emb = self.ys_emb(self.ys_token.repeat(batch_size).view(batch_size, -1))
        # e: max-pooled states over time; a: per-label attention weights.
        e = torch.max(h, dim=1)[0]
        a = self.attention(h, ys_emb)
        # g: attention-weighted state per label, flattened for ys_fc.
        g = torch.bmm(a.permute(0, 2, 1), h)
        g = g.view(batch_size, -1)
        # r: mean over feature dim -> one scalar per timestep.
        # NOTE(review): cat([e, r]) feeding a 1024-d Linear implies
        # seq_len == 512 — confirm against the data pipeline.
        r = torch.mean(h, dim=2)
        er = torch.cat([e, r], dim=1)
        ys = self.ys_fc(g)
        ft = self.ft_fc(er)
        zm = self.zm_fc(er)
        loss = 0
        if acc_result is None:
            acc_result = {"ys": None, "ft": None, "zm": None}
        if "ys" in data.keys():
            y = ys
            # Reshape to (batch, 27 labels, 2 classes) for per-label softmax.
            y = y.view(y.size()[0], -1, 2)
            label = data["ys"]
            y_out = nn.Softmax(dim=2)(y)
            y_out = y_out[:, :, 1]
            loss += self.criterion["ys"](y, label)
            acc_result["ys"] = self.accuracy_function["ys"](y_out, label, config, acc_result["ys"])
        if "ft" in data.keys():
            y = ft
            label = data["ft"]
            loss += self.criterion["ft"](y, label)
            acc_result["ft"] = self.accuracy_function["ft"](y, label, config, acc_result["ft"])
        if "zm" in data.keys():
            y = zm
            label = data["zm"]
            loss += self.criterion["zm"](y, label)
            acc_result["zm"] = self.accuracy_function["zm"](y, label, config, acc_result["zm"])
        return {"loss": loss, "acc_result": acc_result}
| StarcoderdataPython |
1921763 | <reponame>imlegend19/GRAS
from gras.github.entity.api_static import APIStaticV4, RepositoryStatic
from gras.github.entity.github_models import ForkModel
from gras.github.github import GithubInterface
class ForkStruct(GithubInterface, ForkModel):
    """
    The object models the query to fetch the list of directly forked repositories and
    generates an object using
    :class:`gras.github.entity.github_models.ForkModel` containing the fetched data.

    Please see GitHub's `repository documentation`_, `fork connection documentation`_ for more information.

    .. _repository documentation:
        https://developer.github.com/v4/object/repository/

    .. _fork connection documentation:
        https://developer.github.com/v4/object/repositoryconnection/

    :param name: name of the repository
    :type name: str

    :param owner: owner of the repository
    :type owner: str
    """

    # GraphQL query; ``{after}`` is replaced with the page cursor (or "null")
    # to walk the fork connection 100 entries at a time.
    FORK_QUERY = """
        {{
            repository(name: "{name}", owner: "{owner}") {{
                forks(first: 100, orderBy: {{field: CREATED_AT, direction: ASC}}, after: {after}) {{
                    nodes {{
                        createdAt
                        nameWithOwner
                    }}
                    pageInfo {{
                        hasNextPage
                        endCursor
                    }}
                }}
            }}
        }}
    """

    def __init__(self, name, owner):
        """Constructor Method"""
        super().__init__(
            query=self.FORK_QUERY,
            query_params=dict(name=name, owner=owner, after="null"),
        )

    def iterator(self):
        """
        Iterator function for :class:`gras.github.structs.fork_struct.ForkStruct`. For more information see
        :class:`gras.github.github.githubInterface`.

        :return: a single API response or a list of responses
        :rtype: generator<dict>
        """
        generator = self._generator()
        hasNextPage = True

        while hasNextPage:
            try:
                response = next(generator)
            except StopIteration:
                break

            # Advance the cursor; GraphQL needs the cursor string quoted.
            endCursor = response[APIStaticV4.DATA][APIStaticV4.REPOSITORY][RepositoryStatic.FORKS][
                APIStaticV4.PAGE_INFO][APIStaticV4.END_CURSOR]

            self.query_params[APIStaticV4.AFTER] = '\"' + endCursor + '\"' if endCursor is not None else "null"

            yield response[APIStaticV4.DATA][APIStaticV4.REPOSITORY][RepositoryStatic.FORKS][APIStaticV4.NODES]

            hasNextPage = response[APIStaticV4.DATA][APIStaticV4.REPOSITORY][
                RepositoryStatic.FORKS][APIStaticV4.PAGE_INFO][APIStaticV4.HAS_NEXT_PAGE]

    def process(self):
        """
        Generates a :class:`gras.github.entity.github_models.ForkModel` object representing the fetched data.

        :return: A :class:`gras.github.entity.github_models.ForkModel` object
        :rtype: class
        """
        for lst in self.iterator():
            for fork in lst:
                yield self.object_decoder(fork)
1741338 | from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from gputools import OCLArray, get_device
from gputools.fft.oclfft import fft, fft_plan
from gputools.core.oclalgos import OCLElementwiseKernel
from gputools.core.ocltypes import assert_bufs_type
_complex_multiply_kernel = OCLElementwiseKernel(
"""cfloat_t *a, cfloat_t * b""",
"""a[i] = cfloat_mul(a[i],b[i])""", "mult")
def fft_convolve(data, h, res_g = None,
                 plan = None, inplace = False,
                 kernel_is_fft = False,
                 kernel_is_fftshifted = False):
    """ convolves data with kernel h via FFTs

    data should be either a numpy array or a OCLArray (see doc for fft)
    both data and h should be same shape

    if data/h are OCLArrays, then:
     - type should be complex64
     - shape should be equal and power of two
     - h is assumed to be already fftshifted
       (otherwise set kernel_is_fftshifted to true)
    """

    if isinstance(data,np.ndarray):
        return _fft_convolve_numpy(data, h,
                                   plan = plan,
                                   kernel_is_fft = kernel_is_fft,
                                   kernel_is_fftshifted = kernel_is_fftshifted)
    elif isinstance(data,OCLArray):
        return _fft_convolve_gpu(data,h, res_g = res_g,
                                 plan = plan, inplace = inplace,
                                 kernel_is_fft = kernel_is_fft)
    else:
        # Fixed: the original referenced the undefined name ``arr_obj``
        # here, turning every bad-type report into a NameError.
        raise TypeError("array argument (1) has bad type: %s"%type(data))
def _fft_convolve_numpy(data, h, plan = None,
                        kernel_is_fft = False,
                        kernel_is_fftshifted = False):
    """ convolving via opencl fft for numpy arrays

    data and h must have the same size; the arrays are uploaded to the GPU
    as complex64, convolved there, and the magnitude is returned.
    """

    if data.shape != h.shape:
        raise ValueError("data and kernel must have same size! %s vs %s "%(str(data.shape),str(h.shape)))

    data_g = OCLArray.from_array(data.astype(np.complex64))

    if not kernel_is_fftshifted:
        h = np.fft.fftshift(h)

    h_g = OCLArray.from_array(h.astype(np.complex64))
    res_g = OCLArray.empty_like(data_g)

    _fft_convolve_gpu(data_g,h_g,res_g = res_g,
                      plan = plan,
                      kernel_is_fft = kernel_is_fft)

    # Complex round-trip may leave small imaginary residue; keep magnitude.
    res =  abs(res_g.get())

    # Release the GPU buffers eagerly.
    del data_g
    del h_g
    del res_g

    return res
def _fft_convolve_gpu(data_g, h_g, res_g = None,
                      plan = None, inplace = False,
                      kernel_is_fft = False):
    """ fft convolve for gpu buffer

    data_g/h_g must be complex64 OCLArrays of identical shape; h_g is
    assumed already fftshifted.  Returns res_g (data_g itself if inplace).
    """

    assert_bufs_type(np.complex64,data_g,h_g)

    if data_g.shape != h_g.shape:
        raise ValueError("data and kernel must have same size! %s vs %s "%(str(data_g.shape),str(h_g.shape)))

    if plan is None:
        plan = fft_plan(data_g.shape)

    if inplace:
        res_g = data_g
    else:
        if res_g is None:
            res_g = OCLArray.empty(data_g.shape,data_g.dtype)

        res_g.copy_buffer(data_g)

    if not kernel_is_fft:
        # Work on a copy so the caller's kernel buffer is left untouched.
        kern_g = OCLArray.empty(h_g.shape,h_g.dtype)
        kern_g.copy_buffer(h_g)
        fft(kern_g,inplace=True, plan = plan)
    else:
        kern_g = h_g


    fft(res_g,inplace=True, plan = plan)

    #multiply in fourier domain
    _complex_multiply_kernel(res_g,kern_g)

    fft(res_g,inplace = True, inverse = True, plan = plan)

    return res_g
if __name__ == '__main__':

    # Smoke test: convolve a centred unit impulse with a box kernel; the
    # output should integrate to the box area (N^2/9).
    N = 512

    d = np.zeros((N,)*2)
    # Fixed: both indices must be integers; ``N/2`` is a float in Python 3
    # and is rejected by numpy fancy indexing.
    d[N//2,N//2] = 1.

    h = np.zeros((N,)*2)
    h[N//3:2*N//3,N//3:2*N//3] = 1.
    h = np.fft.fftshift(h)

    d_g = OCLArray.from_array(d.astype(np.complex64))
    h_g = OCLArray.from_array(h.astype(np.complex64))

    hf_g = OCLArray.from_array(np.fft.fft2(h.astype(np.complex64)))

    # out = fft_convolve(d_g,h_g, inplace = False, kernel_is_fft = True)
    out_g = fft_convolve(d_g,h_g, inplace = False)

    out = fft_convolve(d,h, inplace = False)

    print(np.sum(abs(out_g.get())),N**2/9)
| StarcoderdataPython |
3489622 | <reponame>Arongil/Diversity-Scraper
import json
import os
# Merge the per-journal author lists into one de-duplicated master list.
names = []
sofar = set()
journals = os.listdir()
for j in journals:
    # Only consider journal directories with a readable names file.
    if os.path.isfile(j + '/names.txt') and os.access(j + '/names.txt', os.R_OK):
        with open(j + '/names.txt', encoding='utf-8-sig') as f:
            current = json.load(f)
            for author in current:
                # De-duplicate by author name across all journals.
                if author['name'] not in sofar:
                    sofar.add(author['name'])
                    names.append(author)
with open('all/names.txt', 'w', encoding='utf-8') as f:
    json.dump(names, f, ensure_ascii=False, indent=4)
| StarcoderdataPython |
1786610 | # Offset for screen_grab
x_pad = 622
y_pad = 99
width = 127
height = 248
# Columns to check
left_column = 8
right_column = 113
| StarcoderdataPython |
1826040 | <filename>python_lib/mitxgraders/plugins/template.py
"""
This is a template file that also is used to test that the plugin loading
mechanism is working.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
# Make sure that imports are working
from mitxgraders.baseclasses import ItemGrader
def plugin_test():
    """Sentinel invoked by the test suite to confirm this plugin loaded."""
    loaded = True
    return loaded
# Allow the function to be imported with *
__all__ = ["plugin_test"]
| StarcoderdataPython |
3400177 | import cgi, datetime, dateutil.parser, os.path, pytz, sys
from flask import Flask, request
for sam_dir in (
# ./scheduler-and-mapper/
os.path.join(os.path.dirname(__file__), "scheduler-and-mapper"),
# ../scheduler-and-mapper/
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"scheduler-and-mapper"
)
):
# Look for the scheduler-and-mapper directory.
if os.path.isdir(sam_dir):
# Use the first one that is found.
sys.path.insert(1, sam_dir)
break
import agency_nyu, agency_walking, agency_walking_static, \
agency_walking_dynamic, departure_lister, itinerary_finder, stops
TIME_FORMAT = "%I:%M %p on %A"
TIMEZONE = pytz.timezone("America/New_York")
agencies = (
agency_nyu.AgencyNYU,
agency_walking_static.AgencyWalkingStatic,
agency_walking_dynamic.AgencyWalkingDynamic,
)
agencies_to_vary = (
agency_nyu.AgencyNYU,
)
weekdays = tuple(
# Sunday to Saturday
datetime.date(2006, 1, d).strftime("%A") for d in range(1, 8)
)
def days_hours_minutes(td):
    '''
    Breaks a datetime.timedelta into the days as an integer, the hours as an
    integer, and the minutes as a float.
    '''
    whole_days = td.days
    whole_hours = td.seconds // 3600
    leftover_minutes = (td.seconds / 60) % 60
    return whole_days, whole_hours, leftover_minutes
def days_hours_minutes_string(td):
    '''
    Converts a datetime.timedelta into a string of days, hours, and minutes,
    e.g. "1 day, 2 hours, and 3 minutes".

    Rounds to the nearest minute and carries the rounding over, so a delta
    of 59m45s renders as "1 hour" rather than the impossible "60 minutes"
    the previous per-component rounding produced.
    '''
    # Round the whole delta to minutes first so rounding can carry into
    # the hour/day components.
    total_minutes = int(round(td.total_seconds() / 60))
    days, remainder = divmod(total_minutes, 24 * 60)
    hours, minutes = divmod(remainder, 60)
    result = []
    if days:
        result.append("1 day" if days == 1 else "{} days".format(days))
    if hours:
        result.append("1 hour" if hours == 1 else "{} hours".format(hours))
    if minutes:
        result.append("1 minute" if minutes == 1 else "{} minutes".format(minutes))
    if not result:
        return "An instant"
    if len(result) == 1:
        return result[0]
    if len(result) == 2:
        return result[0] + " and " + result[1]
    # Oxford-comma list for three components.
    result[-1] = "and " + result[-1]
    return ", ".join(result)
def get_datetime_trip():
    """Parse the trip time from the "day" and "when" GET parameters.

    Falls back to the current local time (naive, America/New_York) when
    either parameter is missing or unparseable.
    """
    # Combine the "day" and "when" GET parameters and parse them together.
    try:
        return dateutil.parser.parse(
            request.args.get("day", "") +
            " " +
            request.args["when"]
        )
    except (KeyError, ValueError):
        return datetime.datetime.now(TIMEZONE).replace(tzinfo=None)
def get_weekdays_checked(datetime_trip):
    """Pair every weekday name with a flag that is True only for the trip's day.

    ``weekdays`` starts on Sunday while ``datetime.weekday()`` numbers
    Monday as 0, hence the +1 shift modulo 7.
    """
    selected = (datetime_trip.weekday() + 1) % 7
    return [(day_name, index == selected)
            for index, day_name in enumerate(weekdays)]
def mark_weighted_edge_up(edge, margin):
    '''
    Renders a weighted edge as one HTML <li> element.
    The margin is inserted at the beginning of every line.

    Returns the HTML as a string.  ``edge`` is expected to provide
    get_human_readable_instruction(), from_node/to_node, datetime_depart/
    datetime_arrive, and an intermediate_nodes sequence of (node, time)
    records — presumably a weighted edge from itinerary_finder; confirm
    against that module.
    '''
    return (
        # Start the list item.
        margin + "<li>\n" +
        # Add the human-readable instruction.
        margin + "\t" + cgi.escape(
            edge.get_human_readable_instruction()
        ) + "\n" +
        # Start the nested unordered list.
        margin + "\t<ul>\n" +
        # Add the departure time.
        margin + "\t\t<li>\n" +
        margin + "\t\t\t<span class=\"itinerary-time\">" + cgi.escape(
            edge.datetime_depart.strftime(TIME_FORMAT)
        ) + ":</span>\n" +
        margin + "\t\t\tDepart from\n" +
        margin + "\t\t\t<span class=\"itinerary-node\">" + cgi.escape(
            edge.from_node
        ) + "</span>.\n" +
        margin + "\t\t</li>\n" +
        # Add the list of intermediate nodes (omitted when there are none).
        (
            (
                # Start the list item.
                margin + "\t\t<li>\n" +
                # Add the heading for the nested list.
                margin + "\t\t\tIntermediate stops:\n" +
                # Start the nested ordered list.
                margin + "\t\t\t<ol>\n" +
                # Add the list items.
                "".join(
                    margin + "\t\t\t\t<li>\n" +
                    margin + "\t\t\t\t\t<span class=\"itinerary-time\">" +
                    cgi.escape(
                        node_and_time.time.strftime(TIME_FORMAT)
                    ) + ":</span>\n" +
                    margin + "\t\t\t\t\t<span class=\"itinerary-node\">" +
                    cgi.escape(node_and_time.node) + "</span>\n" +
                    margin + "\t\t\t\t</li>\n"
                    for node_and_time in edge.intermediate_nodes
                ) +
                # End the nested ordered list.
                margin + "\t\t\t</ol>\n" +
                # End the list item.
                margin + "\t\t</li>\n"
            )
            if edge.intermediate_nodes else
            ""
        ) +
        # Add the arrival time.
        margin + "\t\t<li>\n" +
        margin + "\t\t\t<span class=\"itinerary-time\">" + cgi.escape(
            edge.datetime_arrive.strftime(TIME_FORMAT)
        ) + ":</span>\n" +
        margin + "\t\t\tArrive at\n" +
        margin + "\t\t\t<span class=\"itinerary-node\">" + cgi.escape(
            edge.to_node
        ) + "</span>.\n" +
        margin + "\t\t</li>\n" +
        # End the nested unordered list.
        margin + "\t</ul>\n" +
        # End the list item.
        margin + "</li>\n"
    )
| StarcoderdataPython |
1801032 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 10:32:35 2020
@author: Ethan
"""
from PyQt5 import uic

# Compile the Qt Designer .ui file into an importable Python module
# (execute=False: only write the Ui_* class, no preview code).
fin = open('test_stimulation.ui','r')
fout = open('test_stimulation.py','w')
uic.compileUi(fin,fout,execute=False)
fin.close()
fout.close()
42315 | <reponame>dargueso/IceVarFigs
"""
Calculates current year percentage of record daily low SIE 2002-present
using JAXA metadata
Website : https://ads.nipr.ac.jp/vishop/vishop-extent.html
Author : <NAME>
Date : 18 October 2016
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import datetime
import urllib as UL
### Directory and time
directoryfigure = '/home/zlabe/Documents/Projects/IceVarFigs/Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day-1)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
### Load url
url = 'https://ads.nipr.ac.jp/vishop.ver1/data/graph/plot_extent_n_v2.csv'
### Read file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=0,delimiter=",",)
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
month = dataset[1:,0] # 1-12, nan as month[0]
day = dataset[1:,1] # 1-31, nan as day[0]
mean1980 = dataset[1:,2] # km^2, nan as mean1980[0]
mean1990 = dataset[1:,3] # km^2, nan as mean1990[0]
mean2000 = dataset[1:,4] # km^2, nan as mean2000[0]
years = dataset[1:,5:]
doy = np.arange(0,len(day),1)
### Change units to million km^2
years = years/1e6
### Recent day of current year
currentyear = years[:,-1]
lastday = now.timetuple().tm_yday - 1
currentice = currentyear[lastday]
currentanom = currentice - (mean1980[lastday]/1e6)
### Fill in random missing days (does not affect!)
currentyear[10] = currentyear[9]
### Calculate magnitude of record
years2 = years[:,:-1]
mins = np.nanmin(years2[:,:],axis=1)
### Select month
octs = np.where(month == 10)[0]
recdiff = currentyear - mins
###############################################################################
###############################################################################
###############################################################################
### Plot figure
matplotlib.rc('savefig', facecolor='black')
matplotlib.rc('axes', edgecolor='white')
matplotlib.rc('xtick', color='white')
matplotlib.rc('ytick', color='white')
matplotlib.rc('axes', labelcolor='white')
matplotlib.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
    """Show only the spines named in *spines*, pushed 5 points outward;
    hide every other spine and keep ticks only on visible left/bottom axes."""
    for location, spine in ax.spines.items():
        if location in spines:
            spine.set_position(('outward', 5))
        else:
            spine.set_color('none')
    y_axis, x_axis = ax.yaxis, ax.xaxis
    if 'left' in spines:
        y_axis.set_ticks_position('left')
    else:
        y_axis.set_ticks([])
    if 'bottom' in spines:
        x_axis.set_ticks_position('bottom')
    else:
        x_axis.set_ticks([])
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
recdiff_masked = np.ma.masked_less_equal(recdiff, 0)
zeroline = [0]*len(doy)
plt.plot(doy,zeroline,linewidth=2,color='w',linestyle='--',
zorder=11)
barlist = plt.bar(np.arange(366),recdiff,color='tomato',
edgecolor='tomato',zorder=10)
barlist = plt.bar(np.arange(366),recdiff_masked.filled(np.nan),
color='deepskyblue',edgecolor='deepskyblue',zorder=11)
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
plt.xticks(np.arange(0,366,30.4),xlabels,rotation=0,fontsize=11)
ylabels = [r'-1.5',r'-1.0',r'-0.5',r'\textbf{0.0}',r'0.5',r'1.0',r'1.5']
plt.yticks(np.arange(-1.5,1.6,0.5),ylabels,fontsize=11)
plt.text(0,-1.35,r'\textbf{DATA:} JAXA (Arctic Data archive System, NIPR)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0,-1.45,r'\textbf{SOURCE:} https://ads.nipr.ac.jp/vishop/vishop-extent.html',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0,-1.25,r'\textbf{GRAPHIC:} <NAME> (@ZLabe)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(90,1.55,r'[ 2018 -- Previous Daily Record ]',color='darkgrey',ha='left',
fontsize=11)
plt.xlim([0,365])
plt.ylim([-1.5,1.5])
plt.ylabel(r'\textbf{Extent [$\bf{\times 10^{6}}$\ \textbf{km}$\bf{^2}$]}',
fontsize=15,color='darkgrey')
ax.yaxis.grid(zorder=1,color='w',alpha=0.35)
fig.suptitle(r'\textbf{ARCTIC SEA ICE EXTENT}',
fontsize=18,color='darkgrey')
ax.tick_params('both',length=5.5,width=2,which='major')
### Save figure
plt.savefig(directoryfigure + 'JAXA_seaice_record_magnitude_year',dpi=900)
### Print additional information
print('\n')
print('----JAXA Sea Ice Change----')
print('Day 5 = %s km^2' % ((currentyear[lastday-4] - currentyear[lastday-5])*1e6))
print('Day 4 = %s km^2' % ((currentyear[lastday-3] - currentyear[lastday-4])*1e6))
print('Day 3 = %s km^2' % ((currentyear[lastday-2] - currentyear[lastday-3])*1e6))
print('Day 2 = %s km^2' % ((currentyear[lastday-1] - currentyear[lastday-2])*1e6))
print('Day 1 = %s km^2' % ((currentyear[lastday] - currentyear[lastday-1])*1e6))
print('\n' 'Total 5-day Change = %s km^2' % ((currentyear[lastday]-currentyear[lastday-5])*1e6))
print('\n')
print('2016-1980 = %s km^2' % ((currentyear[lastday]*1e6) - mean1980[lastday]))
print('2016-2012 = %s km^2' % ((currentyear[lastday] - years[lastday,-5])*1e6))
print('\n') | StarcoderdataPython |
4926081 | <gh_stars>0
# Module-level value read by Demo.mymethod1 (demonstrates global lookup).
a1 = 100


class Demo:
    """Tiny demo showing that a method can read a module-level global."""

    def mymethod1(self):
        # ``a1`` resolves through the module's global scope at call time.
        print(a1)


myobj = Demo()
myobj.mymethod1()
| StarcoderdataPython |
6677990 | <reponame>webhacking/hhvm<filename>hphp/hack/test/integration/runner.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import sys
import unittest
import test_save_mini
import test_save_restore
if __name__ == '__main__':
    # Parse the two required binary paths from the command line.
    cli = argparse.ArgumentParser()
    cli.add_argument('hh_server')
    cli.add_argument('hh_client')
    options = cli.parse_args()

    # Inject the binary locations into the test module before running tests.
    test_save_restore.hh_server = options.hh_server
    test_save_restore.hh_client = options.hh_client

    loader = unittest.defaultTestLoader
    combined_suite = loader.loadTestsFromTestCase(test_save_restore.TestSaveRestore)
    combined_suite.addTests(loader.loadTestsFromTestCase(test_save_mini.TestSaveMiniState))

    # Exit non-zero so CI notices failures.
    outcome = unittest.TextTestRunner(verbosity=2).run(combined_suite)
    if not outcome.wasSuccessful():
        sys.exit(1)
| StarcoderdataPython |
1865077 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
from ggrc.models.mixins import Timeboxed
from ggrc import db
class RelativeTimeboxed(Timeboxed):
    """Timeboxed mixin whose start/end dates are expressed as offsets
    relative to a recurrence frequency rather than as absolute dates.

    Interpretation of the offset columns by frequency (per the original
    comments; the semantics are enforced by consuming code, not here):

    - annual:    ``*_month`` is the 0-indexed month (0 = January) and
                 ``*_day`` the 0-indexed offset day within it.
    - quarterly: ``*_month`` is in [0, 1, 2] as the offset within the
                 quarter; ``*_day`` as for annual.
    - weekly:    ``*_month`` is ignored; ``*_day`` is a weekday index.
                 NOTE(review): the original comment said "[1,2,3,4,5]
                 where 0 is Monday", which is internally inconsistent --
                 confirm the real convention against the consuming code.

    All columns are nullable so non-relative schedules can leave them unset.
    """
    relative_start_month = db.Column(db.Integer, nullable=True)
    relative_start_day = db.Column(db.Integer, nullable=True)
    relative_end_month = db.Column(db.Integer, nullable=True)
    relative_end_day = db.Column(db.Integer, nullable=True)
| StarcoderdataPython |
11393863 | <reponame>azadoks/aiida-core
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Definition of known configuration options and methods to parse and get option values."""
from typing import Any, Dict, List, Tuple
import jsonschema
from aiida.common.exceptions import ConfigurationError
__all__ = ('get_option', 'get_option_names', 'parse_option', 'Option')
NO_DEFAULT = ()
class Option:
    """Represent a single configuration option, wrapping its JSON schema.

    Instances are thin read-only views over the schema dictionary; the
    schema itself drives validation via ``jsonschema``.
    """

    def __init__(self, name: str, schema: Dict[str, Any]):
        self._name = name
        self._schema = schema

    def __str__(self) -> str:
        return f'Option(name={self._name})'

    @property
    def name(self) -> str:
        """Return the option name, e.g. ``caching.enabled_for``."""
        return self._name

    @property
    def schema(self) -> Dict[str, Any]:
        """Return the raw JSON schema dictionary for this option."""
        return self._schema

    @property
    def valid_type(self) -> Any:
        """Return the JSON-schema ``type`` (e.g. ``'boolean'``) or ``None``."""
        return self._schema.get('type', None)

    @property
    def default(self) -> Any:
        """Return the schema default, or the ``NO_DEFAULT`` sentinel if unset."""
        return self._schema.get('default', NO_DEFAULT)

    @property
    def description(self) -> str:
        """Return the human-readable description, or '' if none is declared."""
        return self._schema.get('description', '')

    @property
    def global_only(self) -> bool:
        """Return whether the option may only be set in the global configuration."""
        return self._schema.get('global_only', False)

    def validate(self, value: Any, cast: bool = True) -> Any:
        """Validate a value against this option's schema.

        :param value: the input value
        :param cast: if ``True``, first attempt to coerce ``value`` to the
            schema's declared type (e.g. the strings '0'/'false'/'f' become
            ``False`` for boolean options); coercion failures are silently
            ignored and the raw value goes to schema validation instead.
        :return: the (possibly coerced) validated value
        :raise: ConfigValidationError if schema validation fails, or if a
            caching identifier pattern is malformed
        """
        # pylint: disable=too-many-branches
        # Imported lazily to avoid circular imports with the config module.
        from aiida.manage.caching import _validate_identifier_pattern
        from .config import ConfigValidationError
        if cast:
            try:
                if self.valid_type == 'boolean':
                    if isinstance(value, str):
                        if value.strip().lower() in ['0', 'false', 'f']:
                            value = False
                        elif value.strip().lower() in ['1', 'true', 't']:
                            value = True
                    else:
                        value = bool(value)
                elif self.valid_type == 'string':
                    value = str(value)
                elif self.valid_type == 'integer':
                    value = int(value)
                elif self.valid_type == 'number':
                    value = float(value)
                elif self.valid_type == 'array' and isinstance(value, str):
                    # Whitespace-separated string becomes a list of tokens.
                    value = value.split()
            except ValueError:
                # Coercion is best-effort; let schema validation report errors.
                pass
        try:
            jsonschema.validate(instance=value, schema=self.schema)
        except jsonschema.ValidationError as exc:
            raise ConfigValidationError(message=exc.message, keypath=[self.name, *(exc.path or [])], schema=exc.schema)
        # special caching validation: each entry must be a valid identifier pattern
        if self.name in ('caching.enabled_for', 'caching.disabled_for'):
            for i, identifier in enumerate(value):
                try:
                    _validate_identifier_pattern(identifier=identifier)
                except ValueError as exc:
                    raise ConfigValidationError(message=str(exc), keypath=[self.name, str(i)])
        return value
def get_schema_options() -> Dict[str, Dict[str, Any]]:
    """Return the mapping of option name to option sub-schema from the config schema."""
    from .config import config_schema
    return config_schema()['definitions']['options']['properties']
def get_option_names() -> List[str]:
    """Return the names of all available configuration options."""
    return [*get_schema_options()]
def get_option(name: str) -> Option:
    """Return the :class:`Option` with the given name.

    :raise ConfigurationError: if no option with that name exists.
    """
    available = get_schema_options()
    if name in available:
        return Option(name, available[name])
    raise ConfigurationError(f'the option {name} does not exist')
def parse_option(option_name: str, option_value: Any) -> Tuple[Option, Any]:
    """Parse and validate a value for a configuration option.

    :param option_name: the name of the configuration option
    :param option_value: the raw option value
    :return: a tuple of the resolved :class:`Option` and the validated value
    """
    resolved = get_option(option_name)
    return resolved, resolved.validate(option_value, cast=True)
| StarcoderdataPython |
3460094 | import os, sys, re, tarfile, collections
import argparse
import pickle
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from nltk.util import ngrams
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
class Non_Academic:
    """Build unigram-to-quadgram frequency distributions over a non-academic
    corpus (Amazon reviews), down-sampled so that its total word count matches
    the academic corpus it is compared against.

    :param amazon_review: directory containing the Amazon review ``train.csv``
    :param serialize_path: directory where pickled n-gram counters are stored
    """

    def __init__(self, amazon_review, serialize_path):
        self.AMAZON_REVIEW = amazon_review
        self.SERIALIZE = serialize_path
        # One frequency counter per n-gram order; keys are token tuples.
        self.unigrams_ctr = Counter()
        self.bigrams_ctr = Counter()
        self.trigrams_ctr = Counter()
        self.quadgrams_ctr = Counter()

    def clean_content(self, content, remove_stopwords=False, lemmatize_words=True):
        """Clean a text string: strip special characters, lower-case, and
        optionally remove stop words and lemmatize.

        Bug fix: the original only tokenized inside the ``remove_stopwords``
        branch, so the default call (``remove_stopwords=False,
        lemmatize_words=True``) raised ``NameError`` on ``word_tokens``, and
        ``remove_stopwords=True`` with ``lemmatize_words=False`` silently
        removed nothing.

        :param content: the string to clean
        :type content: str
        :param remove_stopwords: drop English stop words, default False
        :type remove_stopwords: bool
        :param lemmatize_words: lemmatize tokens with WordNet, default True
        :type lemmatize_words: bool
        :return: the cleaned string
        """
        content = " ".join(re.findall(r"[a-zA-Z0-9]+", content))  # remove special characters
        content = content.lower()
        word_tokens = word_tokenize(content)
        if remove_stopwords:
            stop_words = set(stopwords.words('english'))
            word_tokens = [word for word in word_tokens if word not in stop_words]
        if lemmatize_words:
            lem = WordNetLemmatizer()
            word_tokens = [lem.lemmatize(word) for word in word_tokens]
        return " ".join(word_tokens)

    def get_ngram(self, content, n):
        """Return the list of n-grams (token tuples) of the whitespace-split content.

        :param content: source string
        :type content: str
        :param n: n-gram order (1, 2, 3, 4, ...)
        :type n: int
        """
        tokenized = content.split()
        return list(ngrams(tokenized, n))

    def update_unigram_counter(self, unigram):
        """Add each unigram tuple in ``unigram`` to the unigram frequency counter."""
        self.unigrams_ctr.update(unigram)

    def update_bigram_counter(self, bigram):
        """Add each bigram tuple in ``bigram`` to the bigram frequency counter."""
        self.bigrams_ctr.update(bigram)

    def update_trigram_counter(self, trigram):
        """Add each trigram tuple in ``trigram`` to the trigram frequency counter."""
        self.trigrams_ctr.update(trigram)

    def update_quadgram_counter(self, quadgram):
        """Add each quadgram tuple in ``quadgram`` to the quadgram frequency counter."""
        self.quadgrams_ctr.update(quadgram)

    def compute_ngrams(self, pickle_filename_unigrams='non_academic_unigrams.pkl', pickle_filename_bigrams='non_academic_bigrams.pkl', pickle_filename_trigrams='non_academic_trigrams.pkl', pickle_filename_quadgrams='non_academic_quadgrams.pkl'):
        """Compute 1-4 gram counters over the down-sampled review corpus and
        pickle them into ``self.SERIALIZE``.

        :param pickle_filename_unigrams: output file for the unigram counter
        :param pickle_filename_bigrams: output file for the bigram counter
        :param pickle_filename_trigrams: output file for the trigram counter
        :param pickle_filename_quadgrams: output file for the quadgram counter
        """
        df = pd.read_csv(os.path.join(self.AMAZON_REVIEW, 'train.csv'), header=None)
        review_texts = df[2]  # third column holds the review body
        down_sample_df = pd.DataFrame()
        # Shuffle the reviews so the down-sample is not biased by file order.
        down_sample_df['review_texts'] = review_texts.sample(frac=1).reset_index(drop=True)
        down_sample_df['count'] = down_sample_df['review_texts'].str.split().str.len()
        # Total number of words in the academic corpus (precomputed externally).
        TOTAL = 75184498
        # Index where the cumulative word count reaches TOTAL.  np.searchsorted
        # replaces the original Series.searchsorted(...)[0], which breaks on
        # newer pandas where a scalar query returns a scalar, not an array.
        pos = int(np.searchsorted(down_sample_df['count'].cumsum().values, TOTAL))
        down_sample_df = down_sample_df.iloc[:pos]
        review_texts = down_sample_df['review_texts']
        for _, text in review_texts.items():  # .items() works on old and new pandas
            cleaned = self.clean_content(text)
            self.update_unigram_counter(self.get_ngram(cleaned, 1))
            self.update_bigram_counter(self.get_ngram(cleaned, 2))
            self.update_trigram_counter(self.get_ngram(cleaned, 3))
            self.update_quadgram_counter(self.get_ngram(cleaned, 4))
        for filename, counter in ((pickle_filename_unigrams, self.unigrams_ctr),
                                  (pickle_filename_bigrams, self.bigrams_ctr),
                                  (pickle_filename_trigrams, self.trigrams_ctr),
                                  (pickle_filename_quadgrams, self.quadgrams_ctr)):
            with open(os.path.join(self.SERIALIZE, filename), 'wb') as f:
                pickle.dump(counter, f)

    def load_ngram_ctrs(self, pickle_filename_unigrams='non_academic_unigrams.pkl', pickle_filename_bigrams='non_academic_bigrams.pkl', pickle_filename_trigrams='non_academic_trigrams.pkl', pickle_filename_quadgrams='non_academic_quadgrams.pkl'):
        """Load previously pickled n-gram counters back onto this instance.

        :param pickle_filename_unigrams: input file for the unigram counter
        :param pickle_filename_bigrams: input file for the bigram counter
        :param pickle_filename_trigrams: input file for the trigram counter
        :param pickle_filename_quadgrams: input file for the quadgram counter
        """
        for filename, attr in ((pickle_filename_unigrams, 'unigrams_ctr'),
                               (pickle_filename_bigrams, 'bigrams_ctr'),
                               (pickle_filename_trigrams, 'trigrams_ctr'),
                               (pickle_filename_quadgrams, 'quadgrams_ctr')):
            with open(os.path.join(self.SERIALIZE, filename), 'rb') as f:
                setattr(self, attr, pickle.load(f))
if __name__ == '__main__':
    # CLI entry point: parse the corpus/output paths and build all n-gram pickles.
    parser = argparse.ArgumentParser(description='Computes the n-gram distribution from non academic corpus')
    parser.add_argument('--text_non_academic_corpus', help='Path to text non-academic corpus', required=True)
    # NOTE(review): "picke" typo in the user-facing help string below -- fix as
    # "pickle" in a separate behavior-affecting change.
    parser.add_argument('--serialize_output', help='Path to output picke objects', required=True)
    args = parser.parse_args()
    non_academic = Non_Academic(amazon_review=args.text_non_academic_corpus, serialize_path=args.serialize_output)
    non_academic.compute_ngrams()
| StarcoderdataPython |
306984 | from django import forms
from captcha.fields import CaptchaField
import datetime
class UserForm(forms.Form):
username = forms.CharField(label='用户名', max_length= 128, widget = forms.TextInput(attrs={'class':'form-control'}))
password = forms.CharField(label='密码', max_length= 256, widget = forms.PasswordInput(attrs={'class': 'form-control'}))
captcha = CaptchaField(label='验证码')
class RegisterForm(forms.Form):
gender = (('male','男'), ('female','女'),)
username = forms.CharField(label='用户名', max_length= 128, widget = forms.TextInput(attrs={'class':'form-control'}))
password1 = forms.CharField(label='密码', max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password2 = forms.CharField(label='确认密码', max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
email = forms.EmailField(label='邮箱地址', widget=forms.EmailInput(attrs={'class': 'form-control'}))
sex = forms.ChoiceField(label='性别', choices=gender)
captcha = CaptchaField(label='验证码')
month = datetime.datetime.now().month
class TypeForm(forms.Form):
types = (('1', '1.调账说明'), ('2', '2.银行未达账项'), ('3', '3.组织架构及共享分工明细'), ('4', '4.融资费用缺失发票'), ('5', '5.金蝶报表制作问题'),('6', '6.2018年报调整账套'),)
months = (('1', '1月',), ('2', '2月',), ('3', '3月',), ('4', '4月',), ('5', '5月',), ('6', '6月',), ('7', '7月',), ('8', '8月',), ('9', '9月',), ('10', '10月',), ('11', '11月',), ('12', '12月',),)
type = forms.ChoiceField(label='种类', choices=types)
month = forms.ChoiceField(label='月份', choices=months, initial=month)
class ChPswdForm(forms.Form):
username = forms.CharField(label='用户名', max_length= 128, widget = forms.TextInput(attrs={'class':'form-control'}))
password1 = forms.CharField(label='旧密码', max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password2 = forms.CharField(label='新密码', max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password3 = forms.CharField(label='确认新密码', max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'})) | StarcoderdataPython |
1866840 | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import editdistance as ed
# CreateOnehotVariable function
# *** DEV NOTE : This is a workaround to achieve one, I'm not sure how this function affects the training speed ***
# This is a function to generate an one-hot encoded tensor with given batch size and index
# Input : input_x which is a Tensor or Variable with shape [batch size, timesteps]
# encoding_dim, the number of classes of input
# Output: onehot_x, a Variable containing onehot vector with shape [batch size, timesteps, encoding_dim]
def CreateOnehotVariable( input_x, encoding_dim=63):
if type(input_x) is Variable:
input_x = input_x.data
input_type = type(input_x)
batch_size = input_x.size(0)
time_steps = input_x.size(1)
input_x = input_x.unsqueeze(2).type(torch.LongTensor)
onehot_x = Variable(torch.LongTensor(batch_size, time_steps, encoding_dim).zero_().scatter_(-1,input_x,1)).type(input_type)
return onehot_x
# TimeDistributed function
# This is a pytorch version of TimeDistributed layer in Keras I wrote
# The goal is to apply same module on each timestep of every instance
# Input : module to be applied timestep-wise (e.g. nn.Linear)
# 3D input (sequencial) with shape [batch size, timestep, feature]
# output: Processed output with shape [batch size, timestep, output feature dim of input module]
def TimeDistributed(input_module, input_x):
    """Apply ``input_module`` independently to every timestep of ``input_x``.

    ``input_x`` has shape [batch, time, feature]; the module is run once on
    the flattened [batch*time, feature] view and the result is reshaped back
    to [batch, time, out_feature].
    """
    batch_size, time_steps = input_x.size(0), input_x.size(1)
    flattened = input_x.contiguous().view(-1, input_x.size(-1))
    processed = input_module(flattened)
    return processed.view(batch_size, time_steps, -1)
# LetterErrorRate function
# Merge the repeated prediction and calculate editdistance of prediction and ground truth
def _edit_distance(seq_a, seq_b):
    """Levenshtein distance between two sequences (Wagner-Fischer DP),
    replacing the third-party ``editdistance.eval`` dependency."""
    prev = list(range(len(seq_b) + 1))
    for i, a in enumerate(seq_a, start=1):
        curr = [i]
        for j, b in enumerate(seq_b, start=1):
            cost = 0 if a == b else 1
            curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + cost))
        prev = curr
    return prev[-1]


def LetterErrorRate(pred_y, true_y, data=None):
    """Return the per-sample letter error rate between predictions and labels.

    For each (prediction, ground-truth) pair of integer index sequences,
    padding (0) and <eos> (1) are stripped from the ground truth; in the
    prediction, 0 is skipped and decoding stops at the first 1.  The error
    is edit distance divided by the compressed ground-truth length.

    :param pred_y: batch of predicted index sequences
    :param true_y: batch of ground-truth index sequences
    :param data: accepted (and ignored) for compatibility with callers such
        as ``batch_iterator`` that pass the dataset name as a third
        positional argument; the original two-argument signature made those
        calls raise TypeError.
    :return: list of float error rates, one per sample
    """
    ed_accumalate = []
    for p, t in zip(pred_y, true_y):
        compressed_t = [w for w in t if (w != 1 and w != 0)]
        compressed_p = []
        for p_w in p:
            if p_w == 0:
                continue
            if p_w == 1:
                break
            compressed_p.append(p_w)
        ed_accumalate.append(_edit_distance(compressed_p, compressed_t) / len(compressed_t))
    return ed_accumalate
def label_smoothing_loss(pred_y, true_y, label_smoothing=0.1):
    """Cross-entropy with label smoothing for log-probability predictions.

    ``pred_y`` holds log-probabilities and ``true_y`` one-hot targets in
    which padded timesteps are all-zero vectors; multiplying the smoothed
    target by the per-step one-hot sum keeps padding rows at zero so they
    contribute nothing to the loss.
    """
    assert pred_y.size() == true_y.size()
    real_step = torch.sum(true_y, dim=-1)                         # [batch, time]: 1 real, 0 padding
    seq_len = torch.sum(real_step, dim=-1, keepdim=True)          # [batch, 1]
    class_dim = true_y.size()[-1]
    # Smooth the one-hot targets; the trailing mask keeps padded rows all-zero.
    smoothed = ((1.0 - label_smoothing) * true_y + (label_smoothing / class_dim)) * real_step.unsqueeze(-1)
    per_step = torch.sum(smoothed * pred_y, dim=-1)               # [batch, time]
    return -torch.mean(torch.sum(per_step / seq_len, dim=-1))
def batch_iterator(batch_data, batch_label, listener, speller, optimizer, tf_rate, is_training, data='timit',**kwargs):
    """Run one Listen-Attend-Spell train/eval step on a single batch.

    :param batch_data: acoustic features [batch, time, feature] (with an
        extra leading dimension when ``kwargs['bucketing']`` is set)
    :param batch_label: one-hot labels [batch, label_len, class]
    :param listener: encoder module
    :param speller: attention/decoder module
    :param optimizer: optimizer; stepped only when ``is_training``
    :param tf_rate: teacher-forcing rate used during training
    :param is_training: if True, backpropagate and update the weights
    :param data: dataset name, forwarded to the LER calculation
    :param kwargs: must contain 'bucketing', 'use_gpu', 'output_class_dim',
        'label_smoothing' and 'max_label_len'
    :return: (batch loss as a numpy value, list of per-sample letter error rates)
    """
    bucketing = kwargs['bucketing']
    use_gpu = kwargs['use_gpu']
    output_class_dim = kwargs['output_class_dim']
    label_smoothing = kwargs['label_smoothing']
    # Load data: bucketed loaders deliver [1, batch, ...]; drop the extra dim.
    if bucketing:
        batch_data = batch_data.squeeze(dim=0)
        batch_label = batch_label.squeeze(dim=0)
    current_batch_size = len(batch_data)
    max_label_len = min([batch_label.size()[1],kwargs['max_label_len']])
    batch_data = Variable(batch_data).type(torch.FloatTensor)
    batch_label = Variable(batch_label, requires_grad=False)
    # index 0 is the padding symbol and is excluded from the NLL loss.
    criterion = nn.NLLLoss(ignore_index=0)
    if use_gpu:
        batch_data = batch_data.cuda()
        batch_label = batch_label.cuda()
        criterion = criterion.cuda()
    # Forwarding: teacher forcing only while training; free-running at eval.
    optimizer.zero_grad()
    listner_feature = listener(batch_data)
    if is_training:
        raw_pred_seq, _ = speller(listner_feature,ground_truth=batch_label,teacher_force_rate=tf_rate)
    else:
        raw_pred_seq, _ = speller(listner_feature,ground_truth=None,teacher_force_rate=0)
    # Stack per-timestep outputs into [batch, time, class] and clip to label length.
    pred_y = (torch.cat([torch.unsqueeze(each_y,1) for each_y in raw_pred_seq],1)[:,:max_label_len,:]).contiguous()
    # NOTE(review): the LetterErrorRate calls below pass `data` as a third
    # positional argument; they will raise TypeError unless LetterErrorRate's
    # signature accepts it -- confirm the intended signature.
    if label_smoothing == 0.0 or not(is_training):
        # Plain NLL path: NLLLoss expects [batch, class, time].
        pred_y = pred_y.permute(0,2,1)#pred_y.contiguous().view(-1,output_class_dim)
        true_y = torch.max(batch_label,dim=2)[1][:,:max_label_len].contiguous()#.view(-1)
        loss = criterion(pred_y,true_y)
        # variable -> numpy before sending into LER calculator
        batch_ler = LetterErrorRate(torch.max(pred_y.permute(0,2,1),dim=2)[1].cpu().numpy(),#.reshape(current_batch_size,max_label_len),
                                    true_y.cpu().data.numpy(),data) #.reshape(current_batch_size,max_label_len), data)
    else:
        # Label-smoothing path works directly on the one-hot targets.
        true_y = batch_label[:,:max_label_len,:].contiguous()
        true_y = true_y.type(torch.cuda.FloatTensor) if use_gpu else true_y.type(torch.FloatTensor)
        loss = label_smoothing_loss(pred_y,true_y,label_smoothing=label_smoothing)
        batch_ler = LetterErrorRate(torch.max(pred_y,dim=2)[1].cpu().numpy(),#.reshape(current_batch_size,max_label_len),
                                    torch.max(true_y,dim=2)[1].cpu().data.numpy(),data) #.reshape(current_batch_size,max_label_len), data)
    if is_training:
        loss.backward()
        optimizer.step()
    batch_loss = loss.cpu().data.numpy()
    return batch_loss, batch_ler
def log_parser(log_file_path):
    """Parse a training log into (train_loss, test_loss, train_ler, test_ler).

    Each log line is '_'-separated with the metrics at fixed positions:
    index 3 = training loss, 5 = training LER, 7 = test loss, 9 = test LER.

    :param log_file_path: path to the log file
    :return: four parallel lists of floats, one entry per log line
    """
    columns = ([], [], [], [])      # tr_loss, tt_loss, tr_ler, tt_ler
    positions = (3, 7, 5, 9)        # field index feeding each column
    with open(log_file_path, 'r') as handle:
        for line in handle:
            fields = line.split('_')
            for column, pos in zip(columns, positions):
                column.append(float(fields[pos]))
    return columns
# Collapse 61 phns to 39 phns
# http://cdn.intechopen.com/pdfs/15948/InTech-Phoneme_recognition_on_the_timit_database.pdf
def collapse_phn(seq, return_phn = False, drop_q = True):
    """Collapse 61-phoneme TIMIT indices onto the standard 39-phoneme set.

    Indices are offset by 2, matching the encoding used elsewhere in this
    module (0 and 1 are reserved symbols).

    :param seq: iterable of phoneme indices from the 61-phoneme set
    :param return_phn: if True, return phoneme strings instead of indices
    :param drop_q: if True, discard the glottal stop "q" entirely; otherwise
        it is replaced by ' ' (which has no index, so drop_q=False is only
        meaningful together with return_phn=True)
    :return: list of collapsed indices (or phoneme strings)
    """
    phonemes = ["b", "bcl", "d", "dcl", "g", "gcl", "p", "pcl", "t", "tcl", "k", "kcl", "dx", "q", "jh", "ch", "s", "sh", "z", "zh",
                "f", "th", "v", "dh", "m", "n", "ng", "em", "en", "eng", "nx", "l", "r", "w", "y",
                "hh", "hv", "el", "iy", "ih", "eh", "ey", "ae", "aa", "aw", "ay", "ah", "ao", "oy",
                "ow", "uh", "uw", "ux", "er", "ax", "ix", "axr", "ax-h", "pau", "epi", "h#"]
    phonemes2index = {k: (v + 2) for v, k in enumerate(phonemes)}
    index2phonemes = {(v + 2): k for v, k in enumerate(phonemes)}
    phonemse_reduce_mapping = {"b":"b", "bcl":"h#", "d":"d", "dcl":"h#", "g":"g", "gcl":"h#", "p":"p", "pcl":"h#", "t":"t", "tcl":"h#", "k":"k", "kcl":"h#", "dx":"dx", "q":"q", "jh":"jh", "ch":"ch", "s":"s", "sh":"sh", "z":"z", "zh":"sh",
                               "f":"f", "th":"th", "v":"v", "dh":"dh", "m":"m", "n":"n", "ng":"ng", "em":"m", "en":"n", "eng":"ng", "nx":"n", "l":"l", "r":"r", "w":"w", "y":"y",
                               "hh":"hh", "hv":"hh", "el":"l", "iy":"iy", "ih":"ih", "eh":"eh", "ey":"ey", "ae":"ae", "aa":"aa", "aw":"aw", "ay":"ay", "ah":"ah", "ao":"aa", "oy":"oy",
                               "ow":"ow", "uh":"uh", "uw":"uw", "ux":"uw", "er":"er", "ax":"ah", "ix":"ih", "axr":"er", "ax-h":"ah", "pau":"h#", "epi":"h#", "h#": "h#"}
    # Single pass: decode index -> phoneme, collapse, then handle "q".
    collapsed = []
    for idx in seq:
        phn = phonemse_reduce_mapping[index2phonemes[idx]]
        if phn == "q":
            if drop_q:
                continue
            phn = ' '
        collapsed.append(phn)
    if return_phn:
        return collapsed
    # Re-encode the collapsed phonemes into indices for evaluation.
    return [phonemes2index[phn] for phn in collapsed]
256481 | <filename>src/software/decode/ctypeAutoGen.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
'''
Brief:
ctypeAutoGen.py - Method and apparatus to extract compiled code into interpreted code.
Description:
Software application to decode firmware C data structures to Python ctypes.
Classes:
Enter ("") to display Class listings.
Requirement(s):
All data structure types must be named, meaning no Anonymous subtypes.
I.E. Invalid coding standard!
typedef struct
{
union
{
struct
{
uint32_t bottom: 16;
uint32_t top: 16;
};
uint32_t height;
};
} person;
I.E. Valid coding standard!
typedef struct
{
uint32_t bottom: 16;
uint32_t top: 16;
} personParts_t;
typedef union
{
uint32_t height; // Note: the all or struct must be before the bitfields.
personParts_t part;
} personAll_t;
typedef struct
{
personAll_t allMeta;
} personHeight_t;
Usage:
To decode within a wrapper or a python script
from telemetry.commands.telemetryCmd import *
myObj = TelemetryObjectCommands()
myObj.parseTelemetry(5,inFile=r"\testball\testTelemHostInitObjBis\Object_5_ver1.0_bank0.bin") #todo: update usage
'''
################################################################################################################
################################################################################################################
## General Python module imports
################################################################################################################
################################################################################################################
import os, re, sys, ctypes, shutil, errno, logging, platform, uuid, random
import time, builtins, struct, binascii, filecmp # @todo cleanup unused... exceptions
try:
import enum # @todo cleanup unused
from enum import * # pip install enum34 # backport from 3.x to 2.7 https://pypi.org/project/enum34/ # @todo cleanup explicit usage
except:
pass
################################################################################################################
################################################################################################################
## Explicit importing of headers
################################################################################################################
################################################################################################################
from pprint import pprint
from threading import Timer
from subprocess import Popen, PIPE
from optparse import OptionParser
from ctypes import * # @todo cleanup explicit usage
from ctypes.util import find_library # @todo cleanup explicit usage
from optparse import OptionGroup # @todo cleanup explicit usage
from sys import version_info # @todo cleanup explicit usage
# from builtins import classmethod, int, long # @todo cleanup explicit usage
# from exceptions import * # @todo cleanup explicit usage
################################################################################################################
################################################################################################################
## Debug methods.
################################################################################################################
################################################################################################################
# Feature switches for optional/experimental behavior; both disabled by default.
ENABLE_CLANG = 0 # @todo Adding LLVM clang parser
ENABLE_DEBUG_ENTER = 0 # @todo debug switch
################################################################################################################
################################################################################################################
## LLVM CLang Compiler Keywords
################################################################################################################
################################################################################################################
# Disabled placeholder branch: when enabled it only warns; the clang/ctypeslib
# imports below remain commented out until the LLVM-based parser is implemented.
if ENABLE_CLANG:
    print("Using Clang and it is not supported yet...")
# Diagram of API https://coggle.it/diagram/VSk7_32dyC9M7Wtk/t/python-clang
# import clang # @todo
# from clang.cindex import Index # @todo
# from clang.cindex import CursorKind, TypeKind # @todo
# from clang.cindex import Index, TranslationUnit # @todo
# from clang.cindex import TypeKind # @todo
# import ctypeslib # @todo
# from ctypeslib.codegen import cursorhandler # @todo
# from ctypeslib.codegen import typedesc # @todo
# from ctypeslib.codegen import typehandler # @todo
# from ctypeslib.codegen import util # @todo
# from ctypeslib.codegen.util import log_entity # @todo
# from ctypeslib.codegen.handler import ClangHandler # @todo
# from ctypeslib.codegen.handler import CursorKindException # @todo
# from ctypeslib.codegen.handler import InvalidDefinitionError # @todo
# from ctypeslib.codegen.handler import DuplicateDefinitionException # @todo
################################################################################################################
################################################################################################################
## Operation Mode
################################################################################################################
################################################################################################################
# Module-level media trunk selector; set by redefineMedia().
TRUNK = None


def redefineMedia(shellMode=False):
    '''
    Select the media trunk stored in the module-level TRUNK global:
    "NAND" when shellMode compares equal to True, otherwise "SXP".
    '''
    global TRUNK
    TRUNK = "NAND" if shellMode == True else "SXP"
################################################################################################################
################################################################################################################
## Filenaming Globals and Updates for Threading
################################################################################################################
################################################################################################################
extTXT = ".txt"
extRC = ".rc"

# (global name, base file name, extension) for every generated artifact.
# Drives both the default assignments below and redefineFileNames().
_FNAME_SPEC = (
    ("fname_structDefFile", "ctypeAutoGen_structDefs", extTXT),
    ("fname_subStructDefFile", "ctypeAutoGen_subStructDefs", extTXT),
    ("fname_structSizeFile", "ctypeAutoGen_structSizes", extTXT),
    ("fname_srcFileFile", "ctypeAutoGen_srcFiles", extTXT),
    ("fname_typedefFile", "ctypeAutoGen_typedefs", extTXT),
    ("fname_tempSubStructDefs", "ctypeAutoGen_tempSubStructDefs", extTXT),
    ("fname_logFileName", "ctypeAutoGen_log", extTXT),
    ("fname_multiCmdFile", "ctypeAutoGen_multiCmdFile", extRC),
    ("fname_subStructMultiCmdFile", "ctypeAutoGen_subStructMultiCmdFile", extRC),
    ("fname_structSizeMultiCmdFile", "ctypeAutoGen_structSizeMultiCmdFile", extRC),
    ("fname_srcFileMultiCmdFile", "ctypeAutoGen_srcFileMultiCmdFile", extRC),
    ("fname_typedefMultiCmdFile", "ctypeAutoGen_typedefMultiCmdFile", extRC),
)

# Default (non-unique) file names.
for _gname, _base, _ext in _FNAME_SPEC:
    globals()[_gname] = _base + _ext


def redefineFileNames():
    '''
    Regenerate every ctypeAutoGen_* artifact file name with a unique
    UUID component (derived from PID, timestamp and a random value) so
    that concurrent runs do not collide on the same files.
    '''
    seed = "-".join([str(os.getpid()), str(datetime.now()), str(random.randint(1, 1024))])
    unique_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, seed))
    for gname, base, ext in _FNAME_SPEC:
        globals()[gname] = "{}_{}{}".format(base, unique_id, ext)
################################################################################################################
################################################################################################################
## Reg Expression
################################################################################################################
################################################################################################################
# Without clang-based parsing available, default the media trunk to NAND.
if not ENABLE_CLANG:
    TRUNK = "NAND"
#########################################################################
# Graphical Flow Draw: https://www.debuggex.com/?flavor=python#cheatsheet
#########################################################################
# Legend: Skip means ? outside of statement and Loop means (?) within. #
# ....................Skip............Skip...............................#
# ...................._____..........._____..............................#
# ....................|...|...........|...|..............................#
# ====Start====(})====(\s)====(\w)====(\s)====[;]====($)====End====......#
# ....................|...|...|...|...|...|..............................#
# ....................|___|...........|___|..............................#
# ....................Loop............Loop...............................#
#########################################################################
# Matches a structure closing line such as "} name_t;" or "} name_t:".
detectedStructureMainName = re.compile(r"(})(\s?)+(\w)+?(\s?)+?[;:]$")
#########################################################################
# ....................Skip............Skip...............................#
# ...................._____..........._____..............................#
# ....................|...|...........|...|..............................#
# ====Start====(})====(\s)====(\w)====(\s)====[;]====($)====End====......#
# ....................|...|...|...|...|...|..............................#
# ....................|___|...........|___|..............................#
# ....................Loop............Loop...............................#
#########################################################################
# Same as the main-name pattern but only accepts ';' terminators (sub-structs).
detectedStructureSubName = re.compile(r"(})(\s?)+(\w)+?(\s?)+?[;]$")
#########################################################################
# ....................Skip...............................................#
# ...................._____..............................................#
# ....................|...|..............................................#
# ====Start====(})====(\s)====[;]====($)====End====......................#
# ....................|...|..............................................#
# ....................|___|..............................................#
# ....................Loop...............................................#
#########################################################################
# Matches an anonymous closing line "};" (no trailing type name).
detectedAnonymousName = re.compile(r"(})(\s?)+?[;]$")
############################################################################
# Detection of Struct or Union Pointer in a line so we can assign MMU type #
############################################################################
# I.E.myvalue = struct transDmaDwordDesc_t*dmaAdmin;                       #
############################################################################
detectComplexStructOrUnionPointer = re.compile(
    r"(((\s+(\w)+(\s)+)|(\s+(\w)+=\s+))|(\s+)?)(struct|union)(\s)+?((\w)+)?(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")
############################################################################
# Detection of Struct or Union Pointer in a line so we can assign MMU type #
############################################################################
# I.E.struct transDmaDwordDesc_t*dmaAdmin;                                 #
# I.E.union transDmaDwordDesc_t*dmaAdmin;                                  #
############################################################################
detectSimpleStructOrUnionPointer = re.compile(
    r"((\s+)?)(struct|union)(\s)+?((\w)+)?(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")
############################################################################
# Detection of basic type Pointer in a line so we can assign MMU type      #
############################################################################
# I.E.char*dmaAdmin;                                                       #
############################################################################
detectBasicPointer = re.compile(r"((\s+)?)(\w+)(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")
# Sequences used in matching. Use precompiled version to accelerate code.
# Slot 0 is intentionally unused; slots are addressed by historical index
# throughout the file, so do not renumber them.
matchSequence = [None] * 27  # Assign size to the array
matchSequence[1] = re.compile(r"\d+: (.+)$")             # "NNN: path" lines from MULTI file listings
matchSequence[2] = re.compile(r"^//")                    # C++-style comment line
matchSequence[3] = re.compile(r"^ObjectBegin==>(.+)")    # object marker emitted by our MULTI scripts
matchSequence[4] = re.compile(r"^\_+")
matchSequence[5] = re.compile(r"^0x[a-fA-F0-9]+$")       # bare hex value (e.g. sizeof output)
matchSequence[6] = re.compile(r"^union \{$")
matchSequence[7] = re.compile(r"^(\w+) \{$")
matchSequence[8] = re.compile(r"^(\w+) (\w+) \{$")
matchSequence[9] = re.compile(r"^(\w+) union (\w+) \{$")
matchSequence[10] = re.compile(r"^(\w+) (\w+) (\w+) \{$")
matchSequence[11] = re.compile(r"^(\w+) = (\w+) \{$")
matchSequence[12] = re.compile(r"^(\w+) = union (\w+) \{$")
matchSequence[13] = re.compile(r"^(\w+) = (\w+) (\w+) \{$")
matchSequence[14] = re.compile(r"^([\w ]+) ([*\w]+);$")  # "type name;" member line
matchSequence[15] = re.compile(r"^(\w+) = union (\w+?::.+) \{")
matchSequence[16] = re.compile(r"^(\w+) = (\w+?::.+) \{")
matchSequence[17] = re.compile(r"^(\w+) (\w+?::.+) \{")
matchSequence[18] = re.compile(r"^\d+$")                 # bare decimal value
matchSequence[19] = re.compile(r"^(versionMajor) = (.+)")
matchSequence[20] = re.compile(r"^(versionMinor) = (.+)")
matchSequence[21] = re.compile(
    r"(\w+_[et])[ ;:]?")  # NAND type enumeration, and type regex detection. @todo name_size_t causes a slice of name_s detected so removed from NAND.
matchSequence[22] = re.compile(r"(\w+_[ets])[ ;:]?")  # SXP type enumeration, type, and struct regex detection.
matchSequence[23] = re.compile(
    r"(\w+_[et])[ ;]")  # NAND type enumeration, and type regex detection. @todo name_size_t causes a slice of name_s detected so removed from NAND.
matchSequence[24] = re.compile(r"(\w+_[ets])[ ;]")  # SXP type enumeration, type, and struct regex detection.
matchSequence[25] = re.compile(r"(versionMajor) = (.+)")
matchSequence[26] = re.compile(r"(versionMinor) = (.+)")
################################################################################################################
################################################################################################################
## Python 2 and 3 redefines
################################################################################################################
################################################################################################################
# if (sys.version_info[0] < 3):
# # Python 3 code in this block
# range = xrange # @todo python 3 convert
#
# if (sys.version[:3] > "2.3"):
# import hashlib # @todo cleanup explicit usage
# try:
# # Python 2
# xrange # @todo python 2 convert
# except NameError:
# # Python 3
# xrange = range
# Python 3 and 2 check
# try:
# input = raw_input # @todo python 3 convert
# except NameError:
# pass
################################################################################################################
################################################################################################################
## Execute the binaries if there are no changes. I.E. Intel just in time compiler make the binary faster as it
## is used within our system.
################################################################################################################
################################################################################################################
# Resolve the Green Hills (GHS) install path and executable suffix per host OS.
# On Windows the win32com shell module is also imported for elevated launches.
try:
    if platform.system() == 'Linux':
        ghsPath = '/usr/ghs'
        exeSuffix = ''
    elif platform.system() == 'Windows':
        ghsPath = 'c:/ghs'
        exeSuffix = '.exe'
        import win32com.shell.shell as shell
    elif 'CYGWIN_NT' in platform.system():
        ghsPath = 'c:/ghs'
        exeSuffix = '.exe'
except Exception as exc:  # narrowed from bare 'except:' so Ctrl-C is not swallowed
    print("Failed binary exe (%s)" % exc)

cmdPath, cmdFile = os.path.split(sys.argv[0])
usage = "%s --projectname PROJ_NAME --fwbuilddir FW_BUILD_DIR --tools TELEMETRY_TOOLS_DIR --multiexeversion MULTI_VER" % (
    sys.argv[0])
################################################################################################################
## Helper function to pause for user input (for debug use only)
################################################################################################################
################################################################################################################
def pressReturnToContinue(aString=None):
    """Pause for operator input when interactive debugging is enabled.

    Args:
        aString: optional context string shown in the prompt.

    Exits the process via sys.exit(0) when the operator enters 'q'.
    Does nothing (beyond a notice) when ENABLE_DEBUG_ENTER is disabled.
    """
    if (ENABLE_DEBUG_ENTER == 1):
        if aString is None:
            prompt = "PRESS RETURN TO CONTINUE or 'q' to quit: "
        else:
            prompt = "(%s) PRESS RETURN TO CONTINUE or 'q' to quit: " % (aString)
        # Bug fix: the old Python-3 branch used eval(input(...)), which executes
        # arbitrary operator input and raises SyntaxError on an empty line.
        # Plain input() is correct on both interpreter versions we care about.
        usersInput = input(prompt)
        if (usersInput == 'q'):
            sys.exit(0)
    else:
        print("Debug enter disabled.")
def formatDataControlObjects(enumGenFile):
    """Pretty-print telemetry data-control entries from *enumGenFile*.

    Each non-comment line is normalized (doxygen '///<' markers blanked,
    '=' treated as a field separator, whitespace collapsed) and echoed in
    the aligned enum-comment layout used inside datacontrol.h.

    Args:
        enumGenFile: path to the file holding raw object definition lines.
    """
    # Context manager guarantees the handle is closed even if the read fails;
    # the old code leaked the handle on error and left 'lines' undefined when
    # its mode check failed.
    with open(enumGenFile, 'r') as iFile:
        lines = iFile.readlines()
    objectList = []
    for l in lines:
        line = l.strip()
        line = re.sub(r'///<', ' ', line)  # blank the doxygen trailing-comment marker
        line = re.sub('=', ',', line)      # treat '=' as another field separator
        line = re.sub('^ +', '', line)
        line = re.sub(' +', ' ', line)     # collapse runs of blanks
        line = re.sub(' +,', ',', line)
        line = re.sub(', +', ',', line)
        if re.search(r'^//', line): continue  # skip comment lines
        if (line == ''): continue             # skip blank lines
        objectList.append(line.split(','))
    for i in range(len(objectList)):
        print(" %-40s = %20s, ///< %10s, %18s, %12s" % (
            str(objectList[i][0]), str(objectList[i][1]), str(objectList[i][2]), str(objectList[i][3]), 'No'))
################################################################################################################
################################################################################################################
# Base class to store intermediate data object info
################################################################################################################
################################################################################################################
class GenericObject(object):
    """Generic object node used to traverse the Abstract Syntax Tree (AST).

    Attributes:
        Tracking node holding the information needed to construct a Python
        c-type from the context free grammar (CFG) extracted out of MULTI.
    """

    def __init__(self, debug=False):
        """Init class with nil content.

        Args:
            debug: when True, enables extra diagnostics on this node.
        """
        self.subStructSizeGood = 0           # non-zero once the sub-struct size was validated
        self.arrayDimList = [1]              # array dimensions; [1] means a scalar member
        self.debug = debug
        self.depth = 0                       # nesting depth inside the parent struct tree
        self.endLineNum = 0                  # last source line of the definition
        self.fwObject = ''                   # firmware data-object name
        self.fwStruct = ''                   # firmware struct/typedef name
        self.parent = None                   # parent GenericObject, None at the root
        self.ancestryNames = []              # names from root down to this node
        self.ancestryTypes = []              # types from root down to this node
        self.altMemberList = []
        self.memberList = []                 # direct child members of this node
        self.sizeInBits = 0
        self.startLineNum = 0                # first source line of the definition
        self.structType = ''                 # 'struct' / 'union' / basic type label
        self.isPointer = None                # set when the member is a pointer (MMU-sized)
        self.uid = 0xBADDC0DE                # sentinel until a real telemetry UID is assigned
        self.versionMajor = 0xBADD ### Default
        self.versionMajorStr = 'versionMajor' ### Default
        self.versionMinor = 0xC0DE ### Default
        self.versionMinorStr = 'versionMinor' ### Default
################################################################################################################
################################################################################################################
# Class for FW C-Struct extraction Python C-Type generation
################################################################################################################
################################################################################################################
class CtypeAutoGen(object):
"""Class to extract FW C-Structs for Telemetry Data Objects
Attributes:
Traverses properties of the firmware code to construct the destination type
in multible stages.
"""
    def __init__(self, options):
        """Initialize extraction state: output folders, log, MULTI/ELF paths, type maps.

        Args:
            options: parsed command-line options; must expose fwBuildOutputDir,
                fwToolsDir, projectName, verbose, multiExeVersion, uidEnumFile
                and media.
        """
        ######################################################################
        ######################################################################
        # Data members needed for FW C-Struct extraction
        ######################################################################
        ######################################################################
        # All intermediate artifacts live under <fwBuildOutputDir>/telemetry.
        self.outDir = os.path.abspath(os.path.join(options.fwBuildOutputDir, 'telemetry'))
        if not os.path.exists(self.outDir):
            try:
                os.makedirs(self.outDir)
            except OSError:
                print("Failed to create the telemetry output folder")
                if ENABLE_DEBUG_ENTER: quit(2)
        # Walk up the tree: build -> objs -> projects root (used to resolve
        # the relative source paths reported by MULTI).
        self.buildDir = os.path.abspath(os.path.join(self.outDir, os.pardir))
        self.objsDir = os.path.abspath(os.path.join(self.buildDir, os.pardir))
        self.projectsDir = os.path.abspath(os.path.join(self.objsDir, os.pardir))
        print()
        print("self.outDir ", self.outDir)
        print("self.buildDir ", self.buildDir)
        print("self.objsDir ", self.objsDir)
        print("self.projectsDir", self.projectsDir)
        self.logFileName = os.path.join(self.outDir, fname_logFileName)
        logging.basicConfig(filename=self.logFileName, filemode='w', format='%(asctime)s %(levelname)s:%(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
        logging.debug('ctypeAutoGen.py Log')
        # logging.debug('Message to file')
        # logging.info('Message to file')
        # logging.warning('Message to file')
        self.log = logging
        self.options = options
        self.verbose = options.verbose
        self.telemetryDataControlFile = None  # located later from the source scan
        self.telemetryObjectList = []         # objects selected for generation
        self.telemetryObjectListAll = []      # every object found in the UID enum
        self.fileList = []                    # all .h/.c/.cpp files known to the build
        self.versionList = []                 # [macroName, value] pairs for *_MAJOR/_MINOR
        self.structNameList = []
        # Per-run scratch files exchanged with the MULTI debugger.
        self.structDefFile = os.path.join(self.outDir, fname_structDefFile)
        self.subStructDefFile = os.path.join(self.outDir, fname_subStructDefFile)
        self.structSizeFile = os.path.join(self.outDir, fname_structSizeFile)
        self.srcFileFile = os.path.join(self.outDir, fname_srcFileFile)
        self.typedefFile = os.path.join(self.outDir, fname_typedefFile)
        self.multiCmdFile = os.path.join(self.outDir, fname_multiCmdFile)
        self.subStructMultiCmdFile = os.path.join(self.outDir, fname_subStructMultiCmdFile)
        self.structSizeMultiCmdFile = os.path.join(self.outDir, fname_structSizeMultiCmdFile)
        self.srcFileMultiCmdFile = os.path.join(self.outDir, fname_srcFileMultiCmdFile)
        self.typedefMultiCmdFile = os.path.join(self.outDir, fname_typedefMultiCmdFile)
        self.ghsPath = ghsPath
        self.exeSuffix = exeSuffix
        # Either use the explicitly requested MULTI version or discover the
        # newest installed multi.exe under the GHS root.
        if (self.options.multiExeVersion is not None):
            self.multiExe = os.path.join(self.ghsPath, self.options.multiExeVersion, "multi%s" % (self.exeSuffix))
            if not os.path.exists(self.multiExe):
                print("\n-E- Could not locate multi.exe")
                pressReturnToContinue('self.multiExe %s does not exist' % self.multiExe)
                if ENABLE_DEBUG_ENTER: quit(3)
        else:
            self.multiExe = self.locateMultiExe()
            if self.multiExe is None:
                print("\n-E- Could not locate multi.exe")
                if ENABLE_DEBUG_ENTER: quit(4)
        print("Found the following multi debugger: %s" % (self.multiExe))
        self.elfFile = os.path.abspath(
            os.path.join(self.options.fwBuildOutputDir, '%s.elf' % (self.options.projectName)))
        if not os.path.exists(self.elfFile):
            print("-E- Could not locate elf file (%s)" % (self.elfFile))
            if ENABLE_DEBUG_ENTER: quit(5)
        if ENABLE_CLANG:
            print("Media Agnostic")
        # NOTE(review): the first elif below is True for ANY non-None media
        # value, so the "SXP" branch is unreachable - it looks like it was
        # meant to test media == "SXP". Confirm intent before changing.
        elif (self.options.media is not None or self.options.media == "NAND"):
            self.TRUNK = "NAND"
        elif (self.options.media == "NAND"):
            self.TRUNK = "SXP"
        else:
            self.TRUNK = "NAND"
        self.recursive = False            # flips True during sub-struct re-extraction
        self.subStructList = set()        # every sub-struct typedef discovered so far
        self.masterObjectList = {}
        self.numValidStructsFound = 0
        ######################################################################
        ######################################################################
        # Data members needed for Python C-Type generation
        ######################################################################
        ######################################################################
        self.masterObjectListUidValue = {};
        self.objectsInStructDefFile = []
        self.telemetryFolder = self.outDir
        if not os.path.exists(self.telemetryFolder):
            try:
                os.makedirs(self.telemetryFolder)
            except OSError:
                print("Failed to create the telemetry output folder")
                if ENABLE_DEBUG_ENTER: quit(6)
        self.fwToolsDir = self.options.fwToolsDir
        self.parsersFolder = os.path.join(self.outDir, 'parsers')
        if not os.path.exists(self.parsersFolder):
            try:
                os.makedirs(self.parsersFolder)
            except OSError:
                print("Failed to create the parsers folder")
                if ENABLE_DEBUG_ENTER: quit(7)
        self.commandsFolder = os.path.join(self.outDir, 'commands')
        if not os.path.exists(self.commandsFolder):
            try:
                os.makedirs(self.commandsFolder)
            except OSError:
                # NOTE(review): message says 'parsers' but this is the commands
                # folder - looks like a copy/paste; confirm before changing.
                print("Failed to create the parsers folder")
                if ENABLE_DEBUG_ENTER: quit(8)
        self.maxStructDepth = 0
        # C-Types supported within the automatic generation.
        self.cToPythonCtypeMap = {
            "signed char": "ctypes.c_int8",
            "unsigned char": "ctypes.c_uint8",
            "char": "ctypes.c_uint8",  ### Need to verify
            "bool": "ctypes.c_uint8",  ### Need to verify
            "signed short": "ctypes.c_int16",
            "short": "ctypes.c_int16",
            "unsigned short": "ctypes.c_uint16",
            "signed int": "ctypes.c_int32",
            "signed long": "ctypes.c_int32",
            "int": "ctypes.c_int32",
            "unsigned int": "ctypes.c_uint32",
            "unsigned long": "ctypes.c_uint32",
            "void": "ctypes.c_uint32",  ### Need to verify
            "signed long long": "ctypes.c_int64",
            "unsigned long long": "ctypes.c_uint64",
        }
def locateMultiExe(self):
"""Performs the green hills (GHS) compiler executable used in extracting definitions."""
multiExe = None
multiExeCtime = None
for path, dirs, files in os.walk(os.path.abspath(ghsPath)):
for filename in files:
if 'multi.exe' == filename.lower():
if (multiExe is None):
multiExe = os.path.join(path, filename)
multiExeCtime = os.path.getctime(multiExe)
elif (os.path.getctime(os.path.join(path, filename)) > multiExeCtime):
multiExe = os.path.join(path, filename)
multiExeCtime = os.path.getctime(multiExe)
print("\nMulti Debugger: %s" % (multiExe))
return multiExe
def runCmd(self, multicmd, timeout=100):
"""Performs an execution of GHS program."""
proc = Popen(multicmd, stdout=PIPE, stderr=PIPE, shell=True)
timer = Timer(timeout, proc.kill)
try:
timer.start()
stdout, stderr = proc.communicate()
# except:
# proc.kill()
# proc.wait()
finally:
timer.cancel()
def autoSrcDirScan(self):
"""Performs an export of all files within the build and writes to a file."""
multiCmdFile = open(self.srcFileMultiCmdFile, 'w+')
multiCmdFile.write('_LINES = 10000\n')
multiCmdFile.write('l f\n') ### List all source code files
multiCmdFile.write('quitall\n') ### Exit multi gracefully
multiCmdFile.close()
command = '%s %s -nodisplay -p %s -RO %s' % (
self.multiExe, self.elfFile, self.srcFileMultiCmdFile, self.srcFileFile)
self.executeMultiScript(command)
if os.path.isfile(self.srcFileFile) is not True:
print("srcFileFile does not exist")
quit(1)
iFile = open(self.srcFileFile, 'r')
lines = iFile.readlines()
iFile.close()
for i in range(len(lines)):
line = lines[i].strip()
if matchSequence[1].match(lines[i].strip()):
m = matchSequence[1].match(lines[i].strip())
l = m.group(1).strip()
f = os.path.abspath(os.path.join(self.projectsDir, l.strip()))
if f not in self.fileList:
self.fileList.append(f)
# for i in self.fileList: print i
print('\nScanned %i .h/.c/.cpp files\n' % (len(self.fileList)))
def locateTelemetryDataControlFile(self):
"""Obtain the datacontrol.h file location"""
for filename in self.fileList:
# print ("Filename" + filename)
if (self.options.uidEnumFile is not None):
if self.options.uidEnumFile.strip() in filename:
self.telemetryDataControlFile = os.path.join(filename)
return True
elif 'datacontrol.h' in filename:
self.telemetryDataControlFile = os.path.join(filename)
return True
return False
def getTelemetryObjectList(self, option=None, objList=[]):
"""Find the location of datacontrol.h file in the FW source code."""
######################################################################
######################################################################
# Extracts telemetry objects from datacontrol.h
######################################################################
######################################################################
# Find the location of datacontrol.h file in the FW source code
if not self.locateTelemetryDataControlFile():
print("\n-E- Could not locate datacontrol.h to extract the master telemetry object list")
if ENABLE_DEBUG_ENTER: quit(9)
print("\n datacontrol.h path:", self.telemetryDataControlFile)
# Process through datacontrol.h to get the Telemetry Master Object List
iFile = open(self.telemetryDataControlFile, 'r')
lines = iFile.readlines()
iFile.close()
for i in range(len(lines)):
if ('typedef enum' in lines[i]):
self.eUidEnumStart = i + 1
if ('} eUniqueIdentifier' in lines[i]):
self.eUidEnumEnd = i + 1
break
print("\neUniqueIdentifier Start: %i" % self.eUidEnumStart)
print("eUniqueIdentifier End: %i" % self.eUidEnumEnd)
foundTelemetryV2ListStart = False
for i in range(self.eUidEnumStart + 1, self.eUidEnumEnd):
if ('version 2' in lines[i].lower()):
foundTelemetryV2ListStart = True
continue
if ("{" in lines[i]) or ("}" in lines[i]): continue
line = lines[i].strip()
line = re.sub(' +', '', line)
line = re.sub('=', ',', line)
if re.search('^\/\/', line): continue
if (line == ''): continue
line = re.sub('\/\/\/<', '', line)
if (not foundTelemetryV2ListStart): continue
myList = line.split(',')
# myList[0] = re.sub('^uid_','',myList[0])
myList[0] = re.sub('^eUID_', '', myList[0])
# myList[0] = re.sub('_e','',myList[0])
self.telemetryObjectListAll.append(myList)
if option is not None:
# _____Added Object Control Capabilities________
if option.lower() == "all":
self.telemetryObjectList.append(myList)
elif option.lower() == "justthese":
if myList[1] in objList:
self.telemetryObjectList.append(myList)
elif option.lower() == 'range':
if int(myList[1]) in range(objList[0], objList[1]):
self.telemetryObjectList.append(myList)
else: # defaults to editing datacontrol.h comments
if ('yes' in myList[-1].lower() or 'no' in myList[-1].lower()):
self.telemetryObjectList.append(myList)
# _____End: Added Object Control Capabilities________
else:
if ('yes' in myList[-1].lower() or 'no' in myList[-1].lower()):
self.telemetryObjectList.append(myList)
print("\n====================================================================")
print("XXXXXXXXX Telemetry Object List obtained from datacontrol.h XXXXXXXX")
print("====================================================================")
for i in range(len(self.telemetryObjectList)):
print("%2i" % (i), )
for j in range(len(self.telemetryObjectList[i])):
try:
if matchSequence[18].search(self.telemetryObjectList[i][j]):
print("%8s" % (self.telemetryObjectList[i][j]))
elif ('yes' in self.telemetryObjectList[i][j].lower()):
print("%5s" % (self.telemetryObjectList[i][j]))
elif ('no' in self.telemetryObjectList[i][j].lower()):
print("%5s" % (self.telemetryObjectList[i][j]))
else:
print("%-20s" % (self.telemetryObjectList[i][j]))
except:
pass
print()
print("====================================================================")
def dumpAllTypedefs(self):
"""Performs an extraction of all definitions from GHS."""
multiCmdFile = open(self.typedefMultiCmdFile, 'w+')
multiCmdFile.write('_LINES = 50000\n')
multiCmdFile.write('l t\n') ### List all source code files
multiCmdFile.write('quitall\n') ### Exit multi gracefully
multiCmdFile.close()
command = '%s %s -nodisplay -p %s -RO %s' % (
self.multiExe, self.elfFile, self.typedefMultiCmdFile, self.typedefFile)
self.executeMultiScript(command)
def getTypeDefName(self, dataObjectName): # @todo potential to be use Amdahl's law aka parallel.
"""Performs an extraction of the definitions from GHS."""
typeDefName = None
objectDeclarationString = re.compile('([a-zA-Z0-9_]+_t) +%s;' % dataObjectName)
objectDeclarationStringStruct = re.compile('([a-zA-Z0-9_]+_t) +%s([[])' % dataObjectName)
objectDeclarationMajorVer = re.compile('_MAJOR') # @todo unused
objectDeclarationMinorVer = re.compile('_MINOR') # @todo unused
for filepath in self.fileList:
iFile = open(filepath, 'r')
lines = iFile.readlines()
iFile.close()
for txtLine in lines:
if (dataObjectName not in txtLine): continue
if dataObjectName + ';' in txtLine or dataObjectName + '[' in txtLine:
txtLine = txtLine.strip()
txtLine = re.sub('^ +', '', txtLine)
txtLine = re.sub(' +', ' ', txtLine)
if objectDeclarationString.search(txtLine):
m = objectDeclarationString.search(txtLine)
typeDefName = m.group(1)
return typeDefName
elif objectDeclarationStringStruct.search(txtLine):
m = objectDeclarationStringStruct.search(txtLine)
typeDefName = m.group(1)
return typeDefName
return typeDefName
def getAllFwVersionMacros(self):
"""Performs an extraction of the macro definitions from GHS."""
objectDeclarationMajorVer = re.compile('#define +([a-zA-Z0-9_]+_MAJOR) +(\d+)')
objectDeclarationMinorVer = re.compile('#define +([a-zA-Z0-9_]+_MINOR) +(\d+)')
for filepath in self.fileList:
for txtLine in open(filepath, 'r'):
if matchSequence[2].match(txtLine): continue
if objectDeclarationMajorVer.search(txtLine):
m = objectDeclarationMajorVer.search(txtLine)
self.versionList.append([m.group(1), m.group(2)])
elif objectDeclarationMinorVer.search(txtLine):
m = objectDeclarationMinorVer.search(txtLine)
self.versionList.append([m.group(1), m.group(2)])
return
def getFwMacroValue(self, macroName):
"""Performs an extraction of the definition from GHS."""
macroSearchPatternDecimal = re.compile('#define +%s +(\d+)' % macroName)
macroSearchPatternHex = re.compile('#define +%s +(0x[0-9a-fA-F]+)' % macroName)
for filepath in self.fileList:
for txtLine in open(filepath, 'r'):
if matchSequence[2].match(txtLine): continue
if macroSearchPatternDecimal.search(txtLine):
m = macroSearchPatternDecimal.search(txtLine)
return int(m.group(1))
elif macroSearchPatternHex.search(txtLine):
m = macroSearchPatternHex.search(txtLine)
return int(m.group(1), 16)
return None
def getTypeDefStruct(self, typeDefName):
"""Performs an extraction struct of the definitions from GHS."""
objectDeclarationString = re.compile('\} %s;' % typeDefName)
objectDeclarationStringWithTypedef = re.compile('typedef +([a-zA-Z0-9_]+_t) +%s;' % typeDefName)
objectDeclarationStringAlterStruct = re.compile('([a-zA-Z0-9_]+_t) +%s([[])' % typeDefName)
for filepath in self.fileList:
iFile = open(filepath, 'r')
lines = iFile.readlines()
iFile.close()
for i in range(len(lines)):
if typeDefName not in lines[i]: continue
txtLine = lines[i].strip()
txtLine = re.sub('^ +', '', txtLine)
txtLine = re.sub(' +', ' ', txtLine)
if matchSequence[2].match(txtLine): continue
if objectDeclarationString.search(txtLine) or \
objectDeclarationStringWithTypedef.search(txtLine) or \
objectDeclarationStringAlterStruct.search(txtLine):
return filepath, typeDefName, i ### Last param is 0-based lineNum where the typeDefName is found
return None, None, None
def isUserAdmin(self):
"""Performs an admin check for execution."""
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
    def runShellCmd(self, multicmd):
        """Re-launch the Python interpreter elevated ('runas') with *multicmd* as its arguments.

        NOTE(review): depends on the 'shell' module (win32com.shell.shell) that
        is only imported on Windows hosts - calling this on another platform
        raises NameError; confirm callers gate on the platform.
        """
        shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=multicmd)
def executeMultiScript(self, command):
"""Performs an execution for the GHS script."""
try:
# print("Platform: ", str(platform.system()))
if platform.system() == 'Linux':
### Just in case we need to do something different for Linux
self.runCmd(command)
elif platform.system() == 'Windows':
### Just in case we need to do something different for Windows
self.runCmd(command)
print("\nCommand: %s" % command)
except:
print("Failed Multi execution")
if ENABLE_DEBUG_ENTER: quit(10)
def appendNewSubStructs(self, tempSubStructDefFile):
"""Performs sub struct extraction."""
try:
tmpFileExists = os.path.isfile(tempSubStructDefFile)
if tmpFileExists:
iFile = open(tempSubStructDefFile, 'r')
lines = iFile.readlines()
iFile.close()
subFileExists = os.path.isfile(self.subStructDefFile)
if subFileExists:
oFile = open(self.subStructDefFile, 'a+')
for line in lines[6:]: ### Skip over the first 7 MULTI Debugger startup text lines
oFile.write(line)
oFile.close()
else:
oFile = open(self.subStructDefFile, 'w+')
for line in lines:
oFile.write(line)
oFile.close()
if ENABLE_DEBUG_ENTER: quit(11)
else:
if ENABLE_DEBUG_ENTER: quit(12)
except:
print("Failed appendNewSubStructs execution")
if ENABLE_DEBUG_ENTER: quit(13)
    def extractArraySubStructs(self):
        """Recursively extract sub-struct/union typedefs referenced as arrays.

        The first pass scans structDefFile; every later (recursive) pass
        scans the cumulative subStructDefFile. Each pass asks MULTI to dump
        only the NEWLY discovered typedefs and appends them, recursing until
        a pass discovers no new names (tempSubStructList stays empty), which
        skips the command block and ends the recursion.
        """
        tempSubStructList = set()  # typedefs first seen during THIS pass only
        if self.recursive:
            if (self.verbose): print("\n recursive extractArraySubStructs call")
            subFileExists = os.path.isfile(self.subStructDefFile)
            if subFileExists:
                fp = open(self.subStructDefFile, 'r')
            else:
                self.recursive = False
                if ENABLE_DEBUG_ENTER: quit(14)
                return
        else:
            if (self.verbose): print("\n Initial extractArraySubStructs call")
            subFileExists = os.path.isfile(self.structDefFile)
            if subFileExists:
                fp = open(self.structDefFile, 'r')
            else:
                self.recursive = False
                if ENABLE_DEBUG_ENTER: quit(15)
                return
        lines = fp.readlines()
        # print("\n Closing " + str(fp))
        fp.close()
        # Array members of the form "struct name_t member[" / "union name_t member[".
        objectInStructArray = re.compile('struct ([a-zA-Z0-9_]+_t) (([a-zA-Z0-9_]+)([[]))')
        objectInUnionArray = re.compile('union ([a-zA-Z0-9_]+_t) (([a-zA-Z0-9_]+)([[]))')
        for line in lines:
            if objectInStructArray.search(line):
                m = objectInStructArray.search(line)
                if (m.group(1) not in self.subStructList):
                    tempSubStructList.add(m.group(1))
                    self.subStructList.add(m.group(1))
            elif objectInUnionArray.search(line):
                m = objectInUnionArray.search(line)
                if (m.group(1) not in self.subStructList):
                    tempSubStructList.add(m.group(1))
                    self.subStructList.add(m.group(1))
        if self.options.verbose:
            print("subStructList: ", self.subStructList)
            print("tempSubStructList:", tempSubStructList)
        # print("\n Opening " + str(self.subStructMultiCmdFile))
        # Emit one Begin/definition/sizeof/End quartet per new typedef.
        multiCmdFile = open(self.subStructMultiCmdFile, 'w+')
        multiCmdFile.write('_LINES = 1000\n')
        for i in range(len(tempSubStructList)):
            # NOTE(review): set members come from regex group(1) and are always
            # strings, so the else-branch below looks unreachable - confirm.
            if list(tempSubStructList)[i] is not None:
                multiCmdFile.write('mprintf(\"SubstructBegin==>%s\\n\")\n' % (list(tempSubStructList)[i]))
                multiCmdFile.write(list(tempSubStructList)[i] + '\n')
                multiCmdFile.write("sizeof(%s)\n" % (list(tempSubStructList)[i]))
                multiCmdFile.write('mprintf(\"SubstructEnd==>%s\\n\")\n' % (list(tempSubStructList)[i]))
            else:
                multiCmdFile.write('mprintf(\"SubstructBegin==>%s\\n\")\n' % (list(tempSubStructList)[i]))
                multiCmdFile.write('struct {\n')
                multiCmdFile.write('{ ' + list(tempSubStructList)[i] + '\n')
                multiCmdFile.write("0")
                multiCmdFile.write('mprintf(\"SubstructEnd==>%s\\n\")\n' % (list(tempSubStructList)[i]))
                if ENABLE_DEBUG_ENTER: quit(16)
        multiCmdFile.write('quitall\n')  ### Exit multi gracefully
        # print("\n Closing " + str(multiCmdFile))
        multiCmdFile.close()
        tempSubStructDefFile = os.path.join(self.outDir, fname_tempSubStructDefs)
        if (len(tempSubStructList) > 0):
            # Recursive passes dump into a temp file that is merged afterwards;
            # the first pass writes the cumulative file directly.
            if self.recursive:
                command = '%s %s -nodisplay -p %s -RO %s' % (
                    self.multiExe, self.elfFile, self.subStructMultiCmdFile, tempSubStructDefFile)
            else:
                command = '%s %s -nodisplay -p %s -RO %s' % (
                    self.multiExe, self.elfFile, self.subStructMultiCmdFile, self.subStructDefFile)
            if platform.system() == 'Linux':
                ### Just in case we need to do something different for Linux
                self.runCmd(command)
            elif platform.system() == 'Windows':
                ### Just in case we need to do something different for Windows
                self.runCmd(command)
            if self.recursive:
                if os.path.exists(tempSubStructDefFile):
                    print(" Appending new sub structures 1.")
                    self.appendNewSubStructs(tempSubStructDefFile)
                if os.path.exists(tempSubStructDefFile):
                    print(" Deleting temp structures 2.")
                    os.remove(tempSubStructDefFile)
            self.recursive = True
            print(" Extracting new sub structures 3.")
            self.extractArraySubStructs()
def searchVersionMajorMinor(self, fileName, typeDefStructName, lineNum):
"""Performs version lookup for a structure."""
verMajorMacro = None
verMinorMacro = None
versionMajor = 0xBADD
versionMinor = 0xC0DE
iFile = open(fileName, 'r')
lines = iFile.readlines()
iFile.close()
if typeDefStructName not in lines[lineNum]:
print("Incorrect/inconsistent parameters being passed into searchVersionMajorMinor")
return None, None
structStartLine = 0
for i in range(lineNum, 0, -1):
if re.search('typedef [union|struct]', lines[i]):
structStartLine = i
break
if (structStartLine == 0): return None, None
for i in range(structStartLine, lineNum):
line = lines[i].strip()
if re.search('^\/\/', line): continue
if (line == ''): continue
# print line
if ('testCmdVersion_t' in line) and ('_MAJOR' in line) and ('_MINOR' in line):
if re.search('[\/<, ]([A-Z0-9_]+_MAJOR)', line):
m = re.search('[\/< ]([A-Z0-9_]+_MAJOR)', line)
verMajorMacro = m.group(1)
if re.search('[\/<, ]([A-Z0-9_]+_MINOR)', line):
m = re.search('[\/< ]([A-Z0-9_]+_MINOR)', line)
verMinorMacro = m.group(1)
for v in self.versionList:
if (verMajorMacro is not None) and (verMajorMacro in v[0]): versionMajor = int(v[1])
if (verMinorMacro is not None) and (verMinorMacro in v[0]): versionMinor = int(v[1])
if (versionMajor != 0xBADD) and (versionMinor != 0xC0DE): break
return verMajorMacro, versionMajor, verMinorMacro, versionMinor
    def getAllStructNames(self):
        """Populate self.structNameList from the GHS definition files.

        Scans self.structDefFile (top-level structs) and self.subStructDefFile
        (sub-structs) with the regex appropriate for the build flavor (TRUNK
        SXP/NAND vs. CLang), appending each unique group(1) name.  Either file
        that does not exist is created empty so later passes can proceed.

        NOTE(review): ``isDetected`` is not reset per line, but the branch
        conditions are constant for a run, so stale matches cannot leak between
        configurations in practice.
        """
        try:
            structDefFileExists = os.path.isfile(self.structDefFile)
            if structDefFileExists:
                iFile = open(self.structDefFile, 'r')
                lines = iFile.readlines()
                iFile.close()
                isDetected = None
                # isPointerDetected = None
                for l in lines:
                    # Pick the name-detection regex for this build flavor.
                    if TRUNK == "SXP" and not ENABLE_CLANG:
                        isDetected = matchSequence[22].search(l)
                        # print (" Attempt detect SXP name: " + str(l))
                    elif TRUNK == "NAND" and not ENABLE_CLANG:
                        isDetected = matchSequence[21].search(l)
                        # print (" Attempt detect member NAND name: " + str(l))
                    elif ENABLE_CLANG:
                        isDetected = detectedStructureMainName.search(l)
                        # print (" Attempt detect CLang name: " + str(l))
                    if isDetected:
                        m = isDetected
                        # Only record each struct name once.
                        if (m.group(1) not in self.structNameList):
                            # print (" [Found] member name: " + str(m.group(1)))
                            # isPointerDetected = detectSimpleStructOrUnionPointer.search(l) # isPointerDetected = detectBasicPointer.search(l)
                            # if (isPointerDetected):
                            #     print (" [Found] pointer: " + str(m.group(1)))
                            #     # Mark the struct as not needed and use basic type. Mark self.isPointer = 1 later
                            self.structNameList.append(m.group(1))
                        # else:
                        #     print (" <Duplicate> member name: " + str(m.group(1)))
                    # else:
                    #     print (" Nothing member: " + str(l))
            else:
                # Definition file missing: create it empty.
                iFile = open(self.structDefFile, 'w+')
                iFile.close()
            try:
                subStructDefFileExists = os.path.isfile(self.subStructDefFile)
                if subStructDefFileExists:
                    iFile = open(self.subStructDefFile, 'r')
                    lines = iFile.readlines()
                    iFile.close()
                    for l in lines:
                        # isAnonymousName = detectedAnonymousName.search(l)
                        # if isAnonymousName:
                        #     print (" Attempt detect isAnonymousName: " + str(l))
                        # Sub-struct names use a different regex per flavor.
                        if TRUNK == "NAND" and not ENABLE_CLANG:
                            isDetected = matchSequence[23].search(l)
                            # print (" Attempt detect sub-member NAND name: " + str(l))
                        elif TRUNK == "SXP" and not ENABLE_CLANG:
                            isDetected = matchSequence[24].search(l)
                            # print (" Attempt detect sub-member SXP name: " + str(l))
                        elif ENABLE_CLANG:
                            isDetected = detectedStructureSubName.search(l)
                            # print (" Attempt detect sub-member CLang name: " + str(l))
                        if isDetected:
                            m = isDetected
                            if (m.group(1) not in self.structNameList):
                                # print (" [Found] sub-member name: " + str(m.group(1)))
                                self.structNameList.append(m.group(1))
                            # else:
                            #     print (" <Duplicate> sub-member: " + str(l))
                        # else:
                        #     print (" Nothing sub-member: " + str(l))
                else:
                    iFile = open(self.subStructDefFile, 'w+')
                    iFile.close()
                    if ENABLE_DEBUG_ENTER: quit(17)
            except BaseException as error:
                print('An exception occurred: {}'.format(error))
                if ENABLE_DEBUG_ENTER: quit(17)
            except:
                # NOTE(review): unreachable — the BaseException clause above
                # already catches everything.
                print('An exception occurred: {}'.format("def getAllStructNames - substruct"))
                if ENABLE_DEBUG_ENTER: quit(17)
        except BaseException as error:
            print('An exception occurred: {}'.format(error))
            if ENABLE_DEBUG_ENTER: quit(18)
        except:
            # NOTE(review): unreachable for the same reason as above.
            print('An exception occurred: {}'.format("def getAllStructNames"))
            if ENABLE_DEBUG_ENTER: quit(19)
def getAllStructSizes(self):
"""Performs an extraction of the definitions from GHS."""
self.getAllStructNames()
multiCmdFile = open(self.structSizeMultiCmdFile, 'w+')
multiCmdFile.write('_LINES = 10000\n')
for s in self.structNameList:
multiCmdFile.write('mprintf(\"sizeof(%s)=%%i\\n\",sizeof(%s))\n' % (s, s))
multiCmdFile.write('quitall\n') ### Exit multi gracefully
multiCmdFile.close()
command = '%s %s -nodisplay -p %s -RO %s' % (
self.multiExe, self.elfFile, self.structSizeMultiCmdFile, self.structSizeFile)
self.executeMultiScript(command)
    def extractCstructs(self):
        """Drive the full struct-extraction flow against the GHS Multi debugger.

        For each telemetry object: resolve its typedef, append an entry to
        dataStructure.cfg, look up its version macros, and emit Multi commands
        that dump the struct layout plus version markers.  Runs the script,
        builds self.masterObjectList keyed by integer UID, then kicks off the
        sub-struct and size extraction passes.
        """
        self.getAllFwVersionMacros()
        self.numValidStructsFound = 0
        multiCmdFile = open(self.multiCmdFile, 'w+')
        multiCmdFile.write('_LINES = 10000\n')
        print("\nTelemetry Object List")
        for i in range(len(self.telemetryObjectList)):
            dataObjectName = self.telemetryObjectList[i][0]
            typeDefName = self.getTypeDefName(dataObjectName)
            try:
                print("%3i/%i: %-45s" % (
                    i + 1, len(self.telemetryObjectList), "%s ==> %s" % (dataObjectName, str(typeDefName))))
                if typeDefName is not None:
                    with open('dataStructure.cfg', "a") as openFile:
                        # Instance Name, Typedef, VERSION MAJOR macro, VERSION MINOR macro, Pack, versionMajorName, versionMinorName, RNLBA, WNLBA
                        openFile.write(
                            ' [\'{0}\',\'{1}\',None,None,None,None,None,None,None],\n'.format(str(dataObjectName),
                                                                                              str(typeDefName)))
                        # NOTE(review): close() is redundant inside the with-block.
                        openFile.close()
            except:
                pass
            ### Add command to extRC to be executed in Multi Debugger
            if typeDefName is not None:
                fileName, typeDefStructName, lineNum = self.getTypeDefStruct(typeDefName)
                if (typeDefStructName is not None):
                    self.numValidStructsFound += 1
                    #### Extracting version field
                    if (self.options.verbose): print(str(fileName), str(typeDefStructName), str(lineNum))
                    verMajorMacro, versionMajor, verMinorMacro, versionMinor = self.searchVersionMajorMinor(fileName,
                                                                                                           typeDefStructName,
                                                                                                           lineNum)
                    try:
                        print(', %30s=0x%04X, %30s=0x%04X,' % (
                            verMajorMacro, versionMajor, verMinorMacro, versionMinor), )
                    except:
                        pass
                    # Bracket each struct dump with Begin/End markers for the parser.
                    multiCmdFile.write('mprintf(\"ObjectBegin==>%s\\n\")\n' % (dataObjectName))
                    multiCmdFile.write(typeDefStructName + '\n')
                    multiCmdFile.write("sizeof(" + typeDefStructName + ")\n")
                    multiCmdFile.write('mprintf(\"%s = 0x%%04X\\n\",%i)\n' % ('versionMajor', versionMajor))
                    multiCmdFile.write('mprintf(\"%s = 0x%%04X\\n\",%i)\n' % ('versionMinor', versionMinor))
                    multiCmdFile.write('mprintf(\"ObjectEnd==>%s\\n\")\n' % (dataObjectName))
                    # UID may be hex text, decimal text, or a macro name.
                    if (self.telemetryObjectList[i][1] not in self.masterObjectList.keys()):
                        if ('0X' in self.telemetryObjectList[i][1].upper()):
                            self.masterObjectList[int(self.telemetryObjectList[i][1], 16)] = [
                                self.telemetryObjectList[i][0], typeDefStructName, self.telemetryObjectList[i][2]]
                        elif re.search('^[a-zA-Z_]+', self.telemetryObjectList[i][1]):
                            ### Got a macro for UID. Let scan the FW to get the value.
                            macroUid = self.getFwMacroValue(self.telemetryObjectList[i][1])  # @todo unused
                        else:
                            self.masterObjectList[int(self.telemetryObjectList[i][1])] = [
                                self.telemetryObjectList[i][0], typeDefStructName, self.telemetryObjectList[i][2]]
                    else:
                        # NOTE(review): %i with a string UID would raise here —
                        # confirm telemetryObjectList[i][1] type before relying on this path.
                        print("\n-E- UID (%i for %s) as specified by FW datacontrol.h is not unique!" % (
                            self.telemetryObjectList[i][1], self.telemetryObjectList[i][0]))
                        if ENABLE_DEBUG_ENTER: quit(20)
                    print('+')
                else:
                    self.log.debug("Not able to extract %s" % (dataObjectName))
                    print('-')
            else:
                self.log.debug("Not able to extract %s" % (dataObjectName))
                print('-')
        multiCmdFile.write('quitall\n')  ### Exit multi gracefully
        multiCmdFile.close()
        if (self.options.debug):
            print("\nMaster Object List:")
            print("%8s: %-30s, %-30s, %2s" % ('Key', 'Object', 'Struct', 'DA'))
            for key in sorted(self.masterObjectList.keys()):
                print("%8i: %-30s, %-30s, %2s" % (
                    key, self.masterObjectList[key][0], self.masterObjectList[key][1], self.masterObjectList[key][2]))
        command = '%s %s -nodisplay -p %s -RO %s' % (self.multiExe, self.elfFile, self.multiCmdFile, self.structDefFile)
        self.executeMultiScript(command)
        self.recursive = False  ### Set recursive flag to False to start a new recursive call
        self.extractArraySubStructs()
        self.getAllStructSizes()
        # self.deleteTempFiles()
        print("\nTotal valid structures found: %i/%i" % (self.numValidStructsFound, len(self.telemetryObjectList)))
def deleteTempFiles(self):
"""Performs delete of temp files used in parsing."""
if os.path.exists(self.multiCmdFile): os.remove(self.multiCmdFile)
if os.path.exists(self.subStructMultiCmdFile): os.remove(self.subStructMultiCmdFile)
####################################################################################
####################################################################################
####################################################################################
####################################################################################
def createMasterObjectListUidValue(self):
"""Performs a key list creation for unique identifiers."""
for key in sorted(self.masterObjectList.keys()):
self.masterObjectListUidValue[self.masterObjectList[key][0]] = key
self.masterObjectListUidValue[self.masterObjectList[key][1]] = key
if (self.options.verbose):
print("\nMaster Object List w/ Data Object and Struct as keys:")
print("%-30s: %-3s" % ('Key', 'UID'))
for key in sorted(self.masterObjectListUidValue.keys()):
print("%-30s: %3s" % (key, self.masterObjectListUidValue[key]))
def getObjectsFromStructDefFile(self):
"""Performs an extraction of the definitions from GHS produced file."""
# Process through specified data object file to get list for scanning
iFile = open(self.structDefFile, 'r')
lines = iFile.readlines()
iFile.close()
myKeys = self.masterObjectListUidValue.keys()
self.objectsInStructDefFile = []
for l in lines:
if ('==>' not in l): continue
line = l.strip()
line = re.sub('^ +', '', line)
line = re.sub(' +', '', line)
line = re.sub('{', '', line)
if matchSequence[3].match(line):
m = matchSequence[3].match(line)
fwObject = m.group(1)
if fwObject in myKeys:
uid = int(self.masterObjectListUidValue[fwObject])
fwStruct = self.masterObjectList[uid][1]
self.objectsInStructDefFile.append([fwObject, fwStruct, uid])
if (self.options.verbose):
print("\nActual structs found in the stuct definition file:")
for i in range(len(self.objectsInStructDefFile)):
print(i, self.objectsInStructDefFile[i][0], self.objectsInStructDefFile[i][1],
self.objectsInStructDefFile[i][2])
def getStructSizeInBits(self, fwStruct):
"""Performs an extraction of the definitions from GHS produced file."""
iFile = open(self.structSizeFile, 'r')
lines = iFile.readlines()
iFile.close()
sizeInBits = 0
for l in lines:
if re.search('^sizeof\(%s\)=(\d+)' % fwStruct, l.strip()):
m = re.search('^sizeof\(%s\)=(\d+)' % fwStruct, l.strip())
sizeInBits = eval(m.group(1)) * 8
break
return sizeInBits
def onlyHasSimpleSubstructures(self, obj):
"""Performs check to determine if fundamental type."""
if (len(obj.memberList) > 0):
for o in obj.memberList:
if (len(o.memberList) > 0):
return False
return True
    def determineObjectSizes(self, obj):
        """Resolve obj.sizeInBits recursively.

        Fundamental types come from the ctypes mapping; struct/union/enum types
        come from the GHS sizeof output (getStructSizeInBits), scaled by the
        product of the array dimensions.  Sets obj.subStructSizeGood = 1 when a
        size was positively determined.
        """
        sizeInBits = 0
        # Build "d0*d1*..." so eval() yields the flattened element count.
        arrayDimString = str(obj.arrayDimList[0])
        for i in range(1, len(obj.arrayDimList)):
            arrayDimString += "*" + str(obj.arrayDimList[i])
        if (obj.fwStruct in list(self.cToPythonCtypeMap.keys())):
            # Fundamental C type: size comes straight from the ctypes mapping.
            obj.sizeInBits = ctypes.sizeof(eval(self.cToPythonCtypeMap[obj.fwStruct])) * 8 * eval(arrayDimString)
            obj.subStructSizeGood = 1
        elif self.getStructSizeInBits(obj.fwStruct):
            sizeInBits = self.getStructSizeInBits(obj.fwStruct) * eval(arrayDimString)
            obj.sizeInBits = sizeInBits
            obj.subStructSizeGood = 1
        elif re.search('^struct (.+)$', obj.fwStruct):
            # "struct X": look up X; on a 0 result keep the previous size.
            m = re.search('^struct (.+)$', obj.fwStruct)
            sizeInBits = self.getStructSizeInBits(m.group(1)) * eval(arrayDimString)
            if (sizeInBits):
                obj.subStructSizeGood = 1
            else:
                sizeInBits = obj.sizeInBits
            obj.sizeInBits = sizeInBits
        elif re.search('^union (.+)$', obj.fwStruct):
            m = re.search('^union (.+)$', obj.fwStruct)
            sizeInBits = self.getStructSizeInBits(m.group(1)) * eval(arrayDimString)
            if (sizeInBits):
                obj.subStructSizeGood = 1
            else:
                sizeInBits = obj.sizeInBits
            obj.sizeInBits = sizeInBits
        elif re.search('^enum (.+)$', obj.fwStruct):
            m = re.search('^enum (.+)$', obj.fwStruct)
            sizeInBits = self.getStructSizeInBits(m.group(1)) * eval(arrayDimString)
            if (sizeInBits):
                obj.subStructSizeGood = 1
            else:
                sizeInBits = obj.sizeInBits
            obj.sizeInBits = sizeInBits
        # Recurse into members (leaves just return).
        if obj.memberList == []:
            return
        else:
            for i in range(len(obj.memberList)):
                self.determineObjectSizes(obj.memberList[i])
    def auditStructSizes(self, obj):
        """Cross-check each node's sizeInBits against the sum (struct) or the
        common width (union) of its direct members, repairing 0-sized nodes
        and propagating subStructSizeGood flags downward on mismatch.
        """
        if (len(obj.memberList) <= 0):
            return  ### We do nothing here because I cannot be certain if the simple data object size is valid or not.
        if (obj.sizeInBits == 0):
            ### Dealing with unions of size 0
            if (obj.structType in ['union']):
                ### Setting the size for a 0-size union
                # Adopt the first non-union, non-zero member size as the union width.
                for o in obj.memberList:
                    if (o.structType not in ['union']) and (o.sizeInBits != 0):
                        obj.sizeInBits = o.sizeInBits
                        obj.subStructSizeGood = 1
                        break
                #### Set subStructSizeGood status for the union object's subStructs
                for o in obj.memberList:
                    if (o.sizeInBits == obj.sizeInBits):
                        o.subStructSizeGood = 1
                    else:
                        o.subStructSizeGood = 0
                        o.sizeInBits = obj.sizeInBits
            ### Setting the size for a 0-size struct
            elif (obj.structType in ['struct']):
                for o in obj.memberList:
                    obj.sizeInBits += o.sizeInBits
                obj.subStructSizeGood = 1
            ### Setting the size for a 0-size struct
            elif (obj.structType in ['bitfield']) and self.onlyHasSimpleSubstructures(obj):
                # Bitfield of leaves: take the first usable member's width as the
                # container width and suppress that member's own flag.
                for o in obj.memberList:
                    if (o.structType not in ['union']) and (o.sizeInBits != 0):
                        obj.sizeInBits = o.sizeInBits
                        obj.subStructSizeGood = 1
                        o.subStructSizeGood = 0
                        break
            ### Setting the size for a 0-size struct
            elif (obj.structType in ['bitfield']):
                for o in obj.memberList:
                    obj.sizeInBits += o.sizeInBits
                obj.subStructSizeGood = 1
            ### Catching other 0-size data construct as error for further evaluation later.
            else:
                pprint(vars(obj))
                pressReturnToContinue('1 getting obj.sizeInBits')
        ### Obtain size for unions and structs
        gotCalculatedUnionSize = False
        calculatedSubstructSizeTotal = 0
        for o in obj.memberList:
            self.auditStructSizes(o)
            if (obj.structType in ['union']):
                # For unions, the "total" is the first member width that matches
                # the union's own width (members overlay, they do not add up).
                if (not gotCalculatedUnionSize):
                    calculatedSubstructSizeTotal = o.sizeInBits
                    if obj.sizeInBits == o.sizeInBits:
                        gotCalculatedUnionSize = True
            else:
                calculatedSubstructSizeTotal += o.sizeInBits
        ### Check the goodness of an object's size relative to its substructures
        if (obj.sizeInBits == calculatedSubstructSizeTotal):
            obj.subStructSizeGood = 1
        ### Otherwise, something is not right about the size of the substructures
        ### Let's set the subStructSizeGood of all substructures to False
        elif ('bitfield' not in obj.structType) and \
                ('unnamed type' not in obj.fwStruct) and \
                (obj.depth > 1):
            for o in obj.memberList:
                o.subStructSizeGood = 0
            # if ('transport' in o.fwObject):
            #     print
            #     pprint(vars(o))
            #     print
            #     pprint(vars(obj))
            #     print
            #     print "obj.sizeInBits", obj.sizeInBits
            #     print "calculatedSubstructSizeTotal", calculatedSubstructSizeTotal
            #     pressReturnToContinue('3.6')
        #### Set subStructSizeGood status for the any object's subStructs
        if (obj.structType and obj.fwStruct):
            if ('bitfield' not in obj.structType) and ('unnamed type' not in obj.fwStruct):
                for o in obj.memberList:
                    if (not obj.subStructSizeGood):
                        o.subStructSizeGood = 0
def printObjectInfo(self, obj, oFile, structNum=None):
"""Performs console print of the object information to a file."""
arrayDimString = ''
arrayDimString = str(obj.arrayDimList[0])
for i in range(len(obj.arrayDimList)):
if i > 0: arrayDimString += "*" + str(obj.arrayDimList[i])
if obj.memberList == []:
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames), ''))
return
else:
if (obj.depth == 1):
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames), 'ObjectStart'))
elif (obj.structType == 'union'):
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames),
'Union%i%iStart' % (obj.depth, structNum)))
else:
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames),
'Struct%i%iStart' % (obj.depth, structNum)))
for i in range(len(obj.memberList)):
self.printObjectInfo(obj.memberList[i], oFile, i + 1)
if (obj.depth == 1):
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames), 'ObjectEnd'))
elif (obj.structType == 'union'):
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames),
'Union%i%iEnd' % (obj.depth, structNum)))
else:
oFile.write('%s,%s,%s,%s,%s,%s,0x%04X,0x%04X,%s,\"%s\",%s\n' % \
(obj.fwStruct, str(obj.fwObject), obj.structType, str(obj.sizeInBits),
arrayDimString, str(obj.uid), int(obj.versionMajor), int(obj.versionMinor),
str(obj.subStructSizeGood), str(obj.ancestryNames),
'Struct%i%iEnd' % (obj.depth, structNum)))
def outputObjectCsv(self, obj):
"""Performs an collection of information to output CSV formated file."""
outFile = os.path.join(self.parsersFolder, obj.fwObject + '.csv')
cannotOpenFileForWrite = False
while (not cannotOpenFileForWrite):
try:
with open(outFile, "wb") as oFile:
oFile.write(
'TypeDef,FieldName,Type,SizeInBits,ArrayDim,UID,versionMajor,versionMinor,sizeGood,ancestryNames,Start/End\n')
self.printObjectInfo(obj, oFile)
cannotOpenFileForWrite = True
except IOError as e:
if e.errno == errno.EACCES:
usersInput = input("File access error. Close %s before proceeding (q=quit): " % (outFile))
if (usersInput.lower() in ['q', 'quit']): quit(21)
cannotOpenFileForWrite = False
else:
raise IOError
def hasSimpleMemberWithGoodSize(self, obj):
"""Performs a definition check for name and size."""
simpleMemberWithGoodSize = False
if (len(obj.memberList) > 0):
for o in obj.memberList:
if (len(o.memberList) <= 0):
hasSimpleMember = True # @todo unused
if obj.subStructSizeGood:
simpleMemberWithGoodSize = True
return simpleMemberWithGoodSize
def hasSimpleMemberWithGoodSizeMaxSize(self, obj):
"""Performs a definition check for name and approporate size."""
simpleMemberWithGoodSize = False
memberObject = None
if (len(obj.memberList) > 0):
for o in obj.memberList:
if (len(o.memberList) <= 0) and obj.subStructSizeGood:
if (not simpleMemberWithGoodSize):
simpleMemberWithGoodSize = True
memberObject = o
elif (memberObject.sizeInBits > o.sizeInBits):
memberObject = o
return memberObject
    def writeObjectParserPy(self, obj, pyParserFile, prependStr=''):
        """Emit one node of the parser-description table into the generated
        <fwObject>.py file.

        Containers become '# ...' banner comments; leaves become table rows
        ('autoParserToken' uses bdSTR, everything else bdDEC).  When an
        altMemberList exists (size-mismatch repair), it replaces the real
        members; for unions, only the selected representative member is
        emitted.
        """
        if len(obj.memberList) > 0:
            # Container: banner comment, members are handled further below.
            pyParserFile.write('# %s%s %s\n' % ((obj.depth * 2) * ' ', obj.fwObject, obj.structType))
        elif not obj.subStructSizeGood:
            # Leaf with an unreliable size: only emit a banner when it sits
            # directly under a union; otherwise just log for investigation.
            # pyParserFile.write('# %s%s %s\n' % ((obj.depth*2)*' ',obj.fwObject,obj.structType))
            if (len(obj.ancestryTypes) > 1) and (obj.ancestryTypes[1] in ['union']):
                pyParserFile.write('# %s%s %s\n' % ((obj.depth * 2) * ' ', obj.fwObject, obj.structType))
            elif (obj.parent is not None) and (obj.parent.subStructSizeGood):
                self.log.debug(
                    "Good parent >>>%-40s, %50s, %50s" % (obj.fwObject, str(obj.ancestryTypes), str(obj.ancestryNames)))
            else:
                self.log.debug(
                    "Bad parent >>>%-40s, %50s, %50s" % (obj.fwObject, str(obj.ancestryTypes), str(obj.ancestryNames)))
        elif (len(obj.altMemberList) <= 0):  # todo: integrate array checking into altmemberlist flow
            # Good-sized leaf: one table row.
            if (obj.fwObject == 'autoParserToken'):
                pyParserFile.write('%-4s%-35s, %6s, %6s, %7s, %6s, %6s, %-35s,\n' % \
                                   (prependStr + '', '\'' + obj.fwObject + '\'', str(obj.sizeInBits), '0', '0', 'bdSTR',
                                    'None', '\'' + obj.fwObject + '\''))
            else:
                pyParserFile.write('%-4s%-35s, %6s, %6s, %7s, %6s, %6s, %-35s,\n' % \
                                   (prependStr + '', '\'' + obj.fwObject + '\'', str(obj.sizeInBits), '0', '0', 'bdDEC',
                                    'None', '\'' + obj.fwObject + '\''))
        if (len(obj.altMemberList) > 0) and (obj.depth > 1):
            # Repaired nodes: the synthetic alt members replace the real ones.
            for o in obj.altMemberList:
                self.writeObjectParserPy(o, pyParserFile, '')
        else:
            if (obj.structType in ['union']):
                # Unions flatten to a single representative member.
                simpleMemberWithGoodSizeObj = self.hasSimpleMemberWithGoodSize(obj)  # @todo unused
                simpleMemberWithGoodSizeMaxSizeObj = self.hasSimpleMemberWithGoodSizeMaxSize(obj)
                for o in obj.memberList:
                    if (simpleMemberWithGoodSizeMaxSizeObj is not None):
                        if (o.fwObject != simpleMemberWithGoodSizeMaxSizeObj.fwObject):
                            continue
                    self.writeObjectParserPy(o, pyParserFile, '')
                    if len(o.memberList) > 0:
                        pyParserFile.write('# %s%s %s\n' % ((o.depth * 2) * ' ', o.fwObject, o.structType))
                    if (obj.structType in ['union']): break
            else:
                for o in obj.memberList:
                    self.writeObjectParserPy(o, pyParserFile, '')
                    if len(o.memberList) > 0:
                        pyParserFile.write('# %s%s %s\n' % ((o.depth * 2) * ' ', o.fwObject, o.structType))
        if obj.depth == 1:
            # Closing banner for the root object.
            pyParserFile.write('# %s%s %s\n' % ((obj.depth * 2) * ' ', obj.fwObject, obj.structType))
def generateObjectParserPy(self, obj):
"""Performs an object generation from C to python c-type."""
### Creating [fwObject].py file in the telemetry parsers folder, one per object.
outParserFile = os.path.join(self.parsersFolder, obj.fwObject + '.py')
with open(outParserFile, "wb") as pyParserFile:
telemetryParserTxt = "\"\"\""
telemetryParserTxt += "This file is automatically generated per Telemetry object. Please do not modify."
telemetryParserTxt += "\"\"\""
telemetryParserTxt += "\nfrom bufdict import *\n\n"
telemetryParserTxt += "%s_Description_%i_%i = \\\n" % (
obj.fwObject[0].upper() + obj.fwObject[1:], obj.versionMajor, obj.versionMinor)
telemetryParserTxt += "[\n"
telemetryParserTxt += "%-4s%-35s, %6s, %6s, %7s, %6s, %6s, %-35s\n" % (
"#", "name", "size", "signed", "default", "style", "token", "desc")
pyParserFile.write(telemetryParserTxt)
self.writeObjectParserPy(obj, pyParserFile)
telemetryParserTxt = "]\n\n"
telemetryParserTxt += "%s_dict = {\n" % (obj.fwObject)
telemetryParserTxt += "%s(%i,%i): %s_Description_%i_%i,\n" % (
(len(obj.fwObject) + 9) * ' ', obj.versionMajor, obj.versionMinor,
obj.fwObject[0].upper() + obj.fwObject[1:], obj.versionMajor, obj.versionMinor)
telemetryParserTxt += "%s}\n\n" % ((len(obj.fwObject) + 8) * ' ')
telemetryParserTxt += "class %s(bufdict):\n" % (obj.fwObject[0].upper() + obj.fwObject[1:])
telemetryParserTxt += " \"%s\"\n" % (obj.fwObject[0].upper() + obj.fwObject[1:])
telemetryParserTxt += " def __init__(self, buf=None, offset=None, filename=None, other=None, namesize=30, valuesize=10, majorVersion=%i, minorVersion=%i):\n\n" % (
obj.versionMajor, obj.versionMinor)
telemetryParserTxt += " description = getDescription(desc_dict=%s_dict, key=(majorVersion, minorVersion))\n" % (
obj.fwObject)
telemetryParserTxt += " bufdict.__init__(self, description=description, version=majorVersion,name=\"%s\",\\\n" % (
obj.fwObject)
telemetryParserTxt += " namesize=namesize, valuesize=valuesize, filename=filename, buf=buf, other=other)\n\n"
telemetryParserTxt += " pass\n"
pyParserFile.write(telemetryParserTxt)
### Creating telemetryCmd.py file in the telemetry commands folder
# telemetryCmdTxt = "import __init__\n" #todo: fix __init__ in gen3/tools/telemetry importing getRoot incorrectly before uncommenting
telemetryCmdTxt = "import sys\n"
telemetryCmdTxt += "import importlib\n"
telemetryCmdTxt += "import os\n\n"
telemetryCmdTxt += "mapping = {\n"
for i in range(len(self.telemetryObjectListAll)):
fwObject = self.telemetryObjectListAll[i][0]
uid = self.telemetryObjectListAll[i][1]
if re.search('^[0-9a-fx]+$', uid.lower()):
telemetryCmdTxt += " %s: \'%s\',\n" % (uid, fwObject)
telemetryCmdTxt += " }\n\n"
telemetryCmdTxt += "class TelemetryObjectCommands(object):\n\n"
telemetryCmdTxt += " def __init__(self, devObj=None):\n"
telemetryCmdTxt += " self._devObj = devObj\n\n"
telemetryCmdTxt += " def parseTelemetry(self, objectId, inFile=None, projObjFile=None):\n"
telemetryCmdTxt += " '''\n"
telemetryCmdTxt += " read the Telemetry object\n"
telemetryCmdTxt += " '''\n"
telemetryCmdTxt += " os.sys.path.insert(1, r'%s'%(projObjFile))\n"
telemetryCmdTxt += " exec(\"from telemetry.parsers.%s import %s\" % (mapping[objectId],mapping[objectId][0].upper()+mapping[objectId][1:]))\n"
telemetryCmdTxt += " myObj = eval(\"%s()\" % (mapping[objectId][0].upper()+mapping[objectId][1:]))\n"
telemetryCmdTxt += " if inFile is not None:\n"
telemetryCmdTxt += " myObj.from_file(filename=inFile)\n"
telemetryCmdTxt += " else:\n"
telemetryCmdTxt += " myObj.from_buf(self._devObj.getReadBuffer())\n"
telemetryCmdTxt += " return myObj\n"
outCmdFile = os.path.join(self.commandsFolder, 'telemetryCmd.py')
with open(outCmdFile, "wb") as pyCmdFile:
pyCmdFile.write(telemetryCmdTxt)
initFileText = "\"\"\""
initFileText += " __init__.py - This file makes this folder a Python package."
initFileText += "\"\"\"\n"
initFileText += "import os\n"
initFileText += "os.sys.path.insert(1,'..')\n"
### Creating __init__.py file in the telemetry folder
outParserInitFile = os.path.join(self.telemetryFolder, '__init__.py')
with open(outParserInitFile, "wb") as pyParserInitFile:
pyParserInitFile.write(initFileText)
### Creating __init__.py file in the telemetry commands folder
outCmdInitFile = os.path.join(self.commandsFolder, '__init__.py')
with open(outCmdInitFile, "wb") as pyCmdInitFile:
pyCmdInitFile.write(initFileText)
### Creating __init__.py file in the telemetry parsers folder
outParserInitFile = os.path.join(self.parsersFolder, '__init__.py')
with open(outParserInitFile, "wb") as pyParserInitFile:
pyParserInitFile.write(initFileText)
### Copying bufdict.py and bufdata.py into the telemetry parsers folder
shutil.copy(os.path.join(self.fwToolsDir, 'bufdict.py'), self.parsersFolder)
shutil.copy(os.path.join(self.fwToolsDir, 'bufdata.py'), self.parsersFolder)
def setAncestryType(self, obj):
"""Performs an mutation on the ancestory of the object."""
if (len(obj.memberList) > 0):
for o in obj.memberList:
o.ancestryTypes = [obj.structType] + obj.ancestryTypes
self.setAncestryType(o)
def getVarNameListToDedup(self, obj):
"""Performs a reduction of duplicated definitions."""
if len(obj.memberList) > 0:
for o in obj.memberList:
self.getVarNameListToDedup(o)
elif obj.subStructSizeGood:
if obj.fwObject in self.varNameList:
if obj.fwObject not in self.varNameListToDedup:
self.varNameListToDedup.append(obj.fwObject)
else:
self.varNameList.append(obj.fwObject)
    def dedupVarNames(self, obj, ancestryLevel=0):
        """Disambiguate duplicated leaf field names by prefixing them with a
        camelCased ancestor name taken at ``ancestryLevel`` in the node's
        ancestryNames chain.  Called repeatedly with increasing levels until
        no duplicates remain (see generatePythonCtypes).
        """
        if len(obj.memberList) > 0:
            for o in obj.memberList:
                self.dedupVarNames(o, ancestryLevel=ancestryLevel)
        elif (obj.subStructSizeGood and (obj.fwObject in self.varNameListToDedup)):
            # print obj.ancestryNames,obj.fwObject,ancestryLevel,"\tAbout to modify fwObject"
            # Skip placeholder ancestors ('0') and those rejected by the
            # project regex filters matchSequence[4]/[5].
            if (obj.ancestryNames[ancestryLevel] != '0') and \
                    (not matchSequence[4].match(obj.ancestryNames[ancestryLevel])) and \
                    (not matchSequence[5].match(obj.ancestryNames[ancestryLevel])):
                tmpAncestryName = ''
                if ('_' in obj.ancestryNames[ancestryLevel]):
                    # snake_case ancestor (minus a trailing '_s') -> camelCase.
                    tmpAncestryNameWordList = re.sub('_s$', '', obj.ancestryNames[ancestryLevel]).split('_')
                    for t in tmpAncestryNameWordList:
                        tmpAncestryName += t[0].upper() + t[1:].lower()
                    tmpAncestryName = tmpAncestryName[0].lower() + tmpAncestryName[1:]
                else:
                    tmpAncestryName = obj.ancestryNames[ancestryLevel][0].lower() + obj.ancestryNames[ancestryLevel][1:]
                # e.g. ancestor 'thermal_info' + field 'count' -> 'thermalInfoCount'
                obj.fwObject = tmpAncestryName + obj.fwObject[0].upper() + obj.fwObject[1:]
def checkSubstructSizeGood(self, obj):
"""Performs a child definition check."""
if len(obj.memberList) <= 0:
return (obj.subStructSizeGood == 1)
for o in obj.memberList:
if (o.subStructSizeGood != 1):
return False
return True
def paddSubstructSizeIfNeeded(self, obj):
"""Performs a child definition construction."""
if len(obj.memberList) <= 0: return
if (obj.structType != 'union'): return
maxSizeInBits = obj.sizeInBits
for o in obj.memberList:
if (o.sizeInBits < maxSizeInBits):
myObj = None
exec("myObj = GenericObject()")
myObj.subStructSizeGood = 1
myObj.arrayDimList = [1]
myObj.depth = o.depth + 1
myObj.endLineNum = o.endLineNum
myObj.fwObject = o.fwObject
myObj.fwStruct = 'altobj'
myObj.parent = o
myObj.ancestryNames = [o.fwObject] + o.ancestryNames
myObj.memberList = []
myObj.altMemberList = []
myObj.sizeInBits = o.sizeInBits
myObj.startLineNum = o.startLineNum
myObj.structType = 'var'
myObj.isPointer = 0
myObj.uid = o.uid
myObj.versionMajor = 0xBADD ### Default
myObj.versionMajorStr = 'versionMajor' ### Default
myObj.versionMinor = 0xC0DE ### Default
myObj.versionMinorStr = 'versionMinor' ### Default
myObj2 = None
exec("myObj2 = GenericObject()")
myObj2.subStructSizeGood = 1
myObj2.arrayDimList = [1]
myObj2.depth = o.depth + 1
myObj2.endLineNum = o.endLineNum
myObj2.fwObject = o.fwObject + 'pad'
myObj2.fwStruct = 'altobj'
myObj2.parent = o
myObj2.ancestryNames = [o.fwObject] + o.ancestryNames
myObj2.memberList = []
myObj2.altMemberList = []
myObj2.sizeInBits = maxSizeInBits - o.sizeInBits
myObj2.startLineNum = o.startLineNum
myObj2.structType = 'var'
myObj2.isPointer = 0
myObj2.uid = o.uid
myObj2.versionMajor = 0xBADD ### Default
myObj2.versionMajorStr = 'versionMajor' ### Default
myObj2.versionMinor = 0xC0DE ### Default
myObj2.versionMinorStr = 'versionMinor' ### Default
o.altMemberList = [myObj, myObj2]
def generateAltMemberList(self, obj):
"""
When a struct has good size state, but its substructs do not, we need
to fill in its altMemberList with a single new objects of correct size.
"""
if (len(obj.memberList) <= 0):
return
if (not matchSequence[5].match(obj.fwObject)) and (obj.fwObject != '0'):
if (not self.checkSubstructSizeGood(obj)):
myObj = None
exec("myObj = GenericObject()")
myObj.subStructSizeGood = 1
myObj.arrayDimList = [1]
myObj.depth = obj.depth + 1
myObj.endLineNum = obj.endLineNum
myObj.fwObject = obj.fwObject + 'AltObject'
myObj.fwStruct = 'altobj'
myObj.parent = obj
myObj.ancestryNames = [obj.fwObject] + obj.ancestryNames
myObj.memberList = []
myObj.altMemberList = []
myObj.sizeInBits = obj.sizeInBits
myObj.startLineNum = obj.startLineNum
myObj.structType = 'var'
myObj.isPointer = 0
myObj.uid = obj.uid
myObj.versionMajor = 0xBADD ### Default
myObj.versionMajorStr = 'versionMajor' ### Default
myObj.versionMinor = 0xC0DE ### Default
myObj.versionMinorStr = 'versionMinor' ### Default
obj.altMemberList = [myObj]
# self.log.debug('%s fwObject has been created for %s' % (myObj.fwObject,obj.fwObject))
if (obj.structType == 'union'):
self.paddSubstructSizeIfNeeded(obj)
for o in obj.memberList:
self.generateAltMemberList(o)
def generatePythonCtypes(self):
    """
    Transform the fundamental FW struct definitions into Python ctypes parsers.

    For each object discovered in the struct-definition file: build the
    GenericObject tree, compute and audit sizes, generate alt-member padding,
    de-duplicate variable names (iterating until stable), then emit the
    per-object parser module.  When --dataObjToProcess is given, only that
    object is processed and a warning is printed if it is never found.
    """
    self.createMasterObjectListUidValue()
    self.getObjectsFromStructDefFile()
    # Only meaningful when a specific object was requested on the command line.
    specifiedObjectFound = False
    for i in range(len(self.objectsInStructDefFile)):
        fwObject = self.objectsInStructDefFile[i][0]
        fwStruct = self.objectsInStructDefFile[i][1]
        uid = self.objectsInStructDefFile[i][2]
        if (self.options.dataObjToProcess is not None):
            if fwObject != self.options.dataObjToProcess:
                continue
            specifiedObjectFound = True
        obj = self.buildStruct(uid, fwObject, fwStruct)
        self.determineObjectSizes(obj)
        self.auditStructSizes(obj)
        ### if struct has good size, but substructs do not: generate altMemberList
        self.generateAltMemberList(obj)
        if (self.options.extraDebug):
            self.outputObjectCsv(obj)
        self.setAncestryType(obj)
        dedupIteration = 0
        self.varNameList = []
        self.varNameListToDedup = []
        self.getVarNameListToDedup(obj)
        # Repeat the rename pass until no duplicate variable names remain.
        while len(self.varNameListToDedup) > 0:
            if (self.options.verbose):
                print("+++++++++++++++++++++++++++++++++++++++")
                print("Variable name dedup iteration: #%i" % (dedupIteration))
                print("Dedup %s" % (obj.fwObject))
                print("---------------------------------------")
                for v in self.varNameListToDedup: print(v)
                print("---------------------------------------")
            self.dedupVarNames(obj, dedupIteration)
            dedupIteration += 1
            self.varNameList = []
            self.varNameListToDedup = []
            self.getVarNameListToDedup(obj)
        self.generateObjectParserPy(obj)
    if (self.options.dataObjToProcess is not None) and (not specifiedObjectFound):
        # Fixed: the original printed a bare "%s" placeholder with no
        # argument (and misspelled "object").
        print("\n>>> Specified object (%s) is not found" % (self.options.dataObjToProcess))
def __skipNamedUnionSubstruct(self, lines, index):
    """
    Scan forward from ``index`` for the next balanced '{...}' region and
    locate its first direct (depth-1) member line.

    Returns a (beginLine, endLine, memberLine) tuple of 0-based indexes
    into ``lines``; memberLine is 0 when no depth-1 member line exists.
    """
    braceDepth = 0
    beginLine = None
    endLine = None
    ### Find the start and end line numbers for the specified object
    for lineNo in range(index, len(lines)):
        currentLine = lines[lineNo]
        if ('{' in currentLine):
            braceDepth += 1
            if braceDepth == 1:
                beginLine = lineNo
        elif ('}' in currentLine):
            braceDepth -= 1
            if braceDepth == 0:
                endLine = lineNo
                break
    if (self.options.debug):
        print("depth: %i, bLine: %i, eLine: %i" % (braceDepth, beginLine, endLine))
        pressReturnToContinue('skipNamedUnionStruct')
    memberDepth = 0
    for lineNo in range(beginLine, endLine + 1):
        normalized = lines[lineNo].strip()
        ### Same whitespace normalization the main parser loop applies
        for pattern, replacement in ((r'^ +', ''),        # spaces before actual text
                                     (r' +', ' '),        # collapse runs of spaces
                                     (r' +$', ''),        # trailing spaces
                                     (r'\] \[', '][')):   # spaces between array dims
            normalized = re.sub(pattern, replacement, normalized)
        if ('{' in normalized):
            memberDepth += 1
        elif ('}' in normalized):
            memberDepth -= 1
        elif memberDepth == 1:
            ### First non-brace line directly inside the region
            return beginLine, endLine, lineNo
    return beginLine, endLine, 0
def __buildStruct(self, lines, obj, startIndex=None, endIndex=None):
    """
    Recursively parse the struct-definition text in ``lines`` and populate
    ``obj`` (a GenericObject) with its member tree.

    Parameters:
        lines      -- full text of the struct-definition file, one string per line
        obj        -- the GenericObject being filled in; returned when its
                      closing '}' (or endIndex) is reached
        startIndex -- 0-based first line to scan (defaults to obj.startLineNum)
        endIndex   -- 0-based scan limit (defaults to obj.endLineNum)

    Returns ``obj`` with obj.memberList (and possibly obj.sizeInBits /
    version fields) populated.

    Py3 fix: all child objects are now created by direct construction via
    _newChild(); the original ``exec("myObj = GenericObject()")`` cannot
    rebind a function local under Python 3, which either crashed
    (AttributeError on None) or silently reused a stale object from a
    previous loop iteration.
    """
    def _newChild(parent, fwObject, fwStruct, structType,
                  startLineNum, endLineNum,
                  arrayDimList=None, sizeInBits=0, isPointer=0):
        """Create a GenericObject member pre-populated with the standard defaults."""
        child = GenericObject()
        child.arrayDimList = [1] if arrayDimList is None else arrayDimList
        child.depth = parent.depth + 1
        child.startLineNum = startLineNum
        child.endLineNum = endLineNum
        child.fwObject = fwObject
        child.fwStruct = fwStruct
        child.parent = parent
        child.ancestryNames = [parent.fwObject] + parent.ancestryNames
        child.memberList = []
        child.altMemberList = []   # now set for every child, consistently
        child.sizeInBits = sizeInBits
        child.structType = structType
        child.isPointer = isPointer
        child.uid = parent.uid
        child.versionMajor = 0xBADD            ### Default
        child.versionMajorStr = 'versionMajor' ### Default
        child.versionMinor = 0xC0DE            ### Default
        child.versionMinorStr = 'versionMinor' ### Default
        return child

    if (self.options.verbose):
        print("\n***********************************")
        print("Object at __buildStruct Entry point")
        pprint(vars(obj))
        print("***********************************")
    if (startIndex is None):
        startIndex = obj.startLineNum
    if (endIndex is None):
        endIndex = obj.endLineNum
    index = startIndex
    while (index < endIndex):
        ### pre-process the text line
        myLine = lines[index].strip()
        myLine = re.sub(r'^ +', '', myLine)     # take out spaces before actual text
        myLine = re.sub(r' +', ' ', myLine)     # take out extra spaces after '=' sign
        myLine = re.sub(r' +$', '', myLine)     # take out extra spaces at the end of the line
        myLine = re.sub(r'\] \[', '][', myLine) # take out extra spaces between array dim []
        if (self.options.debug):
            print("\n>>>>%s" % lines[index].strip())
            print(">>>>%s\n" % myLine)
        ###############################################################################
        # '^union {'
        if matchSequence[6].match(myLine):
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            obj.structType = 'union'
            obj.depth = 1
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('0')
            index += 1
            continue
        ###############################################################################
        # '^struct/structName_t {'
        elif matchSequence[7].match(myLine):
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            obj.depth = 1
            obj.structType = 'struct'
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('1')
            index += 1
            continue
        ###############################################################################
        # 'hexnum struct/union {'
        elif matchSequence[8].match(myLine):
            m = matchSequence[8].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(1), m.group(2), m.group(2),
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print("Before -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('2')
            obj.memberList.append(self.__buildStruct(lines, obj=myObj))
            index = obj.memberList[-1].endLineNum + 1
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('2.1')
            continue
        ###############################################################################
        # hexNum union thing1 {
        elif matchSequence[9].match(myLine):
            m = matchSequence[9].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(2), 'union', 'union',
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print("Before -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('3')
            # Named union: only its single depth-1 member line is parsed,
            # the rest of the region is skipped.
            bLine, eLine, lineNum = self.__skipNamedUnionSubstruct(lines, index)
            if (self.options.debug):
                print(bLine, eLine, lineNum)
                print(lines[lineNum])
            obj.memberList.append(self.__buildStruct(lines, obj=myObj, startIndex=lineNum, endIndex=lineNum + 1))
            if (self.options.extraDebug): self.outputObjectStructText(lines[eLine], append=True)
            index = eLine + 1
            if (self.options.debug):
                print("After -- Index:", index)
                print("\n>>>>>%s" % lines[index])
                print()
                pprint(vars(obj.memberList[-1]))
                print()
                pprint(vars(obj))
                pressReturnToContinue('3.1')
            continue
        ###############################################################################
        # hexNum struct thing1 {
        elif matchSequence[10].match(myLine):
            m = matchSequence[10].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(3), m.group(2), 'struct',
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print("Before -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('4.1')
            obj.memberList.append(self.__buildStruct(lines, obj=myObj))
            index = obj.memberList[-1].endLineNum + 1
            if (self.options.debug):
                print("After -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('4.2')
            continue
        ###############################################################################
        # objName = name_t {
        elif matchSequence[11].match(myLine):
            m = matchSequence[11].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(1), m.group(2), 'struct',
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print(">>>>> Before -- Index:", index)
                pprint(vars(obj))
                print();
                print((">>>>>>>" + lines[index]))
                pressReturnToContinue('5')
            obj.memberList.append(self.__buildStruct(lines, obj=myObj))
            index = obj.memberList[-1].endLineNum + 1
            if (self.options.debug):
                print(">>>>> After -- Index:", index)
                pprint(vars(obj))
                print();
                print((">>>>>>>" + lines[index]))
                pressReturnToContinue('5.1')
            continue  ### Continue since the index already points at the next line to be processed
        ###############################################################################
        # 'objName = union name_t {' as in 'xyz = struct/union xyz_t {'
        elif matchSequence[12].match(myLine):
            m = matchSequence[12].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(1), m.group(2), 'union',
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print("Before -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('6')
            obj.memberList.append(self.__buildStruct(lines, obj=myObj))
            index = obj.memberList[-1].endLineNum + 1
            if (self.options.debug):
                print("After -- Index:", index)
                print("\n>>>>>%s" % lines[index])
                print()
                pprint(vars(obj.memberList[-1]))
                print()
                pprint(vars(obj))
                pressReturnToContinue('6.1')
            continue
        ###############################################################################
        # NOTE(review): the original source repeated an ``elif matchSequence[12]``
        # branch here (its own comment said "@todo Duplication possibly remove");
        # the identical condition is fully handled above, so the duplicate,
        # unreachable branch has been removed.
        ###############################################################################
        # 'objName = struct name_t {' as in 'xyz = struct/union xyz_t {'
        elif matchSequence[13].match(myLine):
            m = matchSequence[13].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(1), m.group(3), 'struct',
                              index + 1, obj.endLineNum)
            if (self.options.debug):
                print("Before -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('8.1')
            obj.memberList.append(self.__buildStruct(lines, obj=myObj))
            index = obj.memberList[-1].endLineNum
            if (self.options.debug):
                print("After -- Index:", index)
                pprint(vars(obj))
                pressReturnToContinue('8.2')
            index = index + 1
            continue
        ###############################################################################
        # [typeSpecifier ... ] type name;
        # default types and any pointer is uint32_t
        # @todo special handling of void *cmdHandle;
        # @todo special handling of const struct smartSelectiveSelfTestSpan_t *pTestSpan;
        elif matchSequence[14].match(myLine):
            m = matchSequence[14].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            myObj = _newChild(obj, m.group(2), m.group(1), m.group(1), index, index)
            if detectBasicPointer.search(myLine):
                # Any pointer is treated as an opaque void-sized scalar.
                # NOTE(review): ctypes.sizeof() returns BYTES while the field
                # is named sizeInBits -- preserved from the original; confirm.
                myObj.fwStruct = 'void'
                myObj.isPointer = 1
                myObj.sizeInBits = ctypes.sizeof(eval(self.cToPythonCtypeMap["void"]))
                myObj.structType = 'void'
            obj.memberList.append(myObj)
            index += 1
            continue
        ###############################################################################
        # typeSpecifier [typeSpecifier typeSpecifier ...] type arrayName[dim][dim]...;
        elif re.search(r'^([\w ]+) ([\[\w\]]+);$', myLine):
            m = re.search(r'^([\w ]+) ([\[\w\]]+);$', myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            # Turn name[a][b] into name[a,b] so one regex captures all dims.
            arrayString = re.sub(r'\]\[', ',', m.group(2))
            if (self.options.debug):
                print(arrayString);
                pressReturnToContinue('10.1')
            s = re.search(r'^(\w+)\[([0-9,]+)\]', arrayString)
            if s:
                dList = list(map(int, s.group(2).split(',')))
                if (self.options.debug):
                    print(dList);
                    pressReturnToContinue('10.2')
            else:
                # Dimensions could not be parsed.  The original code fell
                # through with ``dList``/``s`` undefined (a NameError when
                # debug was off); log the line and skip it instead.
                self.log.debug('(*10.3) %3i' % (index + 1) + lines[index])
                if (self.options.extraDebug): self.outputObjectStructText('ARRAY_DIM_ERROR: ' + lines[index],
                                                                          append=True)
                if (self.options.debug): pressReturnToContinue('10.3')
                index += 1
                continue
            if (self.options.debug):
                if len(dList) > 1:
                    print(lines[index])
                    print(arrayString)
                    print(dList)
                pressReturnToContinue('10.4')
            #### @todo Special handling of struct nlogPrimaryBfrState_t primaryBfrState[5];
            #### @todo Special handling of struct temperatureMonitoredDieAttribute_t temperatureMonitoredDieInfo[2] [4];
            # struct/union/enum arrays use the bare tag name as fwStruct;
            # plain arrays keep the full specifier string from the match.
            n = re.search(r'^(struct|union|enum) (\w+)', m.group(1))
            fwStruct = n.group(2) if n else m.group(1)
            myObj = _newChild(obj, s.group(1), fwStruct, 'array', index, index,
                              arrayDimList=dList)
            obj.memberList.append(myObj)
            if n is None:
                #### For now, just flatten a plain 1D array of limited elements
                if (len(dList) == 1) and (dList[0] > 1) and (dList[0] <= 64):
                    for i in range(dList[0]):
                        # // keeps sizes integral (bit counts); the original
                        # '/' produced a float under Python 3.
                        o = _newChild(myObj, myObj.fwObject + "_%i" % (i), myObj.fwStruct,
                                      'array', index, index,
                                      sizeInBits=myObj.sizeInBits // int(dList[0]))
                        myObj.memberList.append(o)
            if (self.options.debug):
                print()
                pprint(vars(obj))
                print()
                pprint(vars(obj.memberList[-1]))
                pressReturnToContinue('10.5 InArray')
            index += 1
            continue
        ### End of a structure when '}' is encountered ################################
        elif re.search(r'\}', myLine):
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            if (self.options.debug):
                print("\'}\' indicates the end of current struct. Returning the object.")
                pprint(vars(obj))
            ### obj.depth of 1 is the base object. When we reach the end of the base object
            ### we cannot simply return since we still need to get size, versionMajor/Minor
            if (obj.depth > 1):
                obj.endLineNum = index
                if (self.options.debug): pressReturnToContinue('11')
                return obj
            if (self.options.debug): pressReturnToContinue('11.1')
            index += 1
            continue
        ###############################################################################
        # Special handling for 'unnamed type (instance ....'
        elif 'unnamed type (instance ' in myLine:
            if matchSequence[15].match(myLine):
                m = matchSequence[15].match(myLine)
                if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
                myObj = _newChild(obj, m.group(1), m.group(2), 'union',
                                  index + 1, obj.endLineNum)
                if (self.options.debug):
                    print("\nObject ... before calling buildStruct")
                    pprint(vars(obj))
                    pressReturnToContinue('12.0')
                obj.memberList.append(self.__buildStruct(lines, obj=myObj))
                index = obj.memberList[-1].endLineNum + 1
                if (self.options.debug):
                    print(index, lines[index])
                    print("\nObject ... after calling buildStruct")
                    pprint(vars(obj))
                    pressReturnToContinue('12.1')
                    print("\nSubObject ... after calling buildStruct")
                    pprint(vars(obj.memberList[-1]))
                    pressReturnToContinue('12.1.1')
                continue
            elif matchSequence[16].match(myLine):
                # C++ :: Class variable detection
                m = matchSequence[16].match(myLine)
                if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
                myObj = _newChild(obj, m.group(1), m.group(2), 'bitfield',
                                  index + 1, obj.endLineNum)
                obj.memberList.append(self.__buildStruct(lines, obj=myObj))
                index = obj.memberList[-1].endLineNum + 1
                if (self.options.debug):
                    print(index, lines[index])
                    print()
                    pprint(vars(obj))
                    print()
                    pprint(vars(obj.memberList[-1]))
                    pressReturnToContinue('12.2')
                continue
            elif matchSequence[17].match(myLine):
                # C++ :: Class variable detection
                m = matchSequence[17].match(myLine)
                if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
                myObj = _newChild(obj, m.group(1), m.group(2), 'bitfield',
                                  index + 1, obj.endLineNum)
                if (self.options.debug):
                    print("\nObject ... before calling buildStruct")
                    pprint(vars(obj))
                    pressReturnToContinue('12.3')
                obj.memberList.append(self.__buildStruct(lines, obj=myObj))
                index = obj.memberList[-1].endLineNum + 1
                if (self.options.debug):
                    print(index, lines[index])
                    print("\nObject ... after calling buildStruct")
                    pprint(vars(obj))
                    pressReturnToContinue('12.3.1')
                    print("\nSubObject ... after calling buildStruct")
                    pprint(vars(obj.memberList[-1]))
                    pressReturnToContinue('12.3.2')
                continue
            else:
                self.log.debug('(*12.4) %3i' % (index + 1) + lines[index])
                if (self.options.extraDebug): self.outputObjectStructText('UNPROCESSED: ' + lines[index],
                                                                          append=True)
                if (self.options.debug): pressReturnToContinue('*12.4')
                index += 1
                continue
        ###############################################################################
        # ([false..true]) objectName ; 1;
        elif re.search(r'^(.+) (\w+) : (\d+);$', myLine):
            m = re.search(r'^(.+) (\w+) : (\d+);$', myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            # NOTE(review): the width after ':' is multiplied by 8 as if it
            # were a byte count -- preserved from the original; confirm
            # whether bit-field widths should really be scaled.
            myObj = _newChild(obj, m.group(2), 'bool32_t', 'bitfield',
                              index + 1, obj.endLineNum,
                              sizeInBits=int(m.group(3)) * 8)
            obj.memberList.append(myObj)
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('13')
            index += 1
            continue
        ### Get struct size (bare integer line: bytes -> bits) ########################
        elif matchSequence[18].match(myLine):
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            obj.sizeInBits = int(myLine) * 8
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('15')
            index += 1
            continue
        ### Get struct version major ##################################################
        elif matchSequence[19].match(myLine):
            # NOTE(review): the guard uses matchSequence[19] but the groups
            # come from matchSequence[25] (presumably the capturing variant of
            # the same pattern).  If [25] can fail where [19] matches, ``m``
            # is None here.  Preserved from the original; confirm.
            m = matchSequence[25].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            obj.versionMajorStr = m.group(1)
            obj.versionMajor = int(m.group(2), 16)
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('16')
            index += 1
            continue
        ### Get struct version minor ##################################################
        elif matchSequence[20].match(myLine):
            # NOTE(review): same [20]-vs-[26] pairing as the major-version
            # branch above; preserved from the original.
            m = matchSequence[26].match(myLine)
            if (self.options.extraDebug): self.outputObjectStructText(lines[index], append=True)
            obj.versionMinorStr = m.group(1)
            obj.versionMinor = int(m.group(2), 16)
            if (self.options.debug):
                pprint(vars(obj))
                pressReturnToContinue('17')
            index += 1
            continue
        ### Catching unprocessed struct text ##########################################
        else:
            self.log.debug('(*18) %3i ' % (index + 1) + lines[index])
            if (self.options.extraDebug): self.outputObjectStructText('UNPROCESSED: ' + lines[index], append=True)
            index += 1
    return obj
def outputObjectStructText(self, textLine, append=True):
    """
    Write ``textLine`` to the per-object struct-text file.

    Parameters:
        textLine -- text to emit (caller supplies any trailing newline)
        append   -- True appends to self.objectTextFile; False truncates it

    Uses a context manager so the handle is closed even if write() raises
    (the original open/write/close leaked the handle on error).
    """
    mode = 'a+' if append else 'w+'
    with open(self.objectTextFile, mode) as txtFile:
        txtFile.write("%s" % (textLine))
def buildStruct(self, uid, fwObject, fwStruct):
"""Performs a definition construction from output file."""
print("fwStruct: %-25s fwObject: %-25s UID: %i" % (fwStruct, fwObject, uid))
self.objectTextFile = os.path.join(self.parsersFolder, fwObject + extTXT)
if (self.options.extraDebug): self.outputObjectStructText(
'# Struct text for Telemetry data object %s (%s)\n' % (fwObject, fwStruct), append=False)
iFile = open(self.structDefFile, "r")
lines = iFile.readlines()
iFile.close()
startLineNum = 0
endLineNum = 0
structDepth = 0 ### current structure depth
maxStructDepth = 0
### Find the start and end line numbers for the specified object
for i in range(len(lines)):
if ('ObjectBegin==>%s' % (fwObject) in lines[i]):
startLineNum = i ### 0-based line count of lines[]
elif ('ObjectEnd==>%s' % (fwObject) in lines[i]):
endLineNum = i ### 0-based line count of lines[]
break
elif ('{' in lines[i]):
structDepth += 1
if (structDepth > maxStructDepth): maxStructDepth = structDepth
elif ('}' in lines[i]):
structDepth -= 1
if (maxStructDepth > self.maxStructDepth): self.maxStructDepth = maxStructDepth
structDepth = maxStructDepth
# print "Start line number: %i" % (startLineNum)
# print "End line number: %i" % (endLineNum)
# print "Struct depth: %i" % (structDepth)
# print "Max Struct depth: %i" % (self.maxStructDepth)
obj = None
exec("obj = GenericObject()")
obj.arrayDimList = [1]
obj.depth = 0
obj.endLineNum = endLineNum
obj.fwObject = fwObject
obj.fwStruct = fwStruct
obj.parent = None
obj.ancestryNames = []
obj.ancestryTypes = []
obj.memberList = []
obj.sizeInBits = 0
obj.startLineNum = startLineNum + 1
obj.structType = None
obj.isPointer = 0
obj.uid = uid
obj.versionMajor = 0xBADD
obj.versionMajorStr = 'versionMajor'
obj.versionMinor = 0xC0DE
obj.versionMinorStr = 'versionMinor'
return self.__buildStruct(lines, obj)
################################################################################################################
################################################################################################################
def main():
    """Drive the telemetry auto-parser: parse CLI options, validate paths,
    extract FW C-structs from the build output, and emit python ctypes
    definitions."""
    # --- command-line options -------------------------------------------
    parser = OptionParser(usage)
    parser.add_option("--fwbuilddir", dest='fwBuildOutputDir', metavar='<DIR>',
                      help='FW build directory (ex: projects/objs/alderstream_02)')
    parser.add_option("--projectname", dest='projectName', metavar='<PROJ>', help='Project name (ex: alderstream_02)')
    parser.add_option("--tools", dest='fwToolsDir', metavar='<TOOLSDIR>', default=None,
                      help='FW telemetry tools dir where bufdict.py is (ex: tools/telemetry)')
    parser.add_option("--uidenumfile", dest='uidEnumFile', metavar='<UIDFILE>', default=None,
                      help='FW file where eUniqueIdentifier enum is defined (default=datacontrol.h)')
    parser.add_option("--multiexeversion", dest='multiExeVersion', metavar='<MULTIEXEVER>', default=None,
                      help='multi.exe version (Ex: multi_716, default=auto)')
    parser.add_option("--extradb", action='store_true', dest='extraDebug', default=False,
                      help='Output additional debug info.')
    parser.add_option("--dataobj", dest='dataObjToProcess', metavar='<OBJECT>', default=None,
                      help='Process specified data object.')
    parser.add_option("--debug", action='store_true', dest='debug', default=False, help='Debug mode.')
    parser.add_option("--verbose", action='store_true', dest='verbose', default=False,
                      help='Verbose printing for debug use.')
    parser.add_option("--defineobjs", dest='tObjToDefine', metavar='<OBJDEF>', default=None,
                      help='Manual imput for telemetry Objects from datacontrol.h that should be defined, by euid. (ex: [0,8,9,115]). NO SPACES. If "all" defaults to ALL. ')
    if not ENABLE_CLANG:
        parser.add_option("--media", dest='media', metavar='<MEDIA>', default=None,
                          help='Select media destination I.E. NAND, SXP.')
    (options, args) = parser.parse_args()
    # --- validate required options and paths; exit codes 22-25 mark the
    # --- specific failure for calling scripts ---------------------------
    # NOTE(review): quit() raises SystemExit; a caller wrapping main() in a
    # broad except will see these (and the quit(0) success exit) as errors.
    if (options.fwBuildOutputDir is None) or (options.projectName is None) or (options.fwToolsDir is None):
        print('\nPlease specify options')
        print(options)
        print(args)
        quit(22)
    fwBuildOutputDirCheck = os.path.abspath(options.fwBuildOutputDir)
    if not os.path.exists(fwBuildOutputDirCheck):
        print('\nPlease specify "--fwbuilddir" option to specify the folder with elf file')
        quit(23)
    fwToolsDirCheck = os.path.abspath(options.fwToolsDir)
    if not os.path.exists(fwToolsDirCheck):
        print('\nInvalid output directory path "%s".' % fwToolsDirCheck)
        quit(24)
    buffDictionaryCheck = os.path.abspath(os.path.join(fwToolsDirCheck, 'bufdict.py'))
    if not os.path.exists(buffDictionaryCheck):
        print('\nFailed to locate bufdict.py in the specified FW tools folder (%s).' % options.fwToolsDir)
        quit(25)
    # --- non-clang builds pick the media target (defaults to NAND) ------
    if not ENABLE_CLANG:
        print("MEDIA is " + str(options.media))
        if options.media is None:
            redefineMedia(True)
        elif options.media == "SXP":
            redefineMedia(False)
        elif options.media == "NAND":
            redefineMedia(True)
        else:
            redefineMedia(True)
        print("Choosing media: %s" % str(TRUNK))
        redefineFileNames()  # Set unique names
    print("Build project name: %s" % (options.projectName))
    print("Project build output dir: %s" % (options.fwBuildOutputDir))
    ### Extract FW C-structs #####################
    cag = CtypeAutoGen(options)
    cag.autoSrcDirScan()
    # Choose which telemetry objects to define: default list from
    # datacontrol.h, everything, or an explicit euid list from --defineobjs.
    if options.tObjToDefine == None or (options.tObjToDefine.lower() == "default"):
        cag.getTelemetryObjectList()  # Default: Use datacontrol.h to define which objects to extract definitions of
    else:
        # _____if --defineobjs specified________
        objs = options.tObjToDefine.replace('[', '').replace(']', '')
        if objs.lower() == "all":
            cag.getTelemetryObjectList("all")
        else:
            objList = objs.split(',')
            print("Defining Telemetry Objs: %s ..." % (objList))
            cag.getTelemetryObjectList("justthese", objList)
    # cag.dumpAllTypedefs()
    cag.extractCstructs()
    cag.generatePythonCtypes()
    print("Normal End Process")
    quit(0)
if __name__ == '__main__':
    # Script entry point: write the dataStructure.cfg header, run main(),
    # then close the generated config list and report the elapsed time.
    from datetime import datetime
    p = datetime.now()
    dataStructureFile = os.path.abspath(os.path.join(os.getcwd(), 'dataStructure.cfg'))
    print("Data CFG located at {0}".format(dataStructureFile))
    with open(dataStructureFile, "w") as openFile:
        openFile.write('#! /usr/bin/python\n')
        openFile.write('# -*- coding: utf-8 -*-\n')
        openFile.write('# Author(s): <NAME>\n')
        openFile.write('# Skynet Machine Programming Config Generator...\n')
        openFile.write('AUTOPARSER_DATA_OBJECT_LIST = [\n')
        openFile.write(
            ' # Instance Name, Typedef, VERSION MAJOR macro, VERSION MINOR macro, Pack, versionMajorName, versionMinorName, RNLBA, WNLBA\n')
    # BUGFIX: the original bare 'except:' also caught the SystemExit raised
    # by quit(0) at the end of main(), so "Fail End Process" was printed even
    # after a fully successful run.  Catch only real errors.
    try:
        main()
    except Exception:
        print("Fail End Process")
    q = datetime.now()
    with open(dataStructureFile, "a") as openFile:
        openFile.write(']\n')
    print("Data CFG located at {0}".format(dataStructureFile))
    print("\nExecution time: " + str(q - p))
## @}
| StarcoderdataPython |
3493470 | import logging
import click
from rdflib import Graph, URIRef
from zib_uploader.fetch_data import OntologyMappingsFetcher
from zib_uploader.process_data import OntologyMappingsProcessor
logger = logging.getLogger(__name__)
@click.command()
@click.argument('output_filepath', type=click.Path())
def main(output_filepath):
    """
    Fetch ATC ontology mappings from bioportal, process them, and write to
    ttl format. (Serves as example on how to chain OntologyMappingsFetcher and
    OntologyMappingsProcessor)
    """
    fetcher = OntologyMappingsFetcher()
    processor = OntologyMappingsProcessor()
    # data is expected to be an iterable of (subject, predicate, object)
    # URI triples -- TODO confirm against OntologyMappingsProcessor.process().
    data = processor.process(fetcher.fetch())
    g = Graph()
    for s, p, o in data:
        g.add((URIRef(s), URIRef(p), URIRef(o)))
    logger.info(f'Writing ontology mappings to {output_filepath}')
    # Serialize the accumulated RDF graph to Turtle (.ttl) format.
    g.serialize(output_filepath, format='turtle')
if __name__ == '__main__':
    # CLI entry point; click parses OUTPUT_FILEPATH from argv.
    logging.basicConfig(level='INFO')
    main()
| StarcoderdataPython |
1712815 | import numpy as np
def inverse(matrix=None):
    """Compute and print the inverse of a square matrix.

    Generalized (backward-compatibly) to accept the matrix directly; the
    original interactive stdin flow is preserved when *matrix* is omitted.

    :param matrix: optional square matrix (nested sequence or ndarray).
        When None, dimensions and integer elements are read from stdin.
    :return: the inverse as a numpy array, or None when the input is not
        square or is singular (the original crashed on singular input).
    """
    print("Inverse of a matrix:")
    if matrix is None:
        # Interactive path: read dimensions, then elements one per line.
        row = int(input("Enter number of rows:"))
        column = int(input("Enter number of columns:"))
        if row != column:
            print("Please give square matrix")
            return None
        print("Enter the elements of Matrix:")
        matrix = [[int(input()) for _ in range(column)] for _ in range(row)]
    matrix_a = np.array(matrix)
    # Inversion is only defined for square 2-D matrices.
    if matrix_a.ndim != 2 or matrix_a.shape[0] != matrix_a.shape[1]:
        print("Please give square matrix")
        return None
    print("Matrix is: ")
    for n in matrix_a.tolist():
        print(n)
    print("Inverse of a matrix is:")
    try:
        inv = np.linalg.inv(matrix_a)
    except np.linalg.LinAlgError:
        # Singular matrices have no inverse.
        print("Matrix is singular and cannot be inverted")
        return None
    print(inv)
    return inv
| StarcoderdataPython |
5077284 | import sys
sys.path.append("/flash/userapp")
import pyb
import utime
from uvacbot.io.pwm import Pwm
# Beep patterns: 'repeat' = pulses per burst; 'sleep1'/'sleep2' = pulse
# on/off durations in ms; 'sleep3' = pause between bursts in ms.
alarm1 = {
    'repeat': 10,
    'sleep1': 15,
    'sleep2': 10,
    'sleep3': 300}
alarm2 = {
    'repeat': 4,
    'sleep1': 100,
    'sleep2': 50,
    'sleep3': 200}
def playAlarm(params):
    """Play an alarm pattern on the buzzer driven by pin D12.

    Emits 10 bursts; each burst is params['repeat'] 880 Hz PWM pulses of
    params['sleep1'] ms separated by params['sleep2'] ms gaps, with a
    params['sleep3'] ms pause between bursts.  The PWM channel is released
    when the pattern finishes.
    """
    # Pwm(pin, timer, channel, frequency) -- 880 Hz carrier (note A5).
    pwm = Pwm(pyb.Pin.board.D12, 3, 1, 880.0)
    for i in range(10):
        for j in range (params['repeat']):
            pwm.setDutyPerc(50.0)
            utime.sleep_ms(params['sleep1'])
            pwm.setDutyPerc(0)
            utime.sleep_ms(params['sleep2'])
        utime.sleep_ms(params['sleep3'])
    pwm.cleanup()
playAlarm(alarm2) | StarcoderdataPython |
5178655 | """
The script generate score for the performance evaluation for kaggle contest "bag of words meets bag of popcorn",
using split data from only train data set
Text represents using tf-idf
Predictive model: naive_bayes
"""
from sklearn.metrics import roc_auc_score, roc_curve  # BUGFIX: module is sklearn.metrics, not sklearn.metric
from sklearn.feature_extraction.text import TfidfVectorizer  # BUGFIX: was used below without being imported
from sklearn.decomposition import TruncatedSVD  # BUGFIX: was used below without being imported
from sklearn.naive_bayes import GaussianNB  # per the module docstring the model is naive Bayes
from sklearn.cross_validation import train_test_split
import pandas as pd  # BUGFIX: pd was used below without being imported
import first_attempt
def main():
    """Score a naive-Bayes sentiment model (tf-idf + truncated SVD features)
    on an 80/20 split of the labelled training data, reporting ROC AUC."""
    # Load the labelled training data (tab-separated Kaggle dump).
    train_data = pd.read_csv("/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
    # NOTE(review): assumed column names from the Kaggle "bag of words meets
    # bag of popcorn" dataset -- confirm; the original referenced
    # train_features/train_target without ever defining them.
    train_features = train_data["review"]
    train_target = train_data["sentiment"]
    # 80/20 train/test split with a fixed seed for reproducibility.
    # BUGFIX: the original broke this statement across two lines without a
    # continuation, which was a SyntaxError.
    split_train_features, split_test_features, split_train_target, split_test_target = \
        train_test_split(train_features, train_target, test_size=0.20, random_state=0)
    # Fit tf-idf on the training split only, then reduce to 100 dimensions.
    vectorizer = TfidfVectorizer(stop_words='english')
    split_train_features = first_attempt.corpus_preprocessing(split_train_features)
    split_train_features = vectorizer.fit_transform(split_train_features)
    tsvd = TruncatedSVD(100)
    tsvd.fit(split_train_features)
    split_train_features = tsvd.transform(split_train_features)
    # Apply the already-fitted transforms (no refitting) to the test split.
    split_test_features = first_attempt.corpus_preprocessing(split_test_features)
    split_test_features = vectorizer.transform(split_test_features)
    # BUGFIX: was `tsvd.transform(split_test_features=)` -- a SyntaxError.
    split_test_features = tsvd.transform(split_test_features)
    # Fit the classifier and score the held-out predictions.
    # NOTE(review): the original never defined `model`; GaussianNB chosen to
    # match the docstring and the dense SVD features -- confirm.
    model = GaussianNB()
    model.fit(split_train_features.toarray() if hasattr(split_train_features, "toarray") else split_train_features,
              split_train_target)
    split_prediction = model.predict(split_test_features)
    # BUGFIX: the original mixed split_prediction/split_predict and then
    # called the float `score` as a function.
    score = roc_auc_score(split_test_target, split_prediction)
    print(score)
if __name__ == "__main__":
    main()
6560720 | # coding=utf-8
from __future__ import unicode_literals, absolute_import, division, print_function
import sopel.module
# import sopel_modules.SpiceBot as SpiceBot
# from sopel_modules.spicemanip import spicemanip
from upsidedown import transform
# TODO add upsidedown to spicemanip
# TODO add prerun functionality as well as command handling
@sopel.module.rule('^flips (.+)')
@sopel.module.intent('ACTION')
def flips(bot, trigger):
    """Respond to '/me flips <thing>' by flipping the thing over."""
    flipped = trigger.group(1).strip()
    # Tables get the classic emoticon; anything else is rendered upside down.
    if flipped == 'a table':
        message = "(╯°□°)╯︵ ┻━┻"
    else:
        message = "(╯°□°)╯︵ %s" % transform(flipped)
    bot.say(message)
@sopel.module.rule('^rolls (.+)')
@sopel.module.intent('ACTION')
def roll(bot, trigger):
    """Respond to '/me rolls <thing>', with a longer animation for things
    rolled down a hill."""
    target = trigger.group(1).strip()
    if not target.endswith(' down a hill'):
        # Simple case: roll the upside-down rendering once.
        bot.say("(╮°-°)╯︵ %s" % transform(target))
        return
    target = target[:-12]  # drop the trailing " down a hill" (12 chars)
    tegrat = transform(target)
    # Alternate flipped/unflipped to suggest tumbling down the hill.
    bot.say("(╮°-°)╯︵ %s %s %s %s %s (@_@;)" % (tegrat, target, tegrat, target, tegrat))
| StarcoderdataPython |
175050 | #!/bin/python_3.9
import hashlib,os
try:
from cryptography_me_she import bases
except:
import bases
class Hashing():
    """Hash a message with a named hashlib digest, or a base-N encoding via
    the companion `bases` module, and (as a side effect) append the plaintext
    to the brute-forcer word list."""

    # Digest names the original if/elif chain accepted; all are valid
    # hashlib algorithm names, so we can dispatch through hashlib.new().
    _HASH_NAMES = frozenset(
        {"md5", "sha1", "sha256", "sha3_224", "sha224", "sha512"})

    def __init__(self) -> None:
        pass

    def hashing(self, type_hash: str, masg: str) -> str:
        """Return the hex digest (or base encoding) of *masg*.

        :param type_hash: supported hashlib name (case-insensitive) or a
            'base...' encoding handled by bases.base_encde()
        :param masg: message text; must be ASCII-encodable
        :return: hex digest / encoded string, or '' for unknown algorithms
            (matching the original behaviour)
        """
        data: bytes = bytes(masg, "ascii")
        algorithm = type_hash.lower()
        hash2: str = ""
        if algorithm in self._HASH_NAMES:
            # hashlib.new() replaces the original six-way if/elif chain.
            hash2 = hashlib.new(algorithm, data).hexdigest()
        elif "base" in algorithm:
            hash2 = str(bases.base_encde(type_hash, data))
        # Side effect kept from the original: record the plaintext in the
        # brute-forcer word list (best effort -- the folder may not exist).
        word_dir = str(os.getcwd().replace("cryptography_me_she", ""))
        try:
            if "nt" in os.name:
                path = word_dir + "hash_bruutefrocer\\word.list"
            else:
                path = word_dir + "hash_bruutefrocer/word.list"
            with open(path, "a") as word:
                # str(b'...')[1:] with quotes stripped == the decoded text
                # (same transformation the original applied).
                word.write('\n' + str(data)[1:].replace("'", ""))
        except OSError:
            print("no word list")
        return hash2
if __name__=="__main__":
type_hash: str = str(input("hash_type: "))
masg: str = str(input("enter masg"))
HA = Hashing()
hash: str = HA.hashing(type_hash,masg)
print(hash)
| StarcoderdataPython |
5088157 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from intera_core_msgs/HeadState.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class HeadState(genpy.Message):
  # genpy-generated (de)serialization class for intera_core_msgs/HeadState;
  # per the module header it is autogenerated -- regenerate from the .msg
  # definition rather than hand-editing the code below.
  _md5sum = "51024ade10ffefe117049c9ba6fd743c"
  _type = "intera_core_msgs/HeadState"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """float32 pan
bool isTurning
# isBlocked will be true during active cancellation when the desired head position
# can not be reached because of j0's current position
bool isBlocked
# panMode will be one of
uint8 PASSIVE_MODE = 0
uint8 ACTIVE_MODE = 1
uint8 ACTIVE_CANCELLATION_MODE = 2
#
uint8 panMode
"""
  # Pseudo-constants
  PASSIVE_MODE = 0
  ACTIVE_MODE = 1
  ACTIVE_CANCELLATION_MODE = 2
  __slots__ = ['pan','isTurning','isBlocked','panMode']
  _slot_types = ['float32','bool','bool','uint8']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       pan,isTurning,isBlocked,panMode
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(HeadState, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.pan is None:
        self.pan = 0.
      if self.isTurning is None:
        self.isTurning = False
      if self.isBlocked is None:
        self.isBlocked = False
      if self.panMode is None:
        self.panMode = 0
    else:
      self.pan = 0.
      self.isTurning = False
      self.isBlocked = False
      self.panMode = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Fixed-size message: little-endian float32 + three uint8s ("<f3B").
      buff.write(_get_struct_f3B().pack(_x.pan, _x.isTurning, _x.isBlocked, _x.panMode))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      end += 7
      # 7 bytes total: "<f3B" = float32 + 3x uint8.
      (_x.pan, _x.isTurning, _x.isBlocked, _x.panMode,) = _get_struct_f3B().unpack(str[start:end])
      self.isTurning = bool(self.isTurning)
      self.isBlocked = bool(self.isBlocked)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_f3B().pack(_x.pan, _x.isTurning, _x.isBlocked, _x.panMode))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 7
      (_x.pan, _x.isTurning, _x.isBlocked, _x.panMode,) = _get_struct_f3B().unpack(str[start:end])
      self.isTurning = bool(self.isTurning)
      self.isBlocked = bool(self.isBlocked)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    # Accessor for the shared genpy uint32 Struct (autogenerated helper).
    global _struct_I
    return _struct_I
_struct_f3B = None
def _get_struct_f3B():
    # Lazily compile and cache the "<f3B" Struct used by HeadState
    # (little-endian float32 followed by three uint8s).
    global _struct_f3B
    if _struct_f3B is None:
        _struct_f3B = struct.Struct("<f3B")
    return _struct_f3B
| StarcoderdataPython |
3525702 | <gh_stars>0
from django.conf.urls import url
from rest_framework import routers
# Import standard views
from .api import (
ProjectViewSet,
MculeQuoteViewSet,
BatchViewSet,
TargetViewSet,
MethodViewSet,
ReactionViewSet,
PubChemInfoViewSet,
ProductViewSet,
ReactantViewSet,
CatalogEntryViewSet,
)
# Import action views
from .api import (
AnalyseActionViewSet,
AddActionViewSet,
ExtractActionViewSet,
FilterActionViewSet,
QuenchActionViewSet,
SetTemperatureActionViewSet,
StirActionViewSet,
)
# Import OT Session views
from .api import (
OTProtocolViewSet,
OTBatchProtocolViewSet,
OTSessionViewSet,
DeckViewSet,
PipetteViewSet,
TipRackViewSet,
PlateViewSet,
WellViewSet,
CompoundOrderViewSet,
OTScriptViewSet,
)
# Build the DRF router and register every viewset route.
router = routers.DefaultRouter()

# (prefix, viewset, basename) triples, registered in the original order so
# the generated URL names are unchanged.
_ROUTES = [
    # Standard model routes
    ("api/projects", ProjectViewSet, "projects"),
    ("api/mculequotes", MculeQuoteViewSet, "mculequotes"),
    ("api/batches", BatchViewSet, "batches"),
    ("api/targets", TargetViewSet, "targets"),
    ("api/methods", MethodViewSet, "methods"),
    ("api/pubcheminfo", PubChemInfoViewSet, "pubcheminfo"),
    ("api/reactions", ReactionViewSet, "reactions"),
    ("api/products", ProductViewSet, "products"),
    ("api/reactants", ReactantViewSet, "reactants"),
    ("api/catalogentries", CatalogEntryViewSet, "catalogentries"),
    # Action routes
    ("api/analyseactions", AnalyseActionViewSet, "analyseactions"),
    ("api/addactions", AddActionViewSet, "addactions"),
    ("api/extractactions", ExtractActionViewSet, "extractactions"),
    ("api/filteractions", FilterActionViewSet, "filteractions"),
    ("api/quenchactions", QuenchActionViewSet, "quenchactions"),
    ("api/set-temperatureactions", SetTemperatureActionViewSet, "set-temperatureactions"),
    ("api/stiractions", StirActionViewSet, "stiractions"),
    # OpenTrons session routes
    ("api/otprotocols", OTProtocolViewSet, "otprotocols"),
    ("api/otbatchprotocols", OTBatchProtocolViewSet, "otbatchprotocols"),
    ("api/otsessions", OTSessionViewSet, "otsessions"),
    ("api/decks", DeckViewSet, "decks"),
    ("api/pipettes", PipetteViewSet, "pipettes"),
    ("api/tipracks", TipRackViewSet, "tipracks"),
    ("api/plates", PlateViewSet, "plates"),
    ("api/wells", WellViewSet, "wells"),
    ("api/compoundorders", CompoundOrderViewSet, "compoundorders"),
    ("api/otscripts", OTScriptViewSet, "otscripts"),
]
for _prefix, _viewset, _basename in _ROUTES:
    router.register(_prefix, _viewset, _basename)

urlpatterns = router.urls
| StarcoderdataPython |
1736686 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<<EMAIL>>
# http://binux.me
# Created on 2014-08-09 17:52:49
import json
from tornado import gen
from .base import *
from libs import utils
import traceback
class TPLPushHandler(BaseHandler):
    # Pushes (shares) a template to another template's owner; requires write
    # permission on the source template.  The Chinese strings below are
    # user-facing messages ("没有权限" = "no permission",
    # "模板不存在" = "template does not exist") and must stay as-is.
    @tornado.web.authenticated
    def get(self, tplid):
        """Render the push form listing public templates as targets."""
        user = self.current_user
        tpl = self.db.tpl.get(tplid, fields=('id', 'userid', 'sitename'))
        if not self.permission(tpl, 'w'):
            self.evil(+5)
            self.finish(u'<span class="alert alert-danger">没有权限</span>')
            return
        tpls = self.db.tpl.list(userid=None, limit=None, fields=('id', 'sitename'))
        self.render('tpl_push.html', tpl=tpl, tpls=tpls)
    @tornado.web.authenticated
    def post(self, tplid):
        """Create the push request and lock the source template."""
        user = self.current_user
        tplid = int(tplid)
        tpl = self.db.tpl.get(tplid, fields=('id', 'userid', ))
        if not self.permission(tpl, 'w'):
            self.evil(+5)
            self.finish(u'<span class="alert alert-danger">没有权限</span>')
            return
        to_tplid = int(self.get_argument('totpl'))
        msg = self.get_argument('msg')
        # totpl == 0 means "push as a brand-new template" (no target).
        if to_tplid == 0:
            to_tplid = None
            to_userid = None
        else:
            totpl = self.db.tpl.get(to_tplid, fields=('id', 'userid', ))
            if not totpl:
                self.evil(+1)
                self.finish(u'<span class="alert alert-danger">模板不存在</span>')
                return
            to_userid = totpl['userid']
        self.db.push_request.add(from_tplid=tpl['id'], from_userid=user['id'],
                to_tplid=to_tplid, to_userid=to_userid, msg=msg)
        # Lock the template so it cannot change while the push is pending.
        self.db.tpl.mod(tpl['id'], lock=True)
        #referer = self.request.headers.get('referer', '/my/')
        self.redirect('/pushs')
class TPLVarHandler(BaseHandler):
    # Shows the variable-entry form for creating a task from a template.
    def get(self, tplid):
        """Render the new-task variable form for a readable template."""
        user = self.current_user
        tpl = self.db.tpl.get(tplid, fields=('id', 'note', 'userid', 'sitename', 'siteurl', 'variables'))
        if not self.permission(tpl):
            self.evil(+5)
            # User-facing message: "没有权限" = "no permission".
            self.finish('<span class="alert alert-danger">没有权限</span>')
            return
        # 'variables' is stored as a JSON string in the DB.
        self.render('task_new_var.html', tpl=tpl, variables=json.loads(tpl['variables']))
class TPLDelHandler(BaseHandler):
    # Deletes a template the current user has write access to.
    @tornado.web.authenticated
    def post(self, tplid):
        """Delete the template, then bounce back to the referring page."""
        user = self.current_user
        # check_permission() raises/aborts on missing write access.
        tpl = self.check_permission(self.db.tpl.get(tplid, fields=('id', 'userid')), 'w')
        self.db.tpl.delete(tplid)
        referer = self.request.headers.get('referer', '/my/')
        self.redirect(referer)
class TPLRunHandler(BaseHandler):
    # Executes a fetch template once (by id, JSON body, or form arguments)
    # and renders the success/failure page.
    @gen.coroutine
    def post(self, tplid):
        """Resolve the template + environment from the request and run it."""
        self.evil(+5)
        user = self.current_user
        data = {}
        # Best-effort JSON body parse; non-breaking spaces (0xC2 0xA0) are
        # normalized first.  Failures deliberately fall through to the
        # form-argument path below.
        try:
            if 'json' in self.request.headers['Content-Type']:
                self.request.body = self.request.body.replace(b'\xc2\xa0', b' ')
                data = json.loads(self.request.body)
        except :
            pass
        # Template id priority: URL path, JSON body, then hidden form field.
        tplid = tplid or data.get('tplid') or self.get_argument('_binux_tplid', None)
        tpl = dict()
        fetch_tpl = None
        if tplid:
            tpl = self.check_permission(self.db.tpl.get(tplid, fields=('id', 'userid', 'sitename',
                'siteurl', 'tpl', 'interval', 'last_success')))
            # Stored templates are encrypted per-user.
            fetch_tpl = self.db.user.decrypt(tpl['userid'], tpl['tpl'])
        if not fetch_tpl:
            fetch_tpl = data.get('tpl')
        if not fetch_tpl:
            try:
                fetch_tpl = json.loads(self.get_argument('tpl'))
            except:
                raise HTTPError(400)
        env = data.get('env')
        if not env:
            try:
                env = dict(
                        variables = json.loads(self.get_argument('env')),
                        session = []
                        )
            except:
                raise HTTPError(400)
        # Proxy selection: explicit _binux_proxy variable wins; logged-in
        # users get the default proxies; anonymous runs use none.
        try:
            url = utils.parse_url(env['variables'].get('_binux_proxy'))
            if url:
                proxy = {
                    'host': url['host'],
                    'port': url['port'],
                }
                result = yield self.fetcher.do_fetch(fetch_tpl, env, [proxy])
            elif self.current_user:
                result = yield self.fetcher.do_fetch(fetch_tpl, env)
            else:
                result = yield self.fetcher.do_fetch(fetch_tpl, env, proxies=[])
        except Exception as e:
            traceback.print_exc()
            self.render('tpl_run_failed.html', log=str(e))
            return
        if tpl:
            self.db.tpl.incr_success(tpl['id'])
        self.render('tpl_run_success.html', log = result.get('variables', {}).get('__log__'))
        return
class PublicTPLHandler(BaseHandler):
    # Lists all public templates, most successful first.
    def get(self):
        """Render the public template gallery sorted by success count."""
        tpls = self.db.tpl.list(userid=None, limit=None, fields=('id', 'siteurl', 'sitename', 'banner', 'note', 'disabled', 'lock', 'last_success', 'ctime', 'mtime', 'fork', 'success_count'))
        # Negated key => descending success_count.
        tpls = sorted(tpls, key=lambda t: -t['success_count'])
        self.render('tpls_public.html', tpls=tpls)
class TPLGroupHandler(BaseHandler):
    # Lets a user assign one of their templates to a named group.
    @tornado.web.authenticated
    def get(self, tplid):
        """Render the group picker with the user's distinct existing groups."""
        user = self.current_user
        groupNow = self.db.tpl.get(tplid, fields=('_groups'))['_groups']
        tasks = []
        _groups = []
        # Collect the distinct group names across the user's templates.
        tpls = self.db.tpl.list(userid=user['id'], fields=('_groups'), limit=None)
        for tpl in tpls:
            temp = tpl['_groups']
            if (temp not in  _groups):
                _groups.append(temp)
        self.render('tpl_setgroup.html', tplid=tplid, _groups=_groups, groupNow=groupNow)
    @tornado.web.authenticated
    def post(self, tplid):
        """Persist the chosen group: a typed-in name wins over checkboxes;
        'None' (the literal string) when nothing was selected."""
        envs = {}
        for key in self.request.body_arguments:
            envs[key] = self.get_body_arguments(key)
        New_group = envs['New_group'][0].strip()
        if New_group != "" :
            target_group = New_group
        else:
            # First checkbox whose value is 'on' names the target group;
            # the for/else falls through when no checkbox was ticked.
            for value in envs:
                if envs[value][0] == 'on':
                    target_group = value.strip()
                    break
            else:
                target_group = 'None'
        self.db.tpl.mod(tplid, _groups=target_group)
        self.redirect('/my/')
# URL routing table consumed by the application setup: (regex, RequestHandler).
handlers = [
    ('/tpl/(\d+)/push', TPLPushHandler),
    ('/tpl/(\d+)/var', TPLVarHandler),
    ('/tpl/(\d+)/del', TPLDelHandler),
    ('/tpl/?(\d+)?/run', TPLRunHandler),
    ('/tpls/public', PublicTPLHandler),
    ('/tpl/(\d+)/group', TPLGroupHandler),
]
| StarcoderdataPython |
11330663 | <reponame>sixfwa/fastapi-basics
import datetime as _dt
import sqlalchemy as _sql
import sqlalchemy.orm as _orm
import database as _database
class User(_database.Base):
    """SQLAlchemy ORM model for an application user."""
    __tablename__ = "users"
    id = _sql.Column(_sql.Integer, primary_key=True, index=True)
    # Unique login identity.
    email = _sql.Column(_sql.String, unique=True, index=True)
    # Only the password hash is stored, never the plaintext.
    hashed_password = _sql.Column(_sql.String)
    is_active = _sql.Column(_sql.Boolean, default=True)
    # One-to-many: posts authored by this user (mirrors Post.owner).
    posts = _orm.relationship("Post", back_populates="owner")
class Post(_database.Base):
    """SQLAlchemy ORM model for a post belonging to a user."""
    __tablename__ = "posts"
    id = _sql.Column(_sql.Integer, primary_key=True, index=True)
    title = _sql.Column(_sql.String, index=True)
    content = _sql.Column(_sql.String, index=True)
    owner_id = _sql.Column(_sql.Integer, _sql.ForeignKey("users.id"))
    # Timestamps default to creation time; date_last_updated must be set
    # explicitly on edits (no onupdate hook is configured).
    date_created = _sql.Column(_sql.DateTime, default=_dt.datetime.utcnow)
    date_last_updated = _sql.Column(_sql.DateTime, default=_dt.datetime.utcnow)
    # Many-to-one back to the authoring User (mirrors User.posts).
    owner = _orm.relationship("User", back_populates="posts")
5090658 | <reponame>rpytel1/vipriors-challenges-toolkit
"""
Arrange training, validation and test data for VIPriors object detection
challenge.
"""
import tqdm
import os
import shutil
if __name__ == '__main__':
    # Guard against accidentally rebuilding an already-converted test set.
    if os.path.exists("TEST_MARKER_DO_NOT_REMOVE"):
        raise ValueError("Test set has already been updated. Don't run this "
                         "script again!")
    if os.path.exists("test-images"):
        raise ValueError("Test set writing directory exists. Please make sure "
                         "the writing directory does not exist, as the "
                         "resulting folder must be empty except for the newly "
                         "written test set.")
    os.makedirs("test-images")
    print("Loading VIPriors testing split settings...")
    # Mapping file: each line is "<old MS COCO val id>,<new challenge id>".
    with open('annotations/test_image_mappings.txt', 'r') as f:
        lines = f.read().split("\n")
    test_mappings = {}
    for line in lines:
        key, val = line.strip().split(",")
        test_mappings[int(key)] = int(val)
    # Copy each mapped val2017 image to its renumbered challenge filename
    # (12-digit zero-padded, matching the MS COCO naming convention).
    for old_id, new_id in tqdm.tqdm(test_mappings.items(), desc='Processing images'):
        shutil.copyfile(f"val2017/{old_id:012d}.jpg", f"test-images/{new_id:012d}.jpg")
    # Add a marker file to the folder to indicate that test data was updated
    with open("TEST_MARKER_DO_NOT_REMOVE", 'w') as f:
        f.write("This file exists to mark the test data as having been updated "
                "to match the challenge format.\n\nThis file should not be "
                "removed.")
    # Remove MS COCO directory
    shutil.rmtree("val2017")
    # Rename MS COCO "train2017" to VIPriors "images"
    os.rename("train2017", "images")
118514 | import pandas as pd
def resample_and_merge(df_blocks: pd.DataFrame,
                       df_prices: pd.DataFrame,
                       dict_params: dict,
                       freq: str = "5T"):
    """Resample block metrics and ETH prices onto a common grid and merge.

    :param df_blocks: time-indexed per-block metrics (must yield a
        'mean_gas_price' column after resampling)
    :param df_prices: time-indexed frame with an 'eth_usd_price' column
    :param dict_params: column -> aggregation mapping passed to resample()
    :param freq: fixed pandas offset alias, e.g. '5T' for 5 minutes
    :return: merged frame with a 24h-lagged gas price feature; the first
        24 hours are dropped because the lagged column is NaN there
    """
    df = resample(df_blocks, freq, dict_params)
    df_prices = resample(df_prices, freq, dict_params)
    # Number of resampled periods in 24 hours.  BUGFIX/generalisation: the
    # original hard-coded 288, which is only correct for the default
    # 5-minute frequency even though `freq` is a parameter.
    lag = int(pd.Timedelta("24h") / pd.Timedelta(freq))
    # 24h-lagged mean gas price as an autoregressive feature.
    df["mean_gas_price_24h_lagged"] = df["mean_gas_price"].shift(lag, axis=0)
    # Attach the ETH/USD price, forward-filling gaps between price samples.
    df["eth_usd_price"] = df_prices["eth_usd_price"]
    df["eth_usd_price"] = df["eth_usd_price"].ffill()
    # Drop the first day (positional, equivalent to the original label-based
    # drop on a unique resampled index): its lagged column is all NaN.
    return df.iloc[lag:]
def resample(df: pd.DataFrame,
             freq: str,
             dict_params: dict) -> pd.DataFrame:
    """Resample selected columns of *df* onto the grid given by *freq*.

    :param df: time-indexed frame to resample
    :param freq: pandas offset alias, e.g. '5T' for 5 minutes
    :param dict_params: maps a column name to its aggregation ('mean' or
        'last'); columns absent from the mapping are dropped
    :return: frame with one resampled series per configured column
    """
    resampled = []
    for name in df.columns:
        if name not in dict_params:
            continue
        op = dict_params[name]
        if op not in ("mean", "last"):
            raise RuntimeError(f"{op} is not a valid resampling operation:"
                               f" currently supported are 'mean' or 'last'")
        # Bins are labelled by their right edge so each value summarizes
        # the interval that just ended.
        grouped = df[name].resample(freq, label="right")
        resampled.append(grouped.mean() if op == "mean" else grouped.last())
    return pd.concat(resampled, axis=1)
def clip_bounds(df: pd.DataFrame,
                dict_params: dict) -> pd.DataFrame:
    """Clip configured columns of *df* to fixed [min, max] bounds, in place.

    :param df: frame to clip (mutated and also returned)
    :param dict_params: e.g. {col1: {'min': 0, 'max': 30}, col2: {'min': -10, 'max': 80}}
    :return: the same frame with the configured columns clipped
    """
    for name in df.columns:
        if name not in dict_params:
            continue
        bounds = dict_params[name]
        df[name] = df[name].clip(lower=bounds["min"], upper=bounds["max"],
                                 axis=0, inplace=False)
    return df
def clip_std(df: pd.DataFrame,
             dict_params: dict) -> pd.DataFrame:
    """Iteratively clip configured columns to mean +/- k*std, in place.

    :param df: the df to clip (mutated and also returned)
    :param dict_params: column -> std multiplier k, ex : {col1: 1.5, col2: 2}
    :return: the same frame with the configured columns clipped
    """
    for c in df.columns:
        if c in list(dict_params.keys()):
            std_mult = dict_params[c]
            # Re-clip until stable: each clip changes the mean/std, so the
            # bounds are recomputed from the current column every pass.
            while True:
                mean, std = df[c].mean(), df[c].std()
                low_bound = mean - std_mult * std
                up_bound = mean + std_mult * std
                df_c_clipped = df[c].clip(lower=low_bound, upper=up_bound,
                                          axis=0, inplace=False)
                # Converged once the largest squared change is below 0.01.
                if ((df_c_clipped - df[c]) ** 2).max() < 0.01:
                    break
                df[c] = df_c_clipped
    return df
| StarcoderdataPython |
1670230 | <gh_stars>0
from datetime import datetime
import xlrd
from openpyxl import load_workbook
from csv import reader
import json
from pathlib import Path
import os
class XlsObj:
    """One spreadsheet row turned into an object: each configured field
    becomes an attribute, trimmed and type-coerced per its spec."""

    # Maps a spec 'typ' name to a converter (raw_value, spec) -> value.
    # Empty strings become None for the numeric types; 'date' parses with
    # the spec's 'datefmt'; 'xldate' values pass through unchanged.
    typd = {
        'str'    : lambda v,_ : str(v),
        'float'  : lambda v,_ : float(v) if v!='' else None,
        'int'    : lambda v,_ : int(float(v)) if v!='' else None,
        'date'   : lambda v,s : datetime.strptime(v,s['datefmt']),
        'xldate' : lambda v,_ : v,
    }

    def trim(self, val, triml):
        """Remove every occurrence of each token in *triml* from *val*."""
        for token in triml:
            val = val.replace(token, '')
        return val

    def __init__(self, row, xos):
        """Populate attributes from *row* using the specs in *xos.fields*."""
        for name, spec in xos.fields.items():
            raw = row[spec['col'] - 1]  # 'col' is 1-based in the spec
            if isinstance(raw, str):
                raw = raw.strip()
            cleaned = self.trim(raw, spec['trim'])
            setattr(self, name, self.typd[spec['typ']](cleaned, spec))
class xls:
    """Row reader for legacy .xls workbooks via xlrd."""
    # Convert a cell to a python value; xlrd date cells are decoded with the
    # workbook's datemode (set by rows() before any cell is converted).
    def toval(self,c): return xlrd.xldate.xldate_as_datetime(c.value,self.datemode) \
            if c.ctype == xlrd.XL_CELL_DATE else c.value
    def rows(self,flnm,sheet=0):
        """Return all rows of the given sheet as lists of python values."""
        book = xlrd.open_workbook(flnm)
        sh = book.sheets()[sheet]
        self.datemode = book.datemode
        return [ [self.toval(c) for c in r] for r in sh.get_rows() ]
class xlsx:
    """Row reader for .xlsx workbooks via openpyxl."""
    # NOTE(review): the `sheet` argument is ignored -- the active sheet is
    # always used; empty cells (None) are normalized to ''.
    def rows(self,flnm,sheet=0): return [
            [ (c.value if c.value!=None else '') for c in r ]
            for r in load_workbook(flnm,read_only=True).get_active_sheet()
        ]
class csv:
    """Row reader for comma-separated files (sheet argument is unused)."""
    # NOTE(review): the file handle is never closed explicitly; it lives as
    # long as the returned reader is iterated.
    def rows(self,flnm,sheet=0): return reader(open(flnm,errors='ignore'))
class tsv:
    """Row reader for tab-separated files (sheet argument is unused)."""
    def rows(self,flnm,sheet=0): return reader(open(flnm,errors='ignore'),delimiter='\t')
class XlsObjs:
    """Iterable collection of XlsObj rows loaded from a spreadsheet/CSV file
    according to a JSON field spec."""
    # Per-field defaults, overridable by the spec file's 'globals' section
    # and again by each individual field entry.
    _globals = {
        'typ':'str', 'strtrow':1, 'endpatcol':1, 'endpat':'', 'trim':[], 'remark':'',
    }
    def __iter__(self): return iter(self.objs)
    # Either specfile or specname need to be specified, specfile needs to be a
    # file path. specname is just the basename of the json file and requires
    # XLS2PYSPECDIR env var to be set
    def __init__(self,flnm,specfile=None,specname=None,sheet=0):
        specflnm = specfile if specfile else (
            os.environ['XLS2PYSPECDIR']+ '/' + specname+ '.json'
        )
        spec = json.load(open(specflnm))
        # Layered config: class defaults < spec globals < per-field spec.
        self.g = { **self._globals, **spec.get('globals',{}) }
        self.fields = { k:{ **self.g, **v } for k,v in spec['fields'].items() }
        strtrow = self.g['strtrow'] - 1
        endpatcol = self.g['endpatcol'] - 1
        endpat = self.g['endpat']
        # Pick the reader class (xls/xlsx/csv/tsv) by file extension.
        ftyp = globals()[Path(flnm).suffix[1:]]
        self.objs = []
        # Skip rows before strtrow; stop at the first row whose end-pattern
        # column matches endpat.
        for i,r in enumerate(ftyp().rows(flnm)):
            if i < strtrow : continue
            if r[endpatcol].strip() == endpat: break
            self.objs = self.objs + [ XlsObj(r,self) ]
| StarcoderdataPython |
117199 | <filename>Commands/ping.py
from discord.commands import slash_command
from discord.ext import commands
from config import VERSIONS, EMOTES
import discord
class Ping(commands.Cog):
    """Cog providing a /ping slash command that reports gateway latency."""

    def __init__(self, bot):
        self.bot = bot
        # Metadata consumed by the bot's help/command listing.
        self.description = "Measure the bot latency"
        self.category = "Miscellaneous"

    @slash_command()
    async def ping(self, ctx: commands.Context):
        """Measures the bot latency"""
        # bot.latency is in seconds; report whole milliseconds.
        latency = round(self.bot.latency * 1000)
        # Per-bot emote and embed colour, keyed by the bot's user id.
        oknos = EMOTES[f"{self.bot.user.id}"]
        embed = discord.Embed(
            # Fixed user-facing typo: "lantecy" -> "latency".
            description=f"{oknos} My current latency: `{latency}ms`",
            color=VERSIONS[f"{self.bot.user.id}"],
        )
        await ctx.respond(embed=embed)
def setup(bot):
    # Extension entry point called by discord.py/pycord: register the cog.
    bot.add_cog(Ping(bot))
4988618 | <filename>models/weatherRSConditions.py
class WeatherRSConditions(object):
    """Clouds, rain, snow and wind response data.

    Holds the optional condition fields of an OpenWeatherMap-style response
    mapping; any field absent from the input stays ``None``.
    """

    def __init__(self):
        self._clouds = None      # cloud cover ('clouds.all')
        self._rain = None        # rain volume ('rain.3h')
        self._snow = None        # snow volume ('snow.3h')
        self._wind_deg = None    # wind direction ('wind.deg')
        self._wind_speed = None  # wind speed ('wind.speed')

    def set_conditions(self, weather):
        """Populate fields from a *weather* mapping.

        Only keys present in *weather* are copied; missing keys leave the
        corresponding attribute untouched.
        """
        clouds = weather.get('clouds', {})
        if 'all' in clouds:
            self._clouds = clouds['all']
        rain = weather.get('rain', {})
        if '3h' in rain:
            self._rain = rain['3h']
        snow = weather.get('snow', {})
        if '3h' in snow:
            self._snow = snow['3h']
        wind = weather.get('wind', {})
        if 'deg' in wind:
            self._wind_deg = wind['deg']
        if 'speed' in wind:
            self._wind_speed = wind['speed']

    @property
    def clouds(self):
        return self._clouds

    @clouds.setter
    def clouds(self, clouds):
        self._clouds = clouds

    @property
    def rain(self):
        return self._rain

    @rain.setter
    def rain(self, rain):
        self._rain = rain

    @property
    def snow(self):
        return self._snow

    @snow.setter
    def snow(self, snow):
        self._snow = snow

    @property
    def wind_deg(self):
        return self._wind_deg

    @wind_deg.setter
    def wind_deg(self, wind_deg):
        self._wind_deg = wind_deg

    @property
    def wind_speed(self):
        return self._wind_speed

    @wind_speed.setter
    def wind_speed(self, wind_speed):
        self._wind_speed = wind_speed
| StarcoderdataPython |
11306583 | <filename>PaddleCV/Research/webvision2018/utils/class_accuracy.py<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
def accuracy(targets, preds, num_classes=5000):
    """Compute class-averaged top-1 and top-5 accuracy.

    Args:
        targets: 1-D array of ground-truth class indices, shape (N,).
        preds: 2-D array of predicted class indices ranked best-first,
            shape (N, >=5); only the first five columns are used.
        num_classes: total number of classes averaged over (default 5000,
            the original hard-coded WebVision value, kept for
            backward compatibility).

    Returns:
        Tuple (top1, top5) of class-averaged accuracies. Classes with no
        samples contribute ~0; an epsilon guards the division by zero.
    """
    top1 = np.zeros((num_classes,), dtype=np.float32)
    top5 = np.zeros((num_classes,), dtype=np.float32)
    count = np.zeros((num_classes,), dtype=np.float32)
    for index in range(targets.shape[0]):
        target = targets[index]
        if target == preds[index, 0]:
            top1[target] += 1
            top5[target] += 1
        elif np.sum(target == preds[index, :5]):
            top5[target] += 1
        count[target] += 1
    # Epsilon avoids division by zero for classes that never appear.
    return (top1 / (count + 1e-12)).mean(), (top5 / (count + 1e-12)).mean()
| StarcoderdataPython |
1782320 | """
An Example on domain auction
"""
from boa.interop.System.Storage import *
from boa.interop.System.Runtime import *
from boa.interop.System.ExecutionEngine import *
from boa.builtins import concat
from boa.interop.Ontology.Native import *
from boa.builtins import state
# Storage context handle for this contract's key/value store.
ctx = GetContext()
# This contract's own script hash; used as the escrow account while a
# domain is listed for sale.
selfAddr = GetExecutingScriptHash()
#ONT native contract address
contractAddress = bytearray(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
def Main(operation, args):
    """Contract dispatcher: route *operation* to the matching handler.

    Mutating operations verify the caller's signature via CheckWitness
    before dispatching; 'query' is read-only and needs no witness.
    """
    if operation == 'register':
        acct = args[0]
        url = args[1]
        if CheckWitness(acct):
            return register(acct,url)
        Notify('CheckWitness failed!')
        return False
    if operation == 'sell':
        acct = args[0]
        url = args[1]
        price = args[2]
        if CheckWitness(acct):
            return sell(acct,url,price)
        Notify('CheckWitness failed!')
        return False
    if operation == 'query':
        url = args[0]
        return query(url)
    if operation == 'buy':
        acct = args[0]
        url = args[1]
        price = args[2]
        if CheckWitness(acct):
            return buy(acct,url,price)
        Notify('CheckWitness failed!')
        return False
    if operation == 'done':
        acct = args[0]
        url = args[1]
        if CheckWitness(acct):
            return done(acct,url)
        Notify('CheckWitness failed!')
        return False
    if operation == 'transfer':
        fromacct = args[0]
        amount = args[1]
        return transferONT(fromacct,selfAddr,amount)
    Notify('Not a supported operation!')
    # NOTE(review): unsupported operations return True — confirm this is
    # intentional (False would be the conventional failure value).
    return True
def register(account,domain):
    """
    Register *domain* to *account* if it is not already claimed.
    :param account: address taking ownership of the domain
    :param domain: domain name used as the storage key
    :return: True on success, False if the domain is already registered
    """
    if not Get(ctx,domain):
        Put(ctx,domain,account)
        Notify('register succeed!')
        return True
    Notify('already registered!')
    return False
def sell(account,url, price):
    """
    Put *url* up for auction at a starting *price*.
    The contract itself becomes the recorded owner (escrow) while the
    original owner and asking price are remembered under prefixed keys.
    :param account: current owner of the domain (must match storage)
    :param url: domain being listed
    :param price: starting price
    :return: True on success, False if *account* is not the owner
    """
    owner = Get(ctx,url)
    if owner == account :
        Put(ctx,concat('Original_Owner_',url),account)
        Put(ctx,concat('Price_',url),price)
        Put(ctx,url,selfAddr)  # escrow: contract holds the domain during sale
        Notify('sell succeed!')
        return True
    Notify('Not a owner')
    return False
def query(url):
    """
    Look up and return the current owner of *url* (None-like if unset).
    While a sale is in progress the owner is the contract address itself.
    :param url: domain to look up
    :return: stored owner value
    """
    owner = Get(ctx,url)
    Notify(concat('owner is ',owner))
    return owner
def buy(acct,url,price):
    """
    Place a bid of *price* on a domain currently listed for sale.
    First bid: pays *price* into the contract and records the bidder.
    Later bids: must exceed the current price; the new bidder replaces
    the previous one.
    :param acct: bidding account
    :param url: domain being bid on
    :param price: offered price
    :return: True on success, False otherwise
    """
    owner = Get(ctx,url)
    if owner != selfAddr:
        # Domain is only purchasable while escrowed by the contract.
        Notify("url not in sale!")
        return False
    prevBuyer = Get(ctx,concat('TP_',url))       # 'TP_' = top bidder
    currentPrice = Get(ctx,concat('Price_',url))
    #no buyer before case
    if not prevBuyer:
        if price >= currentPrice:
            # Bidder pays the contract; record them as top bidder.
            if transferONT(acct, selfAddr, price):
                Put(ctx,concat('TP_',url),acct)
                if price > currentPrice:
                    Put(ctx,concat('Price_',url),price)
                Notify('buy succeed!')
                return True
            else:
                Notify('Transfer Failed')
                return False
        Notify('Price is lower than current price')
        return False
    # has buyer before case
    if price <= currentPrice:
        Notify('Price is lower than current price')
        return False
    # NOTE(review): this transfers the current price from the contract to the
    # NEW bidder and never charges them or refunds the PREVIOUS bidder —
    # verify against the intended auction flow; it looks inverted.
    if transferONT(selfAddr,acct,currentPrice):
        Put(ctx,concat('TP_',url),acct)
        Put(ctx,concat('Price_',url),price)
        Notify('refund succeed!')
        return True
    else:
        Notify('refund failed')
        return False
def transferONT(fromacct,toacct,amount):
    """
    Transfer *amount* of ONT from *fromacct* to *toacct* via the native
    ONT contract; requires *fromacct*'s witness.
    :param fromacct: sender (must have signed the transaction)
    :param toacct: recipient
    :param amount: amount of ONT to move
    :return: True if the native transfer reported success, else False
    """
    if CheckWitness(fromacct):
        param = makeState(fromacct, toacct, amount)
        res = Invoke(1,contractAddress,'transfer',[param])
        Notify(res)
        # Native contract signals success with the single byte 0x01.
        if res and res == b'\x01':
            Notify('transfer succeed')
            return True
        else:
            Notify('transfer failed')
            return False
    else:
        Notify('checkWitness failed')
        return False
def done(acct,url):
    """
    Finalize the auction for *url*: pay the final price from the contract
    to the original owner, hand ownership to the top bidder, and clear
    the auction bookkeeping keys.
    :param acct: original owner closing the auction
    :param url: domain whose auction is being finalized
    :return: True on success, False otherwise
    """
    currentOwner = Get(ctx, url)
    if currentOwner != selfAddr:
        Notify('not in sell')
        return False
    preOwner = Get(ctx,concat('Original_Owner_', url))
    if preOwner != acct:
        Notify('not owner')
        return False
    amount = Get(ctx, concat('Price_', url))
    # Pay the sale proceeds from the contract escrow to the original owner.
    param = makeState(selfAddr, acct, amount)
    res = Invoke(1, contractAddress, 'transfer', [param])
    if res and res == b'\x01':
        buyer = Get(ctx, concat('TP_',url))
        Put(ctx, url, buyer)  # ownership passes to the winning bidder
        Delete(ctx,concat('TP_', url))
        Delete(ctx,concat('Price_', url))
        Delete(ctx,concat('Original_Owner_', url))
        Notify('done succeed!')
        return True
    else:
        Notify('transfer failed')
        return False
def makeState(fromacct,toacct,amount):
    # Build the (from, to, amount) state tuple expected by the native
    # ONT 'transfer' invocation.
    return state(fromacct, toacct, amount)
11343775 | #!/usr/bin/env python
"""
Read a ftm text export and convert it to a json structure closely matching stb binary format
"""
import copy
import ftmtxt
import ftmmanip
import json
import sys
# Parameters
SOURCE_FILE_PATH = sys.argv[1]  # path of the FamiTracker text export to convert
MAX_OPTIM_PASSES = 0 if len(sys.argv) < 3 else int(sys.argv[2])  # optional cap on optimization passes (default: none)
# Logging
def log(msg):
    """Write *msg* to stderr, newline-terminated."""
    print(msg, file=sys.stderr)
def info(msg):
    # INFO-level message; prefix padded so it aligns with 'WARNING: '.
    log('   INFO: {}'.format(msg))
def warn(msg):
    # WARNING-level message; also injected into ftmmanip below so the
    # library reports through the same channel.
    log('WARNING: {}'.format(msg))
# Route ftmmanip's warnings through this script's logger.
ftmmanip.warn = warn
# Read original file
with open(SOURCE_FILE_PATH, 'r') as f:
    music = ftmtxt.to_dict(f)
# Simplify structure
# * pre-interpreting effects not handled by the engine
# * standardizing things
# * ... (whatever is a direct translation of the original with less things to handle)
music = ftmmanip.get_num_channels(music)
music = ftmmanip.flatten_orders(music)
music = ftmmanip.unroll_f_effect(music)
music = ftmmanip.cut_at_b_effect(music)
music = ftmmanip.apply_g_effect(music)
music = ftmmanip.apply_d_effect(music)
music = ftmmanip.apply_s_effect(music)
music = ftmmanip.apply_a_effect(music)
music = ftmmanip.remove_instruments(music)
music = ftmmanip.remove_useless_pitch_effects(music)
music = ftmmanip.apply_q_effect(music)
music = ftmmanip.apply_r_effect(music)
music = ftmmanip.repeat_3_effect(music)
music = ftmmanip.apply_3_effect(music)
music = ftmmanip.apply_4_effect(music)
# Compatibility checks
music = ftmmanip.warn_instruments(music)
music = ftmmanip.warn_effects(music)
# Transform to stb audio format
music = ftmmanip.remove_superfluous_volume(music)
music = ftmmanip.remove_superfluous_duty(music)
music = ftmmanip.std_empty_row(music)
music = ftmmanip.to_uncompressed_format(music)
music = ftmmanip.remove_duplicates(music)
music = ftmmanip.aggregate_lines(music)
music = ftmmanip.compute_note_length(music)
music = ftmmanip.to_mod_format(music)
# Optimize
music = ftmmanip.optim_pulse_opcodes_to_meta(music)
# Iterate sample-level optimizations to a fixed point (or until the
# pass limit is reached): stop when a pass leaves music['mod'] unchanged.
optimal = False
pass_num = 0
while not optimal and pass_num < MAX_OPTIM_PASSES:
    pass_num += 1
    info('optimization pass #{}'.format(pass_num))
    original_music_mod = copy.deepcopy(music['mod'])
    music = ftmmanip.split_samples(music)
    music = ftmmanip.reuse_samples(music)
    music = ftmmanip.remove_unused_samples(music)
    if music['mod'] == original_music_mod:
        optimal = True
# Transform to asm
music = ftmmanip.samples_to_source(music)
# Compute misc info
music = ftmmanip.compute_stats(music)
# Show result
print(json.dumps(music))
| StarcoderdataPython |
44318 | <gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from numpy.fft import fftn, ifftn
import scipy.integrate as integrate
from scipy import stats
import PlotScripts
import ReadScripts
import AllVars
def calculate_HI_frac(XHII, density):
    """
    Calculate the mass-weighted NEUTRAL hydrogen fraction of a grid.

    Parameters
    ----------
    XHII : 3-D array of floats
        Fraction of ionized hydrogen (XHII) in each cell.
    density : 3-D array of floats
        Dark-matter overdensity (rho/<rho>) in each cell; used as the
        mass weight.

    Returns
    -------
    HI : float
        1 minus the density-weighted mean ionized fraction, i.e. the
        mass-averaged neutral fraction (unitless).
    """
    weights = density / np.sum(density)
    HI = 1.0 - np.sum(XHII * weights)

    print("")
    print("Mass averaged HI fraction is {0:.4f}".format(HI))

    return HI
def determine_close_idx(fname_HII, fname_density, SnapList, GridSize,
                        precision, target_XHI_fraction, model_tags):
    """For each model, find the snapshot indices whose neutral-hydrogen
    fraction is closest to each target value in `target_XHI_fraction`.

    Reads ionization and density grids from disk for every listed snapshot,
    computes the mass-weighted HI fraction, then picks the nearest match.
    Returns a per-model list of indices into that model's SnapList.
    """
    # NOTE: despite the name, this array holds NEUTRAL (HI) fractions,
    # as returned by calculate_HI_frac.
    XHII_fraction = np.zeros_like(SnapList, dtype=np.float32)

    for model_number in range(len(fname_HII)):
        for snapnum in range(len(SnapList[model_number])):
            HII_fname = "{0}_{1:03d}".format(fname_HII[model_number],
                                             SnapList[model_number][snapnum])
            HII = ReadScripts.read_binary_grid(HII_fname,
                                               GridSize[model_number],
                                               precision[model_number])

            density_fname = "{0}{1:03d}.dens.dat".format(fname_density[model_number],
                                                         SnapList[model_number][snapnum])
            density = ReadScripts.read_binary_grid(density_fname,
                                                   GridSize[model_number],
                                                   precision[model_number])

            HI_frac = calculate_HI_frac(HII, density)
            XHII_fraction[model_number][snapnum] = HI_frac

    # Rebind SnapList to hold the chosen indices (the argument's original
    # contents are no longer needed at this point).
    SnapList = []
    for model_number in range(len(fname_HII)):
        SnapList.append([])
        print("Model {0}".format(model_tags[model_number]))
        for val in target_XHI_fraction:
            idx = (np.abs(XHII_fraction[model_number] - val)).argmin()
            print("HI Fract {0}: Nearest Idx {1} with value {2}".format(val,
                                                                        idx,
                                                                        XHII_fraction[model_number][idx]))
            SnapList[model_number].append(idx)

    return SnapList
def determine_MH_fesc_constants(MH_low, MH_high, fesc_low, fesc_high):
    """Fit a power law fesc = A * M**B through two anchor points.

    Solves (in log10 space) for the A, B that satisfy
    fesc(MH_low) == fesc_low and fesc(MH_high) == fesc_high.

    Returns
    -------
    (A, B) : tuple of floats
        Normalisation and slope of the power law.
    """
    lml, lmh = np.log10(MH_low), np.log10(MH_high)
    lfl, lfh = np.log10(fesc_low), np.log10(fesc_high)

    ratio = lmh / lml
    log_A = (lfh - lfl * ratio) / (1.0 - ratio)
    B = (lfl - log_A) / lml

    return 10.0 ** log_A, B
def plot_Anne_MH(MH_low, MH_high, fesc_low, fesc_high, pos_scaling, ax1):
    """Plot Anne's fesc(Mvir) prescription on axis *ax1* and return it.

    The curve interpolates between fesc_low (at MH_low) and fesc_high
    (at MH_high) in log-mass space, clamped to that range; *pos_scaling*
    selects the increasing-fesc branch of the formula.
    """
    # Halo masses from 10^7 to 10^18 Msun.
    halomass = np.arange(7.0, 18.0, 0.01)
    halomass = pow(10, halomass)

    if pos_scaling:
        fesc = 1.0 - pow((1.0 - fesc_low) * (1.0-fesc_low)/(1.0-fesc_high), -np.log10(halomass/MH_low)/np.log10(MH_high/MH_low))
        # Clamp to the anchor values outside [MH_low, MH_high].
        fesc[fesc < fesc_low] = fesc_low
        fesc[fesc > fesc_high] = fesc_high
    else:
        fesc = pow(fesc_low * fesc_low/fesc_high, -np.log10(halomass/MH_low)/np.log10(MH_high/MH_low))
        fesc[fesc > fesc_low] = fesc_low
        fesc[fesc < fesc_high] = fesc_high

    ax1.plot(np.log10(halomass),
             fesc, ls = '-', color = 'k',
             label = "Anne")

    ax1.set_xlabel("Halo Mass [Msun]")
    ax1.set_ylabel("fesc")
    ax1.set_ylim([0.0, 1.1])

    return ax1
def plot_my_MH(MH_low, MH_high, fesc_low, fesc_high, ax1):
    """Plot the power-law fesc(Mvir) = A * M**B on axis *ax1* and return it.

    A and B are fitted through (MH_low, fesc_low) and (MH_high, fesc_high)
    by determine_MH_fesc_constants; the curve is clamped to the anchor
    values outside that mass range.
    """
    # Halo masses from 10^7 to 10^18 Msun.
    halomass = np.arange(7.0, 18.0, 0.01)
    halomass = pow(10, halomass)

    alpha, beta = determine_MH_fesc_constants(MH_low, MH_high,
                                              fesc_low, fesc_high)

    print("Alpha = {0} Beta = {1}".format(alpha, beta))
    fesc = alpha*pow(halomass,beta)

    # Clamp direction depends on whether fesc rises or falls with mass.
    if fesc_low > fesc_high:
        fesc[fesc > fesc_low] = fesc_low
        fesc[fesc < fesc_high] = fesc_high
    else:
        fesc[fesc < fesc_low] = fesc_low
        fesc[fesc > fesc_high] = fesc_high

    ax1.plot(np.log10(halomass),
             fesc, ls = '--', color = 'r',
             label = "Mine")

    ax1.set_xlabel("Halo Mass [Msun]")
    ax1.set_ylabel("fesc")
    ax1.set_ylim([0.0, 1.1])

    return ax1
def plot_MHs(MH_low, MH_high, fesc_low, fesc_high):
    """Overlay Anne's and the power-law fesc(Mvir) curves and save a PNG.

    The output filename encodes the anchor masses and fesc values.
    """
    fig1 = plt.figure(figsize = (8,8))
    ax1 = fig1.add_subplot(111)

    # Anne's formula needs to know whether fesc increases with mass.
    if fesc_high > fesc_low:
        pos_scaling = 1
    else:
        pos_scaling = 0

    ax1 = plot_Anne_MH(MH_low, MH_high, fesc_low, fesc_high, pos_scaling, ax1)
    ax1 = plot_my_MH(MH_low, MH_high, fesc_low, fesc_high, ax1)

    leg = ax1.legend(loc='upper right', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize(10)

    outputFile1 = "./fescMH_{0:.2e}_{1:.2e}_{2}_{3}.png".format(MH_low,
                                                                MH_high,
                                                                fesc_low,
                                                                fesc_high)
    fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
def plot_SFR_fesc(alpha, beta, delta):
    """Plot sigmoid fesc(SFR) curves for each (alpha, beta, delta) triple
    and save the figure to ./fesc_SFR.png.

    Each curve is fesc = delta / (1 + exp(-alpha * (SFR - beta))) over
    log-SFR values in [-5, 2).
    """
    fig1 = plt.figure(figsize = (8,8))
    ax1 = fig1.add_subplot(111)

    SFR = np.arange(-5, 2, 0.01)

    for alpha_val, beta_val, delta_val in zip(alpha, beta, delta):
        fesc = delta_val / (1.0 + np.exp(-alpha_val*(SFR-beta_val)))
        label = r"$\alpha = " + str(alpha_val) + r", \beta = " + str(beta_val) +\
                r", \delta = " + str(delta_val) + "$"
        print(label)
        ax1.plot(SFR, fesc, label=label)

    leg = ax1.legend(loc='upper left', numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize(10)

    outputFile1 = "./fesc_SFR.png"
    fig1.savefig(outputFile1, bbox_inches='tight')  # Save the figure
    print('Saved file to {0}'.format(outputFile1))
    plt.close(fig1)
if __name__ == "__main__":
    # Halo-mass / fesc anchors for the (currently disabled) plot_MHs sweep.
    MH_low = 1.0e8
    MH_high = 1.0e12
    fesc_high = 0.05

    #for fesc_low in [0.95]:
    #    plot_MHs(MH_low, MH_high, fesc_low, fesc_high)

    # Sigmoid parameter sets compared in the fesc(SFR) plot.
    alpha = [0.2, 0.3, 0.63, 1.0, 4.50]
    beta = [4.5, 2.3, 1.5, 1.0, 0.5]
    delta = [1.0, 1.0, 1.0, 1.0, 1.0]

    plot_SFR_fesc(alpha, beta, delta)
| StarcoderdataPython |
11268461 | <reponame>Techno-Philes/python-short-programs
def mergeArray(arr1, arr2):
    """Merge two individually sorted lists into one sorted list.

    Stable on ties: equal elements from arr2 are taken first, matching
    the original `<` comparison.
    """
    merged = []
    i = j = 0
    while i < len(arr1) and j < len(arr2):
        if arr1[i] < arr2[j]:
            merged.append(arr1[i])
            i += 1
        else:
            merged.append(arr2[j])
            j += 1
    # One of these is empty; extend with whatever remains of the other.
    merged.extend(arr1[i:])
    merged.extend(arr2[j:])
    return merged
def merge(arr1, arr2, arr3):
    """Merge three individually sorted lists into one sorted list."""
    return mergeArray(mergeArray(arr1, arr2), arr3)
# Demo: merge three pre-sorted lists and print the combined sorted result.
arr1=[1,4,7,10]
arr2=[2,5,8]
arr3=[3,6,9]
print(merge(arr1,arr2,arr3))
| StarcoderdataPython |
3597523 | import pandas as pd
import matplotlib.pyplot as plt
gap = [0.05, 0.025, 0.01, 0.001, 0.0001]
def Draw(cutoff_gap):
    """Show a histogram of the counts stored for the given cutoff gap.

    Reads ``<cutoff_gap>cutoff.count_array.txt`` and histograms the third
    whitespace-separated column of every line.
    """
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on exceptions).
    with open(str(cutoff_gap) + "cutoff.count_array.txt", 'r') as input_file:
        histogram_vector = [int(line.split()[2]) for line in input_file]
    plt.hist(histogram_vector)
    plt.show()
    return
Draw(0.05)
| StarcoderdataPython |
3238575 | """
Python 3 Object-Oriented Programming
Chapter 13. Testing Object-Oriented Programs.
"""
import unittest
import sys
class SkipTests(unittest.TestCase):
    """Demonstrates unittest's skip and expected-failure decorators.

    Every test body is a deliberate failure; the decorators control how
    the runner reports (or avoids running) each one.
    """
    @unittest.expectedFailure
    def test_fails(self) -> None:
        self.assertEqual(False, True)

    @unittest.skip("Test is useless")
    def test_skip(self) -> None:
        self.assertEqual(False, True)

    @unittest.expectedFailure  # Remove this to see the effect of version number tests.
    @unittest.skipIf(sys.version_info.minor == 8, "broken on 3.8")
    def test_skipif(self) -> None:
        self.assertEqual(False, True)

    @unittest.skipUnless(sys.platform.startswith("linux"), "broken unless on linux")
    def test_skipunless(self) -> None:
        self.assertEqual(False, True)
if __name__ == "__main__":
    # Run the test cases above when this module is executed directly.
    unittest.main()
| StarcoderdataPython |
4955795 | import tqdm
from torch.utils.data import Dataset
from markuplmft.data.tag_utils import tags_dict
import pickle
import os
import constants
class SwdeFeature(object):
    """One model-ready input window (feature) cut from a single SWDE page."""
    def __init__(self,
                 html_path,
                 input_ids,
                 token_type_ids,
                 attention_mask,
                 xpath_tags_seq,
                 xpath_subs_seq,
                 labels,
                 involved_first_tokens_pos,
                 involved_first_tokens_xpaths,
                 involved_first_tokens_types,
                 involved_first_tokens_text,
                 ):
        """
        html_path: indicate which page the feature belongs to
        input_ids: RT
        token_type_ids: RT
        attention_mask: RT
        xpath_tags_seq: RT
        xpath_subs_seq: RT
        labels: RT
        involved_first_tokens_pos: a list, indicate the positions of the first-tokens in this feature
        involved_first_tokens_xpaths: the xpaths of the first-tokens, used to build dict
        involved_first_tokens_types: the types of the first-tokens
        involved_first_tokens_text: the text of the first tokens

        Note that `involved_xxx` are not fixed-length array, so they shouldn't be sent into our model
        They are just used for evaluation
        """
        self.html_path = html_path
        self.input_ids = input_ids
        self.token_type_ids = token_type_ids
        self.attention_mask = attention_mask
        self.xpath_tags_seq = xpath_tags_seq
        self.xpath_subs_seq = xpath_subs_seq
        self.labels = labels
        self.involved_first_tokens_pos = involved_first_tokens_pos
        self.involved_first_tokens_xpaths = involved_first_tokens_xpaths
        self.involved_first_tokens_types = involved_first_tokens_types
        self.involved_first_tokens_text = involved_first_tokens_text
class SwdeDataset(Dataset):
    """Map-style dataset over pre-tensorized SWDE features.

    Each item is the tuple ``(input_ids, attention_mask, token_type_ids,
    xpath_tags_seq, xpath_subs_seq[, labels])``; the labels entry is
    present only when ``all_labels`` was provided (training mode).
    """

    def __init__(self,
                 all_input_ids,
                 all_attention_mask,
                 all_token_type_ids,
                 all_xpath_tags_seq,
                 all_xpath_subs_seq,
                 all_labels=None,
                 ):
        # Removed a stray triple-quoted block of dead debug code
        # (type-prints ending in `raise ValueError`) that sat here.
        self.tensors = [all_input_ids, all_attention_mask, all_token_type_ids,
                        all_xpath_tags_seq, all_xpath_subs_seq]

        if all_labels is not None:
            self.tensors.append(all_labels)

    def __len__(self):
        # All tensors share the same first dimension.
        return len(self.tensors[0])

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)
def process_xpath(xpath: str):
    """Encode an XPath as fixed-length (50) tag-id and subscript sequences.

    Unknown tag names map to id 215; steps without a ``[n]`` subscript get
    subscript 0, and subscripts are clamped to 1000. Paths longer than 50
    steps keep only the last 50; shorter ones are right-padded with the
    pad ids 216 (tags) and 1001 (subscripts).
    """
    if xpath.endswith("/tail"):
        xpath = xpath[:-len("/tail")]

    tags, subs = [], []
    for step in xpath.split("/"):
        if not step:
            continue
        name, bracket, rest = step.partition('[')
        tags.append(tags_dict.get(name, 215))
        if bracket:
            # rest looks like "3]" -> subscript 3, clamped to 1000.
            subs.append(min(int(rest[:-1]), 1000))
        else:
            subs.append(0)

    # Keep at most the last 50 steps, then right-pad to exactly 50.
    tags = tags[-50:]
    subs = subs[-50:]
    pad = 50 - len(tags)
    return tags + [216] * pad, subs + [1001] * pad
def get_swde_features(root_dir, vertical, website, tokenizer,
                      doc_stride, max_length, prev_nodes, n_pages):
    """Build SwdeFeature windows for every page of one vertical/website split.

    Loads the pickled node list per page, keeps each variable (labelled)
    node plus up to `prev_nodes` non-empty preceding nodes as context,
    tokenizes them into one long sequence, then slides a window of
    `max_length` tokens (stride `doc_stride`) over it, emitting one
    feature per window that contains at least one labelled first-token.
    """
    real_max_token_num = max_length - 2  # for cls and sep
    padded_xpath_tags_seq = [216] * 50
    padded_xpath_subs_seq = [1001] * 50

    filename = os.path.join(root_dir, f"{vertical}-{website}-{n_pages}.pickle")
    with open(filename, "rb") as f:
        raw_data = pickle.load(f)

    features = []

    for index in tqdm.tqdm(raw_data, desc=f"Processing {vertical}-{website}-{n_pages} features ..."):
        html_path = f"{vertical}-{website}-{index}.htm"
        # Select the node ids we keep: every non-fixed node plus up to
        # `prev_nodes` non-empty preceding nodes as left context.
        needed_docstrings_id_set = set()
        for i in range(len(raw_data[index])):
            doc_string_type = raw_data[index][i][2]

            if doc_string_type == "fixed-node":
                continue
            # we take i-3, i-2, i-1 into account
            needed_docstrings_id_set.add(i)

            used_prev = 0
            prev_id = i - 1
            while prev_id >= 0 and used_prev < prev_nodes:
                if raw_data[index][prev_id][0].strip():
                    needed_docstrings_id_set.add(prev_id)
                    used_prev += 1
                prev_id -= 1

        needed_docstrings_id_list = sorted(list(needed_docstrings_id_set))
        all_token_ids_seq = []
        all_xpath_tags_seq = []
        all_xpath_subs_seq = []
        token_to_ori_map_seq = []
        all_labels_seq = []

        first_token_pos = []
        first_token_xpaths = []
        first_token_types = []
        first_token_text = []

        # Tokenize the kept nodes into one flat sequence; each node entry
        # is (text, xpath, type). Fixed nodes get ignore-label -100.
        for i, needed_id in enumerate(needed_docstrings_id_list):
            text = raw_data[index][needed_id][0]
            xpath = raw_data[index][needed_id][1]
            type = raw_data[index][needed_id][2]

            token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
            xpath_tags_seq, xpath_subs_seq = process_xpath(xpath)

            all_token_ids_seq += token_ids
            all_xpath_tags_seq += [xpath_tags_seq] * len(token_ids)
            all_xpath_subs_seq += [xpath_subs_seq] * len(token_ids)

            token_to_ori_map_seq += [i] * len(token_ids)

            if type == "fixed-node":
                all_labels_seq += [-100] * len(token_ids)
            else:
                # we always use the first token to predict
                first_token_pos.append(len(all_labels_seq))
                first_token_types.append(type)
                first_token_xpaths.append(xpath)
                first_token_text.append(text)

                all_labels_seq += [constants.ATTRIBUTES_PLUS_NONE[vertical].index(type)] * len(token_ids)

        assert len(all_token_ids_seq) == len(all_xpath_tags_seq)
        assert len(all_token_ids_seq) == len(all_xpath_subs_seq)
        assert len(all_token_ids_seq) == len(all_labels_seq)

        # we have all the pos of variable nodes in all_token_ids_seq
        # now we need to assign them into each feature
        start_pos = 0
        flag = False  # set when the window reaches the end of the sequence

        curr_first_token_index = 0

        while True:
            # invloved is [ start_pos , end_pos )

            token_type_ids = [0] * max_length  # that is always this

            end_pos = start_pos + real_max_token_num
            # add start_pos ~ end_pos as a feature, wrapped in [CLS]/[SEP].
            splited_token_ids_seq = [tokenizer.cls_token_id] + all_token_ids_seq[start_pos:end_pos] + [
                tokenizer.sep_token_id]
            splited_xpath_tags_seq = [padded_xpath_tags_seq] + all_xpath_tags_seq[start_pos:end_pos] + [
                padded_xpath_tags_seq]
            splited_xpath_subs_seq = [padded_xpath_subs_seq] + all_xpath_subs_seq[start_pos:end_pos] + [
                padded_xpath_subs_seq]
            splited_labels_seq = [-100] + all_labels_seq[start_pos:end_pos] + [-100]

            # locate first-tokens in them
            involved_first_tokens_pos = []
            involved_first_tokens_xpaths = []
            involved_first_tokens_types = []
            involved_first_tokens_text = []

            while curr_first_token_index < len(first_token_pos) \
                    and end_pos > first_token_pos[curr_first_token_index] >= start_pos:
                involved_first_tokens_pos.append(
                    first_token_pos[curr_first_token_index] - start_pos + 1)  # +1 for [cls]
                involved_first_tokens_xpaths.append(first_token_xpaths[curr_first_token_index])
                involved_first_tokens_types.append(first_token_types[curr_first_token_index])
                involved_first_tokens_text.append(first_token_text[curr_first_token_index])

                curr_first_token_index += 1

            # we abort this feature if no useful node in it
            if len(involved_first_tokens_pos) == 0:
                break

            if end_pos >= len(all_token_ids_seq):
                flag = True
                # which means we need to pad in this feature
                current_len = len(splited_token_ids_seq)
                splited_token_ids_seq += [tokenizer.pad_token_id] * (max_length - current_len)
                splited_xpath_tags_seq += [padded_xpath_tags_seq] * (max_length - current_len)
                splited_xpath_subs_seq += [padded_xpath_subs_seq] * (max_length - current_len)
                splited_labels_seq += [-100] * (max_length - current_len)

                attention_mask = [1] * current_len + [0] * (max_length - current_len)

            else:
                # no need to pad, the splited seq is exactly with the length `max_length`
                assert len(splited_token_ids_seq) == max_length
                attention_mask = [1] * max_length

            features.append(
                SwdeFeature(
                    html_path=html_path,
                    input_ids=splited_token_ids_seq,
                    token_type_ids=token_type_ids,
                    attention_mask=attention_mask,
                    xpath_tags_seq=splited_xpath_tags_seq,
                    xpath_subs_seq=splited_xpath_subs_seq,
                    labels=splited_labels_seq,
                    involved_first_tokens_pos=involved_first_tokens_pos,
                    involved_first_tokens_xpaths=involved_first_tokens_xpaths,
                    involved_first_tokens_types=involved_first_tokens_types,
                    involved_first_tokens_text=involved_first_tokens_text,
                )
            )

            # Overlap consecutive windows by `doc_stride` tokens.
            start_pos = end_pos - doc_stride
            if flag:
                break

    return features
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.