Column        Type      Length range
code          string    22 to 1.05M characters
apis          list      1 to 3.31k items
extract_api   string    75 to 3.25M characters
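This header is the column summary of the dump: each row pairs a Python source file (code) with the list of fully qualified APIs it uses (apis) and per-call extraction metadata (extract_api). Assuming the dump is published in the usual Hugging Face datasets layout, rows can be streamed as below; the repository id is a placeholder, not the real one.

    from datasets import load_dataset

    # Hypothetical repository id; substitute the dataset's actual path.
    ds = load_dataset("org/python-api-usage", split="train", streaming=True)

    row = next(iter(ds))
    # Source size plus the first few extracted API names for a quick sanity check.
    print(len(row["code"]), row["apis"][:3])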
from dataclasses import dataclass, field
from typing import Optional

from xsdata.models.datatype import XmlDateTime

__NAMESPACE__ = "NISTSchema-SV-IV-atomic-dateTime-minInclusive-3-NS"


@dataclass
class NistschemaSvIvAtomicDateTimeMinInclusive3:
    class Meta:
        name = "NISTSchema-SV-IV-atomic-dateTime-minInclusive-3"
        namespace = "NISTSchema-SV-IV-atomic-dateTime-minInclusive-3-NS"

    value: Optional[XmlDateTime] = field(
        default=None,
        metadata={
            "required": True,
            "min_inclusive": XmlDateTime(1978, 11, 30, 10, 14, 33),
        }
    )
[ "xsdata.models.datatype.XmlDateTime" ]
[((545, 582), 'xsdata.models.datatype.XmlDateTime', 'XmlDateTime', (['(1978)', '(11)', '(30)', '(10)', '(14)', '(33)'], {}), '(1978, 11, 30, 10, 14, 33)\n', (556, 582), False, 'from xsdata.models.datatype import XmlDateTime\n')]
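Each extract_api entry pairs one call site with character offsets into the code string, the fully qualified API name, the call text as written, its arguments, and the originating import. Since the value is a Python literal, it decodes without a custom parser; a minimal sketch, assuming `row` is one record as loaded above and that the offsets index the original newline-preserving source:

    import ast

    # Decode the metadata column of one row into a list of tuples.
    calls = ast.literal_eval(row["extract_api"])
    for entry in calls:
        (start, end), qualified_name = entry[0], entry[1]
        # e.g. "xsdata.models.datatype.XmlDateTime at offsets 545-582" for the row above
        print(qualified_name, "at offsets", f"{start}-{end}")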
#! /usr/bin/env python3
import argparse
import os
import sys

from arcadeutils import FileBytes, BinaryDiff
from naomi import NaomiEEPRom, NaomiRom
from naomi.settings import SettingsManager


# The root of the repo.
root = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))


def main() -> int:
    # Create the argument parser
    parser = argparse.ArgumentParser(
        description="Command-Line Utility for patching different game defaults into a Naomi ROM.",
    )
    parser.add_argument(
        'rom',
        metavar='ROM',
        type=str,
        help='The ROM we should generate a patch for.',
    )
    parser.add_argument(
        'eeprom',
        metavar='EEPROM',
        type=str,
        help='The EEPROM settings file we should use to generate the patch.',
    )
    parser.add_argument(
        '--output-file',
        metavar='BIN',
        type=str,
        default=None,
        help='A different file to output to instead of updating the ROM specified directly.',
    )
    parser.add_argument(
        '--patch-file',
        metavar='PATCH',
        type=str,
        default=None,
        help='Write changed bytes to a patch instead of generating a new ROM.',
    )
    parser.add_argument(
        '--settings-directory',
        metavar='DIR',
        type=str,
        default=os.path.join(root, 'naomi', 'settings', 'definitions'),
        help='The directory containing settings definition files. Defaults to %(default)s.',
    )

    # Grab what we're doing
    args = parser.parse_args()

    if args.output_file and args.patch_file:
        raise Exception("Cannot write both a patch and a new ROM!")

    # First, try to open the EEPRom file.
    with open(args.eeprom, "rb") as fp:
        eeprom = NaomiEEPRom(fp.read())

    manager = SettingsManager(args.settings_directory)
    defaults = manager.from_serial(eeprom.serial)
    defaulteeprom = NaomiEEPRom(manager.to_eeprom(defaults))

    with open(args.rom, "rb" if args.output_file else "rb+") as fp:  # type: ignore
        data = FileBytes(fp)
        original = data.clone()
        rom = NaomiRom(data)
        defaultbytes = defaulteeprom.game.data
        updatedbytes = eeprom.game.data
        if len(defaultbytes) != len(updatedbytes):
            raise Exception("EEPROM sections aren't the same length!")

        for exe in [rom.main_executable, rom.test_executable]:
            for section in exe.sections:
                start = section.offset
                end = section.offset + section.length
                print(f"Searching {start} to {end}...")

                while True:
                    found = data.search(defaultbytes, start=start, end=end)
                    if found is not None:
                        print(f"Patching offset {found}!")
                        data[found:(found + len(updatedbytes))] = updatedbytes
                        start = found + 1
                    else:
                        # Done!
                        break

        if args.patch_file:
            print(f"Generating EEPROM settings patch and writing to {args.patch_file}.")
            changes = ["# Description: patch default game settings", *BinaryDiff.diff(original, data)]
            with open(args.patch_file, "w") as fps:
                fps.write(os.linesep.join(changes) + os.linesep)
        else:
            if args.output_file:
                print(f"Patched default game EEPROM settings to {args.output_file}.")
                with open(args.output_file, "wb") as fp:
                    data.write_changes(fp)
            else:
                print(f"Patched default game EEPROM settings to {args.rom}.")
                data.write_changes()

    return 0


if __name__ == "__main__":
    sys.exit(main())
[ "argparse.ArgumentParser", "arcadeutils.BinaryDiff.diff", "os.path.realpath", "naomi.settings.SettingsManager", "arcadeutils.FileBytes", "naomi.NaomiRom", "os.linesep.join", "os.path.join" ]
[((373, 497), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command-Line Utility for patching different game defaults into a Naomi ROM."""'}), "(description=\n 'Command-Line Utility for patching different game defaults into a Naomi ROM.'\n )\n", (396, 497), False, 'import argparse\n'), ((1814, 1854), 'naomi.settings.SettingsManager', 'SettingsManager', (['args.settings_directory'], {}), '(args.settings_directory)\n', (1829, 1854), False, 'from naomi.settings import SettingsManager\n'), ((2074, 2087), 'arcadeutils.FileBytes', 'FileBytes', (['fp'], {}), '(fp)\n', (2083, 2087), False, 'from arcadeutils import FileBytes, BinaryDiff\n'), ((2134, 2148), 'naomi.NaomiRom', 'NaomiRom', (['data'], {}), '(data)\n', (2142, 2148), False, 'from naomi import NaomiEEPRom, NaomiRom\n'), ((270, 296), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'import os\n'), ((1344, 1398), 'os.path.join', 'os.path.join', (['root', '"""naomi"""', '"""settings"""', '"""definitions"""'], {}), "(root, 'naomi', 'settings', 'definitions')\n", (1356, 1398), False, 'import os\n'), ((3218, 3249), 'arcadeutils.BinaryDiff.diff', 'BinaryDiff.diff', (['original', 'data'], {}), '(original, data)\n', (3233, 3249), False, 'from arcadeutils import FileBytes, BinaryDiff\n'), ((3329, 3353), 'os.linesep.join', 'os.linesep.join', (['changes'], {}), '(changes)\n', (3344, 3353), False, 'import os\n')]
from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions
import dsz


class AuditingEventData(DszObject):
    def __init__(self, dszpath='', cmdid=None, opsclass=None, parent=None, debug=False):
        DszObject.__init__(self, dszpath, cmdid, dszauditevent, parent, debug)

    def __getAuditingSuccess(self):
        return (self.audit_event_success == 1)
    auditing_success = property(__getAuditingSuccess)

    def __getAuditingFailure(self):
        return (self.audit_event_failure == 1)
    auditing_failure = property(__getAuditingFailure)


if ('audit' not in cmd_definitions):
    dszauditevent = OpsClass('event', {
        'audit_event_success': OpsField('audit_event_success', dsz.TYPE_BOOL),
        'audit_event_failure': OpsField('audit_event_failure', dsz.TYPE_BOOL),
        'category': OpsField('category', dsz.TYPE_STRING),
        'categorynative': OpsField('categorynative', dsz.TYPE_STRING),
        'subcategory': OpsField('subcategory', dsz.TYPE_STRING),
        'subcategorynative': OpsField('subcategorynative', dsz.TYPE_STRING),
        'categoryguid': OpsField('categoryguid', dsz.TYPE_STRING),
        'subcategoryguid': OpsField('subcategoryguid', dsz.TYPE_STRING),
    }, AuditingEventData, single=False)
    audit = OpsClass('status', {
        'event': dszauditevent,
        'audit_mode': OpsField('audit_mode', dsz.TYPE_BOOL),
        'audit_status_avail': OpsField('audit_status_avail', dsz.TYPE_BOOL),
    }, DszObject)
    auditcommand = OpsClass('audit', {'status': audit}, DszCommandObject)
    cmd_definitions['audit'] = auditcommand
[ "ops.data.OpsField", "ops.data.DszObject.__init__", "ops.data.OpsClass" ]
[((1407, 1461), 'ops.data.OpsClass', 'OpsClass', (['"""audit"""', "{'status': audit}", 'DszCommandObject'], {}), "('audit', {'status': audit}, DszCommandObject)\n", (1415, 1461), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((235, 305), 'ops.data.DszObject.__init__', 'DszObject.__init__', (['self', 'dszpath', 'cmdid', 'dszauditevent', 'parent', 'debug'], {}), '(self, dszpath, cmdid, dszauditevent, parent, debug)\n', (253, 305), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((681, 727), 'ops.data.OpsField', 'OpsField', (['"""audit_event_success"""', 'dsz.TYPE_BOOL'], {}), "('audit_event_success', dsz.TYPE_BOOL)\n", (689, 727), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((752, 798), 'ops.data.OpsField', 'OpsField', (['"""audit_event_failure"""', 'dsz.TYPE_BOOL'], {}), "('audit_event_failure', dsz.TYPE_BOOL)\n", (760, 798), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((812, 849), 'ops.data.OpsField', 'OpsField', (['"""category"""', 'dsz.TYPE_STRING'], {}), "('category', dsz.TYPE_STRING)\n", (820, 849), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((869, 912), 'ops.data.OpsField', 'OpsField', (['"""categorynative"""', 'dsz.TYPE_STRING'], {}), "('categorynative', dsz.TYPE_STRING)\n", (877, 912), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((929, 969), 'ops.data.OpsField', 'OpsField', (['"""subcategory"""', 'dsz.TYPE_STRING'], {}), "('subcategory', dsz.TYPE_STRING)\n", (937, 969), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((992, 1038), 'ops.data.OpsField', 'OpsField', (['"""subcategorynative"""', 'dsz.TYPE_STRING'], {}), "('subcategorynative', dsz.TYPE_STRING)\n", (1000, 1038), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((1056, 1097), 'ops.data.OpsField', 'OpsField', (['"""categoryguid"""', 'dsz.TYPE_STRING'], {}), "('categoryguid', dsz.TYPE_STRING)\n", (1064, 1097), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((1118, 1162), 'ops.data.OpsField', 'OpsField', (['"""subcategoryguid"""', 'dsz.TYPE_STRING'], {}), "('subcategoryguid', dsz.TYPE_STRING)\n", (1126, 1162), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((1268, 1305), 'ops.data.OpsField', 'OpsField', (['"""audit_mode"""', 'dsz.TYPE_BOOL'], {}), "('audit_mode', dsz.TYPE_BOOL)\n", (1276, 1305), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n'), ((1329, 1374), 'ops.data.OpsField', 'OpsField', (['"""audit_status_avail"""', 'dsz.TYPE_BOOL'], {}), "('audit_status_avail', dsz.TYPE_BOOL)\n", (1337, 1374), False, 'from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions\n')]
from json import dumps

from werkzeug.security import generate_password_hash, check_password_hash

from . import db
from .utils import timestamp


class User(db.Model):
    """The User model"""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(256), nullable=False, unique=True)
    created_at = db.Column(db.Integer, default=timestamp)
    password_hash = db.Column(db.String(256), nullable=False)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def to_dict(self):
        """Export user to a dictionary."""
        return {
            'id': self.id,
            'email': self.email,
            'created_at': self.created_at
        }

    @staticmethod
    def create(email, password):
        user = User()
        user.password_hash = generate_password_hash(password)
        user.email = email
        return user

    @staticmethod
    def get_user(email, password):
        for user in User.query.all():
            if user.email == email and user.verify_password(password):
                return user

    def __repr__(self):
        return dumps(self.to_dict())

    @property
    def is_authenticated(self):
        return True

    @property
    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        return str(self.id)
[ "werkzeug.security.check_password_hash", "werkzeug.security.generate_password_hash" ]
[((532, 581), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password_hash', 'password'], {}), '(self.password_hash, password)\n', (551, 581), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((894, 926), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (916, 926), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n')]
from __future__ import print_function

import csv
import sys

from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand

User = get_user_model()


class Command(BaseCommand):
    """
    CSV format: name,email,password
    """
    help = 'Creates volunteer accounts'

    def add_arguments(self, parser):
        parser.add_argument('csv_filename', default=False, help='csv filename')

    def handle(self, *args, **options):
        with open(options['csv_filename']) as csv_f:
            for row in csv.reader(csv_f):
                name = row[0]
                email = row[1]
                password = row[2]
                try:
                    user = User.objects.filter(email=email).first()
                    if not user:
                        print('Creating user {0}.'.format(email))
                        user = User.objects.create_user(name=name, email=email)
                        user.set_password(password)
                    else:
                        print('Updating permissions for user {0}.'.format(email))
                    user.is_volunteer = True
                    user.save()
                    assert authenticate(email=email, password=password)
                    print('User {0} successfully created.'.format(email))
                except:
                    print('There was a problem creating the user: {0}. Error: {1}.'
                          .format(email, sys.exc_info()[1]))
[ "csv.reader", "django.contrib.auth.get_user_model", "django.contrib.auth.authenticate", "sys.exc_info" ]
[((214, 230), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (228, 230), False, 'from django.contrib.auth import get_user_model\n'), ((646, 663), 'csv.reader', 'csv.reader', (['csv_f'], {}), '(csv_f)\n', (656, 663), False, 'import csv\n'), ((1293, 1337), 'django.contrib.auth.authenticate', 'authenticate', ([], {'email': 'email', 'password': 'password'}), '(email=email, password=password)\n', (1305, 1337), False, 'from django.contrib.auth import authenticate\n'), ((1564, 1578), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1576, 1578), False, 'import sys\n')]
from django.conf import settings
from django.db import models


class HouseInfo(models.Model):
    class Meta:
        db_table = "houseinfo"

    houseID = models.CharField(primary_key=True, max_length=50)
    community = models.CharField(max_length=50)
    decoration = models.CharField(max_length=50)
    direction = models.CharField(max_length=50)
    floor = models.CharField(max_length=50)
    followinfo = models.CharField(max_length=50)
    direction = models.CharField(max_length=50)
    housetype = models.CharField(max_length=50)
    link = models.CharField(max_length=50)
    square = models.CharField(max_length=50)
    taxtype = models.CharField(max_length=50)
    title = models.CharField(max_length=50)
    totalPrice = models.CharField(max_length=50)
    unitPrice = models.CharField(max_length=50)
    validdate = models.DateTimeField('validdate')
    years = models.CharField(max_length=50)
[ "django.db.models.CharField", "django.db.models.DateTimeField" ]
[((157, 206), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(50)'}), '(primary_key=True, max_length=50)\n', (173, 206), False, 'from django.db import models\n'), ((223, 254), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (239, 254), False, 'from django.db import models\n'), ((272, 303), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (288, 303), False, 'from django.db import models\n'), ((320, 351), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (336, 351), False, 'from django.db import models\n'), ((364, 395), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (380, 395), False, 'from django.db import models\n'), ((413, 444), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (429, 444), False, 'from django.db import models\n'), ((461, 492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (477, 492), False, 'from django.db import models\n'), ((509, 540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (525, 540), False, 'from django.db import models\n'), ((552, 583), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (568, 583), False, 'from django.db import models\n'), ((597, 628), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (613, 628), False, 'from django.db import models\n'), ((643, 674), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (659, 674), False, 'from django.db import models\n'), ((687, 718), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (703, 718), False, 'from django.db import models\n'), ((736, 767), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (752, 767), False, 'from django.db import models\n'), ((784, 815), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (800, 815), False, 'from django.db import models\n'), ((832, 865), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""validdate"""'], {}), "('validdate')\n", (852, 865), False, 'from django.db import models\n'), ((878, 909), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (894, 909), False, 'from django.db import models\n')]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
'''This module includes a set of optimizers for updating model parameters.
It replaces the old optimizers from optimizer.py'''

from singa import tensor
from singa import autograd
from . import singa_wrap as singa


class Optimizer(object):
    r"""Base optimizer.

    Args:
        config (Dict): specify the default values of configurable variables.
    """

    def __init__(self, config):
        self.default_config = config
        self.iter = 0
        self.param2config = {}
        self.param2state = {}

    def update(self, param, grad):
        r"""Update the param values with given gradients.

        Args:
            param(Tensor): param values to be updated in-place
            grad(Tensor): param gradients; the values may be updated
                in this function; do not use it anymore
        """
        pass

    def step(self):
        r"""To increment the step counter"""
        self.iter += 1

    def register(self, param_group, config):
        for param in param_group:
            assert param not in self.param2config, 'param is already registered'
            self.param2config[param] = config

    def load(self):
        pass

    def save(self):
        pass


class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        lr(float): learning rate
        momentum(float, optional): momentum factor(default: 0)
        weight_decay(float, optional): weight decay(L2 penalty)(default: 0)
        dampening(float, optional): dampening for momentum(default: 0)
        nesterov(bool, optional): enables Nesterov momentum(default: False)

    Example:
        >>> from singa import opt
        >>> optimizer = opt.SGD(lr=0.1, momentum=0.9)
        >>> optimizer.update()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum / Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
            v = \rho * v + g \\
            p = p - lr * v

        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively.

        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form

        .. math::
            v = \rho * v + lr * g \\
            p = p - v

        The Nesterov version is analogously modified.
    """

    def __init__(self, lr=0.1, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr,
                        momentum=momentum,
                        dampening=dampening,
                        weight_decay=weight_decay,
                        nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening")
        super(SGD, self).__init__(defaults)

    def update(self, param, grad):
        """Performs a single optimization step.

        Arguments:
            param(Tensor): param values to be update in-place
            grad(Tensor): param gradients; the values may be updated
                in this function; cannot use it anymore
        """
        assert param.shape == grad.shape, ("shape mismatch", param.shape, grad.shape)
        group = self.default_config
        if param in self.param2config:
            group = self.param2config[param]
        weight_decay = group['weight_decay']
        momentum = group['momentum']
        dampening = group['dampening']
        nesterov = group['nesterov']

        if weight_decay != 0:
            singa.Axpy(weight_decay, param.data, grad.data)
        if momentum != 0:
            if param not in self.param2state:
                self.param2state[param] = {}
            param_state = self.param2state[param]
            if 'momentum_buffer' not in param_state:
                buf = param_state['momentum_buffer'] = tensor.zeros_like(param)
                buf *= momentum
                singa.Axpy(1.0, grad.data, buf.data)
            else:
                buf = param_state['momentum_buffer']
                buf *= momentum
                singa.Axpy(1.0 - dampening, grad.data, buf.data)
            if nesterov:
                singa.Axpy(momentum, buf.data, grad.data)
            else:
                grad = buf
        singa.Axpy(-group['lr'], grad.data, param.data)

    def backward_and_update(self, loss):
        for p, g in autograd.backward(loss):
            self.update(p, g)


class DistOpt(object):

    def __init__(self, opt=SGD(), nccl_id=None, gpu_num=None, gpu_per_node=None, buffSize=4194304):
        # The class is designed to wrap an optimizer to do distributed training.
        # opt: The optimizer to be wrapped. nDev: number of devices(GPUs) a
        # process will control/use.
        # nccl_id: an nccl id holder object for a unique communication id
        # gpu_num: the GPU id in a single node
        # gpu_per_node: the number of GPUs in a single node
        # buffSize: the buffSize used in nccl communicator, default is 16 MB
        # world_size: total number of processes.
        # rank_in_local: local rank of a process on the current node.
        # rank_in_global: global rank of a process
        self.opt = opt
        if nccl_id is None:
            # constructor for application using MPI
            self.communicator = singa.Communicator(buffSize)
        else:
            # constructor for application using python multi-process module
            self.communicator = singa.Communicator(gpu_num, gpu_per_node, nccl_id, buffSize)

        self.world_size = self.communicator.totalMPIRanksInGlobal
        self.rank_in_local = self.communicator.MPIRankInLocal
        self.rank_in_global = self.communicator.MPIRankInGlobal

    def update(self, param, grad):
        grad /= self.world_size
        self.opt.update(param, grad)

    def all_reduce(self, tensor):
        self.communicator.synch(tensor)

    def fused_all_reduce(self, tensor):
        tensor = singa.VecTensor(tensor)
        self.communicator.fusedSynch(tensor)

    def all_reduce_half(self, tensor):
        self.communicator.synchHalf(tensor)

    def fused_all_reduce_half(self, tensor):
        tensor = singa.VecTensor(tensor)
        self.communicator.fusedSynchHalf(tensor)

    def sparsification(self, tensor, accumulation, spars, topK):
        if accumulation is None:
            self.communicator.sparsification(tensor, spars, topK)
        else:
            self.communicator.sparsification(tensor, accumulation, spars, topK)

    def fused_sparsification(self, tensor, accumulation, spars, topK):
        tensor = singa.VecTensor(tensor)
        if accumulation is None:
            self.communicator.fusedSparsification(tensor, spars, topK)
        else:
            self.communicator.fusedSparsification(tensor, accumulation, spars, topK)

    def wait(self):
        self.communicator.wait()

    def backward_and_update(self, loss, threshold=2097152):
        # backward propagation from the loss and parameter update
        # it applies tensor fusion which fuses all the tensors smaller than the threshold value
        plist = []
        acc = 0
        glist = []
        for p, g in autograd.backward(loss):
            if g.size() > threshold:
                # larger than threshold -> reduced directly
                self.all_reduce(g.data)
            else:
                # smaller than threshold -> accumulate
                glist.append(g.data)
                acc += g.size()
                if (acc > threshold):
                    self.fused_all_reduce(glist)
                    acc = 0
                    glist = []
            plist.append((p, g))
        if glist:
            self.fused_all_reduce(glist)
        self.wait()
        for p, g in plist:
            self.update(p, g)

    def backward_and_update_half(self, loss, threshold=2097152, clipping=False, clip_Value=100):
        # THIS IS AN EXPERIMENTAL FUNCTION FOR RESEARCH PURPOSES:
        # It converts the gradients to 16-bit half precision format before allreduce.
        # To assist training, this function provides an option to perform gradient clipping.
        plist = []
        acc = 0
        glist = []
        for p, g in autograd.backward(loss):
            if clipping:
                g = autograd.clip(g, -clip_Value, clip_Value)
            if g.size() > threshold:
                # larger than threshold -> reduced directly
                self.all_reduce_half(g.data)
            else:
                # smaller than threshold -> accumulate
                glist.append(g.data)
                acc += g.size()
                if (acc > threshold):
                    self.fused_all_reduce_half(glist)
                    acc = 0
                    glist = []
            plist.append((p, g))
        if glist:
            self.fused_all_reduce_half(glist)
        self.wait()
        for p, g in plist:
            self.update(p, g)

    def backward_and_partial_update(self, loss, threshold=2097152):
        # THIS IS AN EXPERIMENTAL FUNCTION FOR RESEARCH PURPOSES:
        # It performs asynchronous training where one parameter partition is all-reduced per iteration.
        # The size of the parameter partition depends on the threshold value.
        # self.partial is the counter to determine which partition to perform all-reduce.
        if not hasattr(self, "partial"):
            self.partial = 0
        self.partial += 1
        k = 0
        plist = []
        acc = 0
        tenlist = []
        reduced = []
        for p, g in autograd.backward(loss):
            # every parameter is updated locally
            self.opt.update(p, g)
            # then do the partial parameter synchronization
            if p.size() > threshold:
                # larger than threshold -> reduced directly;
                # k is the partition number of the full gradient set
                k += 1
                if (k == self.partial):
                    self.all_reduce(p.data)
                    reduced.append(p)
            else:
                # smaller than threshold -> accumulate
                plist.append(p.data)
                tenlist.append(p)
                acc += p.size()
                if (acc > threshold):
                    k += 1
                    if (k == self.partial):
                        self.fused_all_reduce(plist)
                        reduced = tenlist
                    acc = 0
                    plist = []
                    tenlist = []
        if plist:
            k += 1
            if (k == self.partial):
                self.fused_all_reduce(plist)
                reduced = tenlist
        self.wait()
        # the all-reduced parameters need to be averaged
        for r in reduced:
            r /= self.world_size
        # the counter returns to zero after a cycle of partial update
        if (k == self.partial):
            self.partial = 0

    def backward_and_spars_update(self, loss, threshold=2097152, spars=0.05, topK=False, corr=True):
        r"""THIS IS AN EXPERIMENTAL FUNCTION FOR RESEARCH PURPOSES:
        Performs backward propagation from the loss and parameter update with
        sparsification. It fuses the tensors with size smaller than the threshold
        value to reduce network latency, as well as using a sparsification scheme
        to transfer only the gradient elements which are significant.

        Arguments:
            loss(Tensor): loss is the objective function of the deep learning model
                optimization, e.g. for classification problem it can be the output of the
                softmax_cross_entropy function.
            threshold(int): threshold is a parameter to control performance in fusing
                the tensors. For the tensors of sizes smaller than threshold, they are to
                be accumulated and fused before the all reduce operation. For the tensors
                of sizes larger than the threshold value, they are to be reduced directly
                without fusion.
            spars(float): a parameter to control sparsity as defined below
            topK(bool): When topK is False, it sparsifies the gradient with absolute
                value >= spars. When topK is True, it sparsifies a fraction of the total
                gradient number equal to spars, e.g. when spars = 0.01, it sparsifies 1%
                of the total gradient elements
            corr(bool): whether to use the local accumulate gradient for correction

        Attributes:
            self.sparsInit: A counter to determine which partition to perform all-reduce.
            self.gradAccumulation: Local gradient accumulation
        """
        if ((not hasattr(self, "sparsInit")) and corr):
            self.gradAccumulation = []
            self.sparsInit = False
        plist = []
        acc = 0
        k = -1
        glist = []
        for p, g in autograd.backward(loss):
            if g.size() > threshold:
                # larger than threshold -> reduced directly
                k += 1
                if (corr and (not self.sparsInit)):
                    # create a tensor for the gradient accumulation
                    self.gradAccumulation.append(tensor.Tensor((g.size(),), p.device, p.dtype))
                    self.gradAccumulation[k].set_value(0.0)
                if corr:
                    self.sparsification(g.data, self.gradAccumulation[k].data, spars, topK)
                else:
                    self.sparsification(g.data, None, spars, topK)
            else:
                # smaller than threshold -> accumulate
                glist.append(g.data)
                acc += g.size()
                if (acc > threshold):
                    k += 1
                    if (corr and (not self.sparsInit)):
                        # create a tensor for the gradient accumulation
                        self.gradAccumulation.append(tensor.Tensor((acc,), p.device, p.dtype))
                        self.gradAccumulation[k].set_value(0.0)
                    if corr:
                        self.fused_sparsification(glist, self.gradAccumulation[k].data, spars, topK)
                    else:
                        self.fused_sparsification(glist, None, spars, topK)
                    acc = 0
                    glist = []
            plist.append((p, g))
        if glist:
            k += 1
            if (corr and (not self.sparsInit)):
                # create a tensor for the gradient accumulation
                self.gradAccumulation.append(tensor.Tensor((acc,), p.device, p.dtype))
                self.gradAccumulation[k].set_value(0.0)
            if corr:
                self.fused_sparsification(glist, self.gradAccumulation[k].data, spars, topK)
            else:
                self.fused_sparsification(glist, None, spars, topK)
        self.wait()
        for p, g in plist:
            self.update(p, g)
        self.sparsInit = True
[ "singa.tensor.Tensor", "singa.tensor.zeros_like", "singa.autograd.clip", "singa.autograd.backward" ]
[((5797, 5820), 'singa.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (5814, 5820), False, 'from singa import autograd\n'), ((8597, 8620), 'singa.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (8614, 8620), False, 'from singa import autograd\n'), ((9658, 9681), 'singa.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (9675, 9681), False, 'from singa import autograd\n'), ((11011, 11034), 'singa.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (11028, 11034), False, 'from singa import autograd\n'), ((14389, 14412), 'singa.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (14406, 14412), False, 'from singa import autograd\n'), ((5273, 5297), 'singa.tensor.zeros_like', 'tensor.zeros_like', (['param'], {}), '(param)\n', (5290, 5297), False, 'from singa import tensor\n'), ((9728, 9769), 'singa.autograd.clip', 'autograd.clip', (['g', '(-clip_Value)', 'clip_Value'], {}), '(g, -clip_Value, clip_Value)\n', (9741, 9769), False, 'from singa import autograd\n'), ((16048, 16088), 'singa.tensor.Tensor', 'tensor.Tensor', (['(acc,)', 'p.device', 'p.dtype'], {}), '((acc,), p.device, p.dtype)\n', (16061, 16088), False, 'from singa import tensor\n'), ((15424, 15464), 'singa.tensor.Tensor', 'tensor.Tensor', (['(acc,)', 'p.device', 'p.dtype'], {}), '((acc,), p.device, p.dtype)\n', (15437, 15464), False, 'from singa import tensor\n')]
# Copyright 2017-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""VSPERF VPP implementation using DPDK and vhostuser vports
"""

import os
import copy
import re
import pexpect

from src.dpdk import dpdk
from conf import settings as S
from vswitches.vswitch import IVSwitch
from tools import tasks
from tools.version import Version


# pylint: disable=too-many-public-methods
class VppDpdkVhost(IVSwitch, tasks.Process):
    """ VPP with DPDK support
    """
    _proc_name = 'vpp'
    _bridge_idx_counter = 100

    def __init__(self):
        """See IVswitch for general description
        """
        super().__init__()
        name, ext = os.path.splitext(S.getValue('LOG_FILE_VPP'))
        rename_vpplf = "{name}_{uid}{ex}".format(name=name,
                                                 uid=S.getValue('LOG_TIMESTAMP'),
                                                 ex=ext)
        self._logfile = os.path.join(S.getValue('RESULTS_PATH'), rename_vpplf)
        self._expect = r'vpp#'
        self._cmd_template = ['sudo', '-E', S.getValue('TOOLS')['vpp']]
        self._phy_ports = []
        self._virt_ports = []
        self._vpp_ctl = ['sudo', S.getValue('TOOLS')['vppctl']]

        # configure DPDK NICs
        tmp_args = copy.deepcopy(S.getValue('VSWITCH_VPP_ARGS'))
        if 'dpdk' not in tmp_args:
            tmp_args['dpdk'] = []

        # override socket-mem settings
        for tmp_arg in tmp_args['dpdk']:
            if tmp_arg.startswith('socket-mem'):
                tmp_args['dpdk'].remove(tmp_arg)
        tmp_args['dpdk'].append('socket-mem ' +
                                ','.join(S.getValue('DPDK_SOCKET_MEM')))

        # create directory for vhostuser sockets if needed
        if not os.path.exists(S.getValue('TOOLS')['ovs_var_tmp']):
            tasks.run_task(['sudo', 'mkdir', '-p',
                            S.getValue('TOOLS')['ovs_var_tmp']], self._logger)

        # configure path to the plugins
        tmp_args['plugin_path'] = S.getValue('TOOLS')['vpp_plugin_path']

        # cli sock file must be used for VPP 17.10 and newer
        if S.getValue('VSWITCH_VPP_CLI_SOCK'):
            self._vpp_ctl += ['-s', S.getValue('VSWITCH_VPP_CLI_SOCK')]
            tmp_args['unix'].append('cli-listen {}'.format(
                S.getValue('VSWITCH_VPP_CLI_SOCK')))

        mqs = int(S.getValue('VSWITCH_DPDK_MULTI_QUEUES'))
        tmp_rxqs = ''
        if mqs:
            tmp_rxqs = " {{ num-rx-queues {} }}".format(mqs)

        # configure physical ports
        for nic in S.getValue('NICS'):
            tmp_args['dpdk'].append("dev {}{}".format(nic['pci'], tmp_rxqs))
        self._vswitch_args = self._process_vpp_args(tmp_args)

    def _get_nic_info(self, key='Name'):
        """Read NIC info from VPP and return NIC details in a dictionary
           indexed by given ``key``

        :param key: Name of the key to be used for indexing result dictionary
        :returns: Dictionary with NIC infos including their PCI addresses
        """
        result = {}
        output = self.run_vppctl(['show', 'hardware', 'brief'])
        # parse output and store basic info about NICS
        ifaces = output[0].split('\n')
        keys = ifaces[0].split()
        keys.append('Pci')
        keyidx = keys.index(key)
        for iface in ifaces[1:]:
            tmpif = iface.split()
            if not tmpif:
                continue
            # get PCI address of given interface
            output = self.run_vppctl(['show', 'hardware', tmpif[1], 'detail'])
            match = re.search(r'pci address:\s*([\d:\.]+)', output[0])
            if match:
                # normalize PCI address, e.g. 0000:05:10.01 => 0000:05:10.1
                tmp_pci = match.group(1).split('.')
                tmp_pci[1] = str(int(tmp_pci[1]))
                tmpif.append('.'.join(tmp_pci))
            else:
                tmpif.append(None)
            # store only NICs with reasonable index
            if tmpif[keyidx] is not None:
                result[tmpif[keyidx]] = dict(zip(keys, tmpif))
        return result

    def _process_vpp_args(self, args):
        """Produce VPP CLI args from input dictionary ``args``
        """
        cli_args = []
        for cfg_key in args:
            cli_args.append(cfg_key)
            if isinstance(args[cfg_key], str):
                cli_args.append(args[cfg_key])
            else:
                cli_args.append("{{ {} }}".format(' '.join(args[cfg_key])))

        self._logger.debug("VPP CLI args: %s", cli_args)
        return cli_args

    def start(self):
        """Activates DPDK kernel modules and starts VPP

        :raises: pexpect.EOF, pexpect.TIMEOUT
        """
        dpdk.init()
        self._logger.info("Starting VPP...")

        self._cmd = self._cmd_template + self._vswitch_args

        try:
            tasks.Process.start(self)
            self.relinquish()
        except (pexpect.EOF, pexpect.TIMEOUT) as exc:
            self._logger.error("Exception during VPP start.")
            raise exc

        self._logger.info("VPP...Started.")

    def stop(self):
        """See IVswitch for general description

        Kills VPP and removes DPDK kernel modules.
        """
        self._logger.info("Terminating VPP...")
        self.kill()
        self._logger.info("VPP...Terminated.")
        dpdk.cleanup()

    def kill(self, signal='-15', sleep=10):
        """See IVswitch for general description

        Kills ``vpp``
        """
        if self.is_running():
            # try to get VPP pid
            output = self.run_vppctl(['show', 'version', 'verbose'])
            match = re.search(r'Current PID:\s*([0-9]+)', output[0])
            if match:
                vpp_pid = match.group(1)
                tasks.terminate_task(vpp_pid, logger=self._logger)

            # in case that pid was not detected or sudo envelope
            # has not been terminated yet
            tasks.Process.kill(self, signal, sleep)

    def get_version(self):
        """See IVswitch for general description
        """
        versions = []
        output = self.run_vppctl(['show', 'version', 'verbose'])
        if output[1]:
            self._logger.warning("VPP version can not be read!")
            return versions

        match = re.search(r'Version:\s*(.+)', output[0])
        if match:
            versions.append(Version(S.getValue('VSWITCH'), match.group(1)))

        match = re.search(r'DPDK Version:\s*DPDK (.+)', output[0])
        if match:
            versions.append(Version('dpdk', match.group(1)))

        return versions

    def add_switch(self, switch_name, dummy_params=None):
        """See IVswitch for general description
        """
        # pylint: disable=unused-argument
        if switch_name in self._switches:
            self._logger.warning("switch %s already exists...", switch_name)
        else:
            self._switches[switch_name] = self._bridge_idx_counter
            self._bridge_idx_counter += 1

    def del_switch(self, switch_name):
        """See IVswitch for general description
        """
        if switch_name in self._switches:
            del self._switches[switch_name]

    def add_phy_port(self, dummy_switch_name):
        """See IVswitch for general description

        :raises: RuntimeError
        """
        # pylint: disable=unused-argument
        # get list of physical interfaces with PCI addresses
        vpp_nics = self._get_nic_info(key='Pci')
        # check if there are any NICs left
        if len(self._phy_ports) >= len(S.getValue('NICS')):
            raise RuntimeError("Can't add phy port! There are only {} ports defined "
                               "by WHITELIST_NICS parameter!".format(len(S.getValue('NICS'))))

        nic = S.getValue('NICS')[len(self._phy_ports)]
        if not nic['pci'] in vpp_nics:
            raise RuntimeError('VPP cannot access nic with PCI address: {}'.format(nic['pci']))

        nic_name = vpp_nics[nic['pci']]['Name']
        self._phy_ports.append(nic_name)
        self.run_vppctl(['set', 'int', 'state', nic_name, 'up'])
        return (nic_name, vpp_nics[nic['pci']]['Idx'])

    def add_vport(self, dummy_switch_name):
        """See IVswitch for general description
        """
        # pylint: disable=unused-argument
        socket_name = S.getValue('TOOLS')['ovs_var_tmp'] + 'dpdkvhostuser' + str(len(self._virt_ports))
        if S.getValue('VSWITCH_VHOSTUSER_SERVER_MODE'):
            mode = ['server']
        else:
            mode = []
        output = self.run_vppctl(['create', 'vhost-user', 'socket', socket_name] + mode +
                                 S.getValue('VSWITCH_VPP_VHOSTUSER_ARGS'))
        if output[0].find('returned') >= 0:
            raise RuntimeError('VPP VhostUser interface cannot be created.')
        nic_name = output[0].strip()
        self._virt_ports.append(nic_name)
        self.run_vppctl(['set', 'int', 'state', nic_name, 'up'])
        return (nic_name, None)

    def del_port(self, switch_name, port_name):
        """See IVswitch for general description
        """
        if port_name in self._phy_ports:
            self.run_vppctl(['set', 'int', 'state', port_name, 'down'])
            self._phy_ports.remove(port_name)
        elif port_name in self._virt_ports:
            self.run_vppctl(['set', 'int', 'state', port_name, 'down'])
            self.run_vppctl(['delete', 'vhost-user', port_name])
            self._virt_ports.remove(port_name)
        else:
            self._logger.warning("Port %s is not configured.", port_name)

    def add_l2patch(self, port1, port2):
        """Create l2patch connection between given ports
        """
        self.run_vppctl(['test', 'l2patch', 'rx', port1, 'tx', port2])

    def add_xconnect(self, port1, port2):
        """Create xconnect connection between given ports
        """
        self.run_vppctl(['set', 'interface', 'l2', 'xconnect', port1, port2])

    def add_bridge(self, switch_name, port1, port2):
        """Add given ports to bridge ``switch_name``
        """
        self.run_vppctl(['set', 'interface', 'l2', 'bridge', port1,
                         str(self._switches[switch_name])])
        self.run_vppctl(['set', 'interface', 'l2', 'bridge', port2,
                         str(self._switches[switch_name])])

    def add_connection(self, switch_name, port1, port2, traffic=None):
        """See IVswitch for general description

        :raises: RuntimeError
        """
        if traffic:
            self._logger.warning("VPP add_connection() does not support 'traffic' options.")
        mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
        if mode == 'l2patch':
            self.add_l2patch(port1, port2)
        elif mode == 'xconnect':
            self.add_xconnect(port1, port2)
        elif mode == 'bridge':
            self.add_bridge(switch_name, port1, port2)
        else:
            raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)

    def del_l2patch(self, port1, port2):
        """Remove l2patch connection between given ports

        :param port1: port to be used in connection
        :param port2: port to be used in connection
        """
        self.run_vppctl(['test', 'l2patch', 'rx', port1, 'tx', port2, 'del'])

    def del_xconnect(self, port1, port2):
        """Remove xconnect connection between given ports
        """
        self.run_vppctl(['set', 'interface', 'l3', port1])
        self.run_vppctl(['set', 'interface', 'l3', port2])

    def del_bridge(self, _dummy_switch_name, port1, port2):
        """Remove given ports from the bridge
        """
        self.run_vppctl(['set', 'interface', 'l3', port1])
        self.run_vppctl(['set', 'interface', 'l3', port2])

    def del_connection(self, switch_name, port1=None, port2=None):
        """See IVswitch for general description

        :raises: RuntimeError
        """
        if port1 and port2:
            mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
            if mode == 'l2patch':
                self.del_l2patch(port1, port2)
            elif mode == 'xconnect':
                self.del_xconnect(port1, port2)
            elif mode == 'bridge':
                self.del_bridge(switch_name, port1, port2)
            else:
                raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)

    def dump_l2patch(self):
        """Dump l2patch connections
        """
        self.run_vppctl(['show', 'l2patch'])

    def dump_xconnect(self):
        """Dump l2 xconnect connections
        """
        self.run_vppctl(['show', 'mode'] + self._phy_ports + self._virt_ports)

    def dump_bridge(self, switch_name):
        """Show bridge details

        :param switch_name: switch on which to operate
        """
        self.run_vppctl(['show', 'bridge-domain',
                         str(self._switches[switch_name]), 'int'])

    def dump_connections(self, switch_name):
        """See IVswitch for general description

        :raises: RuntimeError
        """
        mode = S.getValue('VSWITCH_VPP_L2_CONNECT_MODE')
        if mode == 'l2patch':
            self.dump_l2patch()
        elif mode == 'xconnect':
            self.dump_xconnect()
        elif mode == 'bridge':
            self.dump_bridge(switch_name)
        else:
            raise RuntimeError('VPP: Unsupported l2 connection mode detected %s' % mode)

    def run_vppctl(self, args, check_error=False):
        """Run ``vppctl`` with supplied arguments.

        :param args: Arguments to pass to ``vppctl``
        :param check_error: Throw exception on error
        :return: None
        """
        cmd = self._vpp_ctl + args
        return tasks.run_task(cmd, self._logger, 'Running vppctl...', check_error)

    #
    # Validate methods
    #
    def validate_add_switch(self, _dummy_result, switch_name, _dummy_params=None):
        """Validate - Create a new logical switch with no ports
        """
        return switch_name in self._switches

    def validate_del_switch(self, _dummy_result, switch_name):
        """Validate removal of switch
        """
        return not self.validate_add_switch(_dummy_result, switch_name)

    def validate_add_phy_port(self, result, _dummy_switch_name):
        """ Validate that physical port was added to bridge.
        """
        return result[0] in self._phy_ports

    def validate_add_vport(self, result, _dummy_switch_name):
        """ Validate that virtual port was added to bridge.
        """
        return result[0] in self._virt_ports

    def validate_del_port(self, _dummy_result, _dummy_switch_name, port_name):
        """ Validate that port_name was removed from bridge.
        """
        return not (port_name in self._phy_ports or port_name in self._virt_ports)

    # pylint: disable=no-self-use
    def validate_add_connection(self, _dummy_result, _dummy_switch_name,
                                _dummy_port1, _dummy_port2, _dummy_traffic=None):
        """ Validate that connection was added
        """
        return True

    def validate_del_connection(self, _dummy_result, _dummy_switch_name,
                                _dummy_port1, _dummy_port2):
        """ Validate that connection was deleted
        """
        return True

    def validate_dump_connections(self, _dummy_result, _dummy_switch_name):
        """ Validate dump connections call
        """
        return True

    def validate_run_vppctl(self, result, _dummy_args, _dummy_check_error=False):
        """Validate execution of ``vppctl`` with supplied arguments.
        """
        # there shouldn't be any stderr
        return not result[1]

    #
    # Non implemented methods
    #
    def add_route(self, switch_name, network, destination):
        """See IVswitch for general description
        """
        raise NotImplementedError()

    def set_tunnel_arp(self, ip_addr, mac_addr, switch_name):
        """See IVswitch for general description
        """
        raise NotImplementedError()

    def add_tunnel_port(self, switch_name, remote_ip, tunnel_type='vxlan', params=None):
        """See IVswitch for general description
        """
        raise NotImplementedError()

    def get_ports(self, switch_name):
        """See IVswitch for general description
        """
        raise NotImplementedError()
[ "tools.tasks.run_task", "src.dpdk.dpdk.init", "tools.tasks.Process.kill", "src.dpdk.dpdk.cleanup", "tools.tasks.Process.start", "conf.settings.getValue", "re.search", "tools.tasks.terminate_task" ]
[((2684, 2718), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_CLI_SOCK"""'], {}), "('VSWITCH_VPP_CLI_SOCK')\n", (2694, 2718), True, 'from conf import settings as S\n'), ((3119, 3137), 'conf.settings.getValue', 'S.getValue', (['"""NICS"""'], {}), "('NICS')\n", (3129, 3137), True, 'from conf import settings as S\n'), ((5282, 5293), 'src.dpdk.dpdk.init', 'dpdk.init', ([], {}), '()\n', (5291, 5293), False, 'from src.dpdk import dpdk\n'), ((5921, 5935), 'src.dpdk.dpdk.cleanup', 'dpdk.cleanup', ([], {}), '()\n', (5933, 5935), False, 'from src.dpdk import dpdk\n'), ((6863, 6903), 're.search', 're.search', (['"""Version:\\\\s*(.+)"""', 'output[0]'], {}), "('Version:\\\\s*(.+)', output[0])\n", (6872, 6903), False, 'import re\n'), ((7015, 7065), 're.search', 're.search', (['"""DPDK Version:\\\\s*DPDK (.+)"""', 'output[0]'], {}), "('DPDK Version:\\\\s*DPDK (.+)', output[0])\n", (7024, 7065), False, 'import re\n'), ((8995, 9038), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VHOSTUSER_SERVER_MODE"""'], {}), "('VSWITCH_VHOSTUSER_SERVER_MODE')\n", (9005, 9038), True, 'from conf import settings as S\n'), ((11191, 11232), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_L2_CONNECT_MODE"""'], {}), "('VSWITCH_VPP_L2_CONNECT_MODE')\n", (11201, 11232), True, 'from conf import settings as S\n'), ((13620, 13661), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_L2_CONNECT_MODE"""'], {}), "('VSWITCH_VPP_L2_CONNECT_MODE')\n", (13630, 13661), True, 'from conf import settings as S\n'), ((14261, 14328), 'tools.tasks.run_task', 'tasks.run_task', (['cmd', 'self._logger', '"""Running vppctl..."""', 'check_error'], {}), "(cmd, self._logger, 'Running vppctl...', check_error)\n", (14275, 14328), False, 'from tools import tasks\n'), ((1189, 1215), 'conf.settings.getValue', 'S.getValue', (['"""LOG_FILE_VPP"""'], {}), "('LOG_FILE_VPP')\n", (1199, 1215), True, 'from conf import settings as S\n'), ((1507, 1533), 'conf.settings.getValue', 'S.getValue', (['"""RESULTS_PATH"""'], {}), "('RESULTS_PATH')\n", (1517, 1533), True, 'from conf import settings as S\n'), ((1839, 1869), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_ARGS"""'], {}), "('VSWITCH_VPP_ARGS')\n", (1849, 1869), True, 'from conf import settings as S\n'), ((2572, 2591), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (2582, 2591), True, 'from conf import settings as S\n'), ((2924, 2963), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_DPDK_MULTI_QUEUES"""'], {}), "('VSWITCH_DPDK_MULTI_QUEUES')\n", (2934, 2963), True, 'from conf import settings as S\n'), ((4132, 4184), 're.search', 're.search', (['"""pci address:\\\\s*([\\\\d:\\\\.]+)"""', 'output[0]'], {}), "('pci address:\\\\s*([\\\\d:\\\\.]+)', output[0])\n", (4141, 4184), False, 'import re\n'), ((5426, 5451), 'tools.tasks.Process.start', 'tasks.Process.start', (['self'], {}), '(self)\n', (5445, 5451), False, 'from tools import tasks\n'), ((6216, 6264), 're.search', 're.search', (['"""Current PID:\\\\s*([0-9]+)"""', 'output[0]'], {}), "('Current PID:\\\\s*([0-9]+)', output[0])\n", (6225, 6264), False, 'import re\n'), ((6516, 6555), 'tools.tasks.Process.kill', 'tasks.Process.kill', (['self', 'signal', 'sleep'], {}), '(self, signal, sleep)\n', (6534, 6555), False, 'from tools import tasks\n'), ((8348, 8366), 'conf.settings.getValue', 'S.getValue', (['"""NICS"""'], {}), "('NICS')\n", (8358, 8366), True, 'from conf import settings as S\n'), ((12540, 12581), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_L2_CONNECT_MODE"""'], {}), "('VSWITCH_VPP_L2_CONNECT_MODE')\n", (12550, 12581), True, 'from conf import settings as S\n'), ((1330, 1357), 'conf.settings.getValue', 'S.getValue', (['"""LOG_TIMESTAMP"""'], {}), "('LOG_TIMESTAMP')\n", (1340, 1357), True, 'from conf import settings as S\n'), ((1624, 1643), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (1634, 1643), True, 'from conf import settings as S\n'), ((1744, 1763), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (1754, 1763), True, 'from conf import settings as S\n'), ((2756, 2790), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_CLI_SOCK"""'], {}), "('VSWITCH_VPP_CLI_SOCK')\n", (2766, 2790), True, 'from conf import settings as S\n'), ((6344, 6394), 'tools.tasks.terminate_task', 'tasks.terminate_task', (['vpp_pid'], {'logger': 'self._logger'}), '(vpp_pid, logger=self._logger)\n', (6364, 6394), False, 'from tools import tasks\n'), ((8131, 8149), 'conf.settings.getValue', 'S.getValue', (['"""NICS"""'], {}), "('NICS')\n", (8141, 8149), True, 'from conf import settings as S\n'), ((9229, 9269), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_VHOSTUSER_ARGS"""'], {}), "('VSWITCH_VPP_VHOSTUSER_ARGS')\n", (9239, 9269), True, 'from conf import settings as S\n'), ((2208, 2237), 'conf.settings.getValue', 'S.getValue', (['"""DPDK_SOCKET_MEM"""'], {}), "('DPDK_SOCKET_MEM')\n", (2218, 2237), True, 'from conf import settings as S\n'), ((2330, 2349), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (2340, 2349), True, 'from conf import settings as S\n'), ((2868, 2902), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH_VPP_CLI_SOCK"""'], {}), "('VSWITCH_VPP_CLI_SOCK')\n", (2878, 2902), True, 'from conf import settings as S\n'), ((6958, 6979), 'conf.settings.getValue', 'S.getValue', (['"""VSWITCH"""'], {}), "('VSWITCH')\n", (6968, 6979), True, 'from conf import settings as S\n'), ((8902, 8921), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (8912, 8921), True, 'from conf import settings as S\n'), ((2446, 2465), 'conf.settings.getValue', 'S.getValue', (['"""TOOLS"""'], {}), "('TOOLS')\n", (2456, 2465), True, 'from conf import settings as S\n'), ((8311, 8329), 'conf.settings.getValue', 'S.getValue', (['"""NICS"""'], {}), "('NICS')\n", (8321, 8329), True, 'from conf import settings as S\n')]
from typing import List, Optional

from moodle import MoodleWarning, ResponsesFactory
from moodle.attr import dataclass, field


@dataclass
class ActivityCompletion:
    """Activity Completion

    Args:
        cmid (int): comment ID
        modname (str): activity module name
        instance (int): instance ID
        state (int): completion state value: 0 means incomplete, 1 complete, 2 complete pass, 3 complete fail
        timecompleted (int): timestamp for completed activity
        tracking (int): type of tracking: 0 means none, 1 manual, 2 automatic
        overrideby (Optional[int]): The user id who has overriden the status, or null
        valueused (Optional[int]): Whether the completion status affects the availability of another activity.
    """
    cmid: int
    modname: str
    instance: int
    state: int
    timecompleted: int
    tracking: int
    overrideby: Optional[int]
    valueused: Optional[int]


@dataclass
class ActivityCompletionStatus(ResponsesFactory[ActivityCompletion]):
    """Activity Completion Statuses (List of activities completion status)

    Args:
        statuses (List[Activity]): List of activities completion status
        warnings (List[Warning]): list of warnings
    """
    statuses: List[ActivityCompletion] = field(factory=list)
    warnings: List[MoodleWarning] = field(factory=list)

    @property
    def items(self) -> List[ActivityCompletion]:
        return self.statuses
[ "moodle.attr.field" ]
[((1275, 1294), 'moodle.attr.field', 'field', ([], {'factory': 'list'}), '(factory=list)\n', (1280, 1294), False, 'from moodle.attr import dataclass, field\n'), ((1331, 1350), 'moodle.attr.field', 'field', ([], {'factory': 'list'}), '(factory=list)\n', (1336, 1350), False, 'from moodle.attr import dataclass, field\n')]
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from spaceone.api.power_scheduler.v1 import schedule_rule_pb2 as spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2


class ScheduleRuleStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.create = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/create',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.CreateScheduleRuleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
                )
        self.update = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/update',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.UpdateScheduleRuleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
                )
        self.delete = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/delete',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleRequest.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
        self.get = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/get',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.GetScheduleRuleRequest.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
                )
        self.list = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/list',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleQuery.SerializeToString,
                response_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RulesInfo.FromString,
                )
        self.stat = channel.unary_unary(
                '/spaceone.api.power_scheduler.v1.ScheduleRule/stat',
                request_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleStatQuery.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_struct__pb2.Struct.FromString,
                )


class ScheduleRuleServicer(object):
    """Missing associated documentation comment in .proto file."""

    def create(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def update(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def delete(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def get(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def list(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def stat(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_ScheduleRuleServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'create': grpc.unary_unary_rpc_method_handler(
                    servicer.create,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.CreateScheduleRuleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString,
            ),
            'update': grpc.unary_unary_rpc_method_handler(
                    servicer.update,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.UpdateScheduleRuleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString,
            ),
            'delete': grpc.unary_unary_rpc_method_handler(
                    servicer.delete,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleRequest.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
            'get': grpc.unary_unary_rpc_method_handler(
                    servicer.get,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.GetScheduleRuleRequest.FromString,
                    response_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString,
            ),
            'list': grpc.unary_unary_rpc_method_handler(
                    servicer.list,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleQuery.FromString,
                    response_serializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RulesInfo.SerializeToString,
            ),
            'stat': grpc.unary_unary_rpc_method_handler(
                    servicer.stat,
                    request_deserializer=spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleStatQuery.FromString,
                    response_serializer=google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'spaceone.api.power_scheduler.v1.ScheduleRule', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class ScheduleRule(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/create',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.CreateScheduleRuleRequest.SerializeToString,
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/update',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.UpdateScheduleRuleRequest.SerializeToString,
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/delete',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/get',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.GetScheduleRuleRequest.SerializeToString,
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def list(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/list',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleQuery.SerializeToString,
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RulesInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def stat(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/spaceone.api.power_scheduler.v1.ScheduleRule/stat',
            spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleStatQuery.SerializeToString,
            google_dot_protobuf_dot_struct__pb2.Struct.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
[ "grpc.method_handlers_generic_handler", "grpc.unary_unary_rpc_method_handler", "grpc.experimental.unary_unary" ]
[((7212, 7322), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""spaceone.api.power_scheduler.v1.ScheduleRule"""', 'rpc_method_handlers'], {}), "(\n 'spaceone.api.power_scheduler.v1.ScheduleRule', rpc_method_handlers)\n", (7248, 7322), False, 'import grpc\n'), ((4936, 5253), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.create'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.CreateScheduleRuleRequest.FromString', 'response_serializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString'}), '(servicer.create, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n CreateScheduleRuleRequest.FromString, response_serializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.SerializeToString)\n', (4971, 5253), False, 'import grpc\n'), ((5332, 5649), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.update'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.UpdateScheduleRuleRequest.FromString', 'response_serializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString'}), '(servicer.update, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n UpdateScheduleRuleRequest.FromString, response_serializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.SerializeToString)\n', (5367, 5649), False, 'import grpc\n'), ((5728, 5997), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.delete'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleRequest.FromString', 'response_serializer': 'google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString'}), '(servicer.delete, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleRequest.FromString, response_serializer=\n google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString)\n', (5763, 5997), False, 'import grpc\n'), ((6078, 6389), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.get'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.GetScheduleRuleRequest.FromString', 'response_serializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.SerializeToString'}), '(servicer.get, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n GetScheduleRuleRequest.FromString, response_serializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.SerializeToString)\n', (6113, 6389), False, 'import grpc\n'), ((6466, 6774), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.list'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleQuery.FromString', 'response_serializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RulesInfo.SerializeToString'}), '(servicer.list, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleQuery.FromString, response_serializer=\n 
spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RulesInfo.SerializeToString)\n', (6501, 6774), False, 'import grpc\n'), ((6851, 7122), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.stat'], {'request_deserializer': 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleStatQuery.FromString', 'response_serializer': 'google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString'}), '(servicer.stat, request_deserializer=\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleStatQuery.FromString, response_serializer=\n google_dot_protobuf_dot_struct__pb2.Struct.SerializeToString)\n', (6886, 7122), False, 'import grpc\n'), ((7850, 8288), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/create"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.CreateScheduleRuleRequest.SerializeToString', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/create',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n CreateScheduleRuleRequest.SerializeToString,\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (7879, 8288), False, 'import grpc\n'), ((8631, 9069), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/update"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.UpdateScheduleRuleRequest.SerializeToString', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/update',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n UpdateScheduleRuleRequest.SerializeToString,\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (8660, 9069), False, 'import grpc\n'), ((9412, 9806), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/delete"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleRequest.SerializeToString', 'google_dot_protobuf_dot_empty__pb2.Empty.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/delete',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleRequest.SerializeToString,\n google_dot_protobuf_dot_empty__pb2.Empty.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (9441, 9806), False, 'import grpc\n'), ((10147, 10579), 'grpc.experimental.unary_unary', 
'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/get"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.GetScheduleRuleRequest.SerializeToString', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RuleInfo.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/get',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n GetScheduleRuleRequest.SerializeToString,\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RuleInfo.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (10176, 10579), False, 'import grpc\n'), ((10920, 11349), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/list"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleQuery.SerializeToString', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.RulesInfo.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/list',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleQuery.SerializeToString,\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n RulesInfo.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (10949, 11349), False, 'import grpc\n'), ((11690, 12086), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/spaceone.api.power_scheduler.v1.ScheduleRule/stat"""', 'spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.ScheduleRuleStatQuery.SerializeToString', 'google_dot_protobuf_dot_struct__pb2.Struct.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/spaceone.api.power_scheduler.v1.ScheduleRule/stat',\n spaceone_dot_api_dot_power__scheduler_dot_v1_dot_schedule__rule__pb2.\n ScheduleRuleStatQuery.SerializeToString,\n google_dot_protobuf_dot_struct__pb2.Struct.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (11719, 12086), False, 'import grpc\n')]
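A minimal sketch of how the generated client above is typically consumed, assuming the stub module is importable under the conventional _pb2_grpc name; the channel address and the empty query are placeholders, not part of the generated file:

import grpc
from spaceone.api.power_scheduler.v1 import schedule_rule_pb2, schedule_rule_pb2_grpc

with grpc.insecure_channel('localhost:50051') as channel:  # hypothetical address
    stub = schedule_rule_pb2_grpc.ScheduleRuleStub(channel)
    query = schedule_rule_pb2.ScheduleRuleQuery()  # fields left at their defaults
    rules = stub.list(query)  # returns a RulesInfo message
    print(rules)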
import sys, os

sys.path.append('yolov3_detector')
from yolov3_custom_helper import yolo_detector
from darknet import Darknet

sys.path.append('pytorch-YOLOv4')
from tool.darknet2pytorch import Darknet as DarknetYolov4
import argparse
import cv2
import time
import numpy as np
from tool.plateprocessing import find_coordinates, plate_to_string, padder, get_color
from tool.utils import alphanumeric_segemntor, plot_boxes_cv2
from tool.torch_utils import *  # provides do_detect
from utility_codes.tsv_converter import ConverterTSV

use_cuda = True

#################### Vehicle ####################
cfg_v4_veh = '/home/himanshu/pytorch-YOLOv4/cfg/yolov4.cfg'
weight_v4_veh = 'weights/yolov4.weights'
m_vehicle = DarknetYolov4(cfg_v4_veh)
m_vehicle.load_weights(weight_v4_veh)
num_classes = m_vehicle.num_classes
# class_names_veh = {'car': 2, 'motorbike': 3, 'bus': 5, 'truck': 7}
class_names_veh = ['car', 'motorbike', 'bus', 'truck']
print('Loading weights from %s... Done!' % weight_v4_veh)
if use_cuda:
    m_vehicle.cuda()
    # m_alpha.cuda()
    # yolo_vehicle.cuda()

print("Starting Detection...")
image_dir = 'SIH_hackathon/Detection_Day3/Day3'
image_files = os.listdir(image_dir)
image_files.sort()

OUTPUT_SIZE = (1280, 720)
vehicle_save_filename = 'tsv_files/vehicle_tester.tsv'
vehicle_writer = ConverterTSV(vehicle_save_filename, file_type='vehicle')

cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
for img_name in image_files:
    frame = cv2.imread(os.path.join(image_dir, img_name))  # give the frame here
    h, w = frame.shape[0:2]
    sized = cv2.resize(frame, (m_vehicle.width, m_vehicle.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
    # frame = cv2.resize(frame, OUTPUT_SIZE, interpolation=cv2.INTER_AREA)

    confidence_vehicle = 0.25
    boxes = do_detect(m_vehicle, sized, confidence_vehicle, 0.3, use_cuda)
    result_img, cls_conf_plate, coordinates_all, labels = plot_boxes_cv2(
        frame, boxes[0], classes_to_detect=class_names_veh,
        fontScale=0.5, thick=2, savename=False)
    cls_conf_plate = float(cls_conf_plate)
    for i, co in enumerate(coordinates_all):
        print(co)
        vehicle_writer.put_vehicle(img_name, co, labels[i])
    cv2.imshow('Image', result_img)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

cv2.destroyAllWindows()
[ "sys.path.append", "cv2.resize", "os.path.join", "cv2.cvtColor", "cv2.waitKey", "cv2.imshow", "tool.utils.plot_boxes_cv2", "utility_codes.tsv_converter.ConverterTSV", "cv2.destroyAllWindows", "tool.darknet2pytorch.Darknet", "os.listdir", "cv2.namedWindow" ]
[((15, 49), 'sys.path.append', 'sys.path.append', (['"""yolov3_detector"""'], {}), "('yolov3_detector')\n", (30, 49), False, 'import sys, os\n'), ((125, 158), 'sys.path.append', 'sys.path.append', (['"""pytorch-YOLOv4"""'], {}), "('pytorch-YOLOv4')\n", (140, 158), False, 'import sys, os\n'), ((709, 734), 'tool.darknet2pytorch.Darknet', 'DarknetYolov4', (['cfg_v4_veh'], {}), '(cfg_v4_veh)\n', (722, 734), True, 'from tool.darknet2pytorch import Darknet as DarknetYolov4\n'), ((1153, 1174), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (1163, 1174), False, 'import sys, os\n'), ((1346, 1402), 'utility_codes.tsv_converter.ConverterTSV', 'ConverterTSV', (['vehicle_save_filename'], {'file_type': '"""vehicle"""'}), "(vehicle_save_filename, file_type='vehicle')\n", (1358, 1402), False, 'from utility_codes.tsv_converter import ConverterTSV\n'), ((1402, 1445), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Image"""', 'cv2.WINDOW_NORMAL'], {}), "('Image', cv2.WINDOW_NORMAL)\n", (1417, 1445), False, 'import cv2, time\n'), ((2377, 2400), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2398, 2400), False, 'import cv2, time\n'), ((1609, 1663), 'cv2.resize', 'cv2.resize', (['frame', '(m_vehicle.width, m_vehicle.height)'], {}), '(frame, (m_vehicle.width, m_vehicle.height))\n', (1619, 1663), False, 'import cv2, time\n'), ((1673, 1711), 'cv2.cvtColor', 'cv2.cvtColor', (['sized', 'cv2.COLOR_BGR2RGB'], {}), '(sized, cv2.COLOR_BGR2RGB)\n', (1685, 1711), False, 'import cv2, time\n'), ((2006, 2116), 'tool.utils.plot_boxes_cv2', 'plot_boxes_cv2', (['frame', 'boxes[0]'], {'classes_to_detect': 'class_names_veh', 'fontScale': '(0.5)', 'thick': '(2)', 'savename': '(False)'}), '(frame, boxes[0], classes_to_detect=class_names_veh,\n fontScale=0.5, thick=2, savename=False)\n', (2020, 2116), False, 'from tool.utils import alphanumeric_segemntor, plot_boxes_cv2\n'), ((2294, 2325), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'result_img'], {}), "('Image', result_img)\n", (2304, 2325), False, 'import cv2, time\n'), ((1496, 1529), 'os.path.join', 'os.path.join', (['image_dir', 'img_name'], {}), '(image_dir, img_name)\n', (1508, 1529), False, 'import sys, os\n'), ((2330, 2344), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2341, 2344), False, 'import cv2, time\n')]
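ConverterTSV is a project-local helper, so the exact file layout it writes is an assumption; if put_vehicle() emits one tab-separated row per detection (image name, box coordinates, label), the output can be inspected afterwards with the standard csv module:

import csv

# Assumed layout per row: image name, box coordinates, class label.
with open('tsv_files/vehicle_tester.tsv', newline='') as fp:
    for row in csv.reader(fp, delimiter='\t'):
        print(row)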
"""The tests for local file camera component.""" from unittest.mock import mock_open, patch, PropertyMock import pytest from homeassistant.components import camera from homeassistant.components.camera import STATE_STREAMING, STATE_IDLE from homeassistant.exceptions import HomeAssistantError from homeassistant.setup import async_setup_component @pytest.fixture def demo_camera(hass): """Initialize a demo camera platform.""" hass.loop.run_until_complete(async_setup_component(hass, 'camera', { camera.DOMAIN: { 'platform': 'demo' } })) return hass.data['camera'].get_entity('camera.demo_camera') async def test_init_state_is_streaming(hass, demo_camera): """Demo camera initialize as streaming.""" assert demo_camera.state == STATE_STREAMING mock_on_img = mock_open(read_data=b'ON') with patch('homeassistant.components.camera.demo.open', mock_on_img, create=True): image = await camera.async_get_image(hass, demo_camera.entity_id) assert mock_on_img.called assert mock_on_img.call_args_list[0][0][0][-6:] \ in ['_0.jpg', '_1.jpg', '_2.jpg', '_3.jpg'] assert image.content == b'ON' async def test_turn_on_state_back_to_streaming(hass, demo_camera): """After turn on state back to streaming.""" assert demo_camera.state == STATE_STREAMING await camera.async_turn_off(hass, demo_camera.entity_id) await hass.async_block_till_done() assert demo_camera.state == STATE_IDLE await camera.async_turn_on(hass, demo_camera.entity_id) await hass.async_block_till_done() assert demo_camera.state == STATE_STREAMING async def test_turn_off_image(hass, demo_camera): """After turn off, Demo camera raise error.""" await camera.async_turn_off(hass, demo_camera.entity_id) await hass.async_block_till_done() with pytest.raises(HomeAssistantError) as error: await camera.async_get_image(hass, demo_camera.entity_id) assert error.args[0] == 'Camera is off' async def test_turn_off_invalid_camera(hass, demo_camera): """Turn off non-exist camera should quietly fail.""" assert demo_camera.state == STATE_STREAMING await camera.async_turn_off(hass, 'camera.invalid_camera') await hass.async_block_till_done() assert demo_camera.state == STATE_STREAMING async def test_turn_off_unsupport_camera(hass, demo_camera): """Turn off unsupported camera should quietly fail.""" assert demo_camera.state == STATE_STREAMING with patch('homeassistant.components.camera.demo.DemoCamera' '.supported_features', new_callable=PropertyMock) as m: m.return_value = 0 await camera.async_turn_off(hass, demo_camera.entity_id) await hass.async_block_till_done() assert demo_camera.state == STATE_STREAMING async def test_motion_detection(hass): """Test motion detection services.""" # Setup platform await async_setup_component(hass, 'camera', { 'camera': { 'platform': 'demo' } }) # Fetch state and check motion detection attribute state = hass.states.get('camera.demo_camera') assert not state.attributes.get('motion_detection') # Call service to turn on motion detection camera.enable_motion_detection(hass, 'camera.demo_camera') await hass.async_block_till_done() # Check if state has been updated. state = hass.states.get('camera.demo_camera') assert state.attributes.get('motion_detection')
[ "homeassistant.components.camera.async_turn_off", "homeassistant.setup.async_setup_component", "homeassistant.components.camera.async_get_image", "unittest.mock.patch", "homeassistant.components.camera.async_turn_on", "pytest.raises", "unittest.mock.mock_open", "homeassistant.components.camera.enable_motion_detection" ]
[((820, 846), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': "b'ON'"}), "(read_data=b'ON')\n", (829, 846), False, 'from unittest.mock import mock_open, patch, PropertyMock\n'), ((3283, 3341), 'homeassistant.components.camera.enable_motion_detection', 'camera.enable_motion_detection', (['hass', '"""camera.demo_camera"""'], {}), "(hass, 'camera.demo_camera')\n", (3313, 3341), False, 'from homeassistant.components import camera\n'), ((467, 543), 'homeassistant.setup.async_setup_component', 'async_setup_component', (['hass', '"""camera"""', "{camera.DOMAIN: {'platform': 'demo'}}"], {}), "(hass, 'camera', {camera.DOMAIN: {'platform': 'demo'}})\n", (488, 543), False, 'from homeassistant.setup import async_setup_component\n'), ((856, 932), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.camera.demo.open"""', 'mock_on_img'], {'create': '(True)'}), "('homeassistant.components.camera.demo.open', mock_on_img, create=True)\n", (861, 932), False, 'from unittest.mock import mock_open, patch, PropertyMock\n'), ((1385, 1435), 'homeassistant.components.camera.async_turn_off', 'camera.async_turn_off', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (1406, 1435), False, 'from homeassistant.components import camera\n'), ((1530, 1579), 'homeassistant.components.camera.async_turn_on', 'camera.async_turn_on', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (1550, 1579), False, 'from homeassistant.components import camera\n'), ((1781, 1831), 'homeassistant.components.camera.async_turn_off', 'camera.async_turn_off', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (1802, 1831), False, 'from homeassistant.components import camera\n'), ((1881, 1914), 'pytest.raises', 'pytest.raises', (['HomeAssistantError'], {}), '(HomeAssistantError)\n', (1894, 1914), False, 'import pytest\n'), ((2215, 2267), 'homeassistant.components.camera.async_turn_off', 'camera.async_turn_off', (['hass', '"""camera.invalid_camera"""'], {}), "(hass, 'camera.invalid_camera')\n", (2236, 2267), False, 'from homeassistant.components import camera\n'), ((2535, 2641), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.camera.demo.DemoCamera.supported_features"""'], {'new_callable': 'PropertyMock'}), "('homeassistant.components.camera.demo.DemoCamera.supported_features',\n new_callable=PropertyMock)\n", (2540, 2641), False, 'from unittest.mock import mock_open, patch, PropertyMock\n'), ((2961, 3032), 'homeassistant.setup.async_setup_component', 'async_setup_component', (['hass', '"""camera"""', "{'camera': {'platform': 'demo'}}"], {}), "(hass, 'camera', {'camera': {'platform': 'demo'}})\n", (2982, 3032), False, 'from homeassistant.setup import async_setup_component\n'), ((971, 1022), 'homeassistant.components.camera.async_get_image', 'camera.async_get_image', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (993, 1022), False, 'from homeassistant.components import camera\n'), ((1939, 1990), 'homeassistant.components.camera.async_get_image', 'camera.async_get_image', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (1961, 1990), False, 'from homeassistant.components import camera\n'), ((2704, 2754), 'homeassistant.components.camera.async_turn_off', 'camera.async_turn_off', (['hass', 'demo_camera.entity_id'], {}), '(hass, demo_camera.entity_id)\n', (2725, 2754), False, 'from homeassistant.components import camera\n')]
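The supported_features patch in test_turn_off_unsupport_camera uses the new_callable=PropertyMock idiom, which is easy to miss; in isolation the pattern looks like this (Thing is a made-up class for illustration):

from unittest.mock import patch, PropertyMock


class Thing:
    @property
    def supported_features(self):
        return 42


# Patching the property on the class replaces it with a PropertyMock,
# so reads on any instance return the mock's return_value.
with patch.object(Thing, 'supported_features', new_callable=PropertyMock) as m:
    m.return_value = 0
    assert Thing().supported_features == 0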
import os
import subprocess

LIBRARY_DEPENDENCY = "dev-dependencies"


def create_directory(path):
    if not os.path.exists(path):
        os.makedirs(path)


def create_directories(directory_list: list):
    for directory in directory_list:
        create_directory(directory)


def execute_command(home, command):
    subprocess.run(command, shell=True, cwd=home)


def get_git():
    git = "git"
    exported_git_path = os.environ.get('git_path')
    if exported_git_path:
        git = "\"" + exported_git_path + "\""
    return git


def git_command(command):
    return get_git() + " " + command


def pull_project(home):
    git_directory = home + "/.git"
    if os.path.exists(git_directory):
        execute_command(home, git_command("pull"))


def clone_project(root, project, url):
    git_branch = os.environ.get('gitBranch')
    branch = ""
    if git_branch and git_branch != "":
        branch += "-b " + git_branch + " "
    if url != "":
        command = git_command("clone ") + branch + url + " " + project
        execute_command(root, command)


def setup_project(home):
    module_directory = home + "/setup.py"
    if os.path.exists(module_directory):
        execute_command(home, "python setup.py develop")


def clone_and_setup(root, project, url, path):
    if not os.path.exists(path):
        clone_project(root, project, url)
        setup_project(path)


def pull_and_setup_project(home):
    pull_project(home)
    setup_project(home)


def clone_pull_setup(projects: dict):
    root = projects['dir']
    create_directory(root)
    repositories: dict = projects['repositories']
    repository_names = repositories.keys()
    for name in repository_names:
        print("\n\n\n\n-------------------------------------------------------------------------------------")
        print("Working with repository: " + name + ", source: " + root)
        print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        path = os.path.join(root, name)
        repository = repositories.get(name)
        clone_and_setup(root, name, repository, path)
        pull_and_setup_project(path)
        print("-------------------------------------------------------------------------------------")


source_projects = {
    "dir": LIBRARY_DEPENDENCY,
    "repositories": {
        "xxxxxx": "https://github.com/problemfighter/xxxxxxx.git",
    }
}


def bismillah_sw():
    clone_pull_setup(source_projects)


if __name__ == '__main__':
    bismillah_sw()
[ "subprocess.run", "os.makedirs", "os.path.exists", "os.environ.get", "os.path.join" ]
[((321, 366), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'cwd': 'home'}), '(command, shell=True, cwd=home)\n', (335, 366), False, 'import subprocess\n'), ((424, 450), 'os.environ.get', 'os.environ.get', (['"""git_path"""'], {}), "('git_path')\n", (438, 450), False, 'import os\n'), ((671, 700), 'os.path.exists', 'os.path.exists', (['git_directory'], {}), '(git_directory)\n', (685, 700), False, 'import os\n'), ((811, 838), 'os.environ.get', 'os.environ.get', (['"""gitBranch"""'], {}), "('gitBranch')\n", (825, 838), False, 'import os\n'), ((1143, 1175), 'os.path.exists', 'os.path.exists', (['module_directory'], {}), '(module_directory)\n', (1157, 1175), False, 'import os\n'), ((110, 130), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (124, 130), False, 'import os\n'), ((140, 157), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (151, 157), False, 'import os\n'), ((1294, 1314), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1308, 1314), False, 'import os\n'), ((1991, 2015), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (2003, 2015), False, 'import os\n')]
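clone_pull_setup() only needs a dict with 'dir' and 'repositories' keys, so the script can be reused beyond its hard-coded source_projects; the project name, URL, and branch below are placeholders:

import os

my_projects = {
    "dir": "third-party",  # checkout root, created if missing
    "repositories": {
        "example-lib": "https://github.com/example/example-lib.git",  # placeholder repo
    },
}

os.environ["gitBranch"] = "main"  # optional: clone a specific branch
clone_pull_setup(my_projects)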
# Import the math library #
import math
from math import log, sin, pi

# Read the values of a and x #
a = float(input("Enter a: "))
x = float(input("Enter x: "))

# Choose a formula #
print(" 1 - Compute function G\n "
      " 2 - Compute function F\n "
      " 3 - Compute function Y\n ")
num1 = float(input('Value: '))

if num1 == 1:
    if (x != 0) and (a != 0):
        G = (-2 * (-5 * (a**2) + 3 * a * x + 2 * (x**2))) / (5 * (a**2) + 9 * a * x - 2 * (x**2))
        print('A = {}, X = {}, Result: {}'.format(a, x, G))
    else:
        print(" Enter a valid value ")
elif num1 == 2:
    F = sin(pi * (10 * (a**2) + 37 * a * x + 7 * (x**2))) / (pi * (10 * (a**2) + 37 * a * x + 7 * (x**2)))
    print('A = {}, X = {}, Result: {}'.format(a, x, F))
elif num1 == 3:
    if a < 0:
        Y = log(-5 * (a**2) - 16 * a * x + 16 * (x**2) + 1) / log(2)
        print('A = {}, X = {}, Result: {}'.format(a, x, Y))
    else:
        print(" Enter a negative number ")
else:
    print("No such option")
[ "math.log", "math.sin" ]
[((586, 635), 'math.sin', 'sin', (['(pi * (10 * a ** 2 + 37 * a * x + 7 * x ** 2))'], {}), '(pi * (10 * a ** 2 + 37 * a * x + 7 * x ** 2))\n', (589, 635), False, 'from math import log, sin, pi\n'), ((755, 802), 'math.log', 'log', (['(-5 * a ** 2 - 16 * a * x + 16 * x ** 2 + 1)'], {}), '(-5 * a ** 2 - 16 * a * x + 16 * x ** 2 + 1)\n', (758, 802), False, 'from math import log, sin, pi\n'), ((791, 797), 'math.log', 'log', (['(2)'], {}), '(2)\n', (794, 797), False, 'from math import log, sin, pi\n')]
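As a quick non-interactive sanity check, the three formulas can be evaluated directly; a = -1, x = 1 is chosen so every branch is defined (Y needs a < 0, and none of the denominators vanish):

from math import log, sin, pi

a, x = -1.0, 1.0

G = (-2 * (-5 * a**2 + 3 * a * x + 2 * x**2)) / (5 * a**2 + 9 * a * x - 2 * x**2)
F = sin(pi * (10 * a**2 + 37 * a * x + 7 * x**2)) / (pi * (10 * a**2 + 37 * a * x + 7 * x**2))
Y = log(-5 * a**2 - 16 * a * x + 16 * x**2 + 1) / log(2)

print(G, F, Y)  # -2.0, ~0.0 (sin of a multiple of pi), log2(28) = 4.807...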
import json

import pytest
from builtins import str
from httpretty import httpretty

from rasa_core import utils
from rasa_core.utils import EndpointConfig


def test_is_int():
    assert utils.is_int(1)
    assert utils.is_int(1.0)
    assert not utils.is_int(None)
    assert not utils.is_int(1.2)
    assert not utils.is_int("test")


def test_subsample_array_read_only():
    t = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    r = utils.subsample_array(t, 5, can_modify_incoming_array=False)

    assert len(r) == 5
    assert set(r).issubset(t)


def test_subsample_array():
    t = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    # this will modify the original array and shuffle it
    r = utils.subsample_array(t, 5)

    assert len(r) == 5
    assert set(r).issubset(t)


def test_on_hot():
    r = utils.one_hot(4, 6)
    assert (r[[0, 1, 2, 3, 5]] == 0).all()
    assert r[4] == 1


def test_on_hot_out_of_range():
    with pytest.raises(ValueError):
        utils.one_hot(4, 3)


def test_list_routes(default_agent):
    from rasa_core import server
    app = server.create_app(default_agent, auth_token=None)

    routes = utils.list_routes(app)
    assert len(routes) > 0


def test_cap_length():
    assert utils.cap_length("mystring", 6) == "mys..."


def test_cap_length_without_ellipsis():
    assert utils.cap_length("mystring", 3,
                            append_ellipsis=False) == "mys"


def test_cap_length_with_short_string():
    assert utils.cap_length("my", 3) == "my"


def test_read_lines():
    lines = utils.read_lines("data/test_stories/stories.md",
                             max_line_limit=2,
                             line_pattern=r"\*.*")

    lines = list(lines)

    assert len(lines) == 2


def test_endpoint_config():
    endpoint = EndpointConfig(
        "https://abc.defg/",
        params={"A": "B"},
        headers={"X-Powered-By": "Rasa"},
        basic_auth={"username": "user", "password": "<PASSWORD>"},
        token="mytoken",
        token_name="letoken"
    )

    httpretty.register_uri(
        httpretty.POST,
        'https://abc.defg/test',
        status=500,
        body='')

    httpretty.enable()
    endpoint.request("post", subpath="test",
                     content_type="application/text",
                     json={"c": "d"},
                     params={"P": "1"})
    httpretty.disable()

    r = httpretty.latest_requests[-1]

    assert json.loads(str(r.body.decode("utf-8"))) == {"c": "d"}
    assert r.headers.get("X-Powered-By") == "Rasa"
    assert r.headers.get("Authorization") == "Basic dXNlcjpwYXNz"
    assert r.querystring.get("A") == ["B"]
    assert r.querystring.get("P") == ["1"]
    assert r.querystring.get("letoken") == ["mytoken"]
[ "rasa_core.utils.subsample_array", "rasa_core.utils.one_hot", "rasa_core.utils.is_int", "httpretty.httpretty.register_uri", "rasa_core.utils.EndpointConfig", "rasa_core.utils.cap_length", "rasa_core.server.create_app", "httpretty.httpretty.enable", "rasa_core.utils.list_routes", "rasa_core.utils.read_lines", "pytest.raises", "httpretty.httpretty.disable" ]
[((188, 203), 'rasa_core.utils.is_int', 'utils.is_int', (['(1)'], {}), '(1)\n', (200, 203), False, 'from rasa_core import utils\n'), ((215, 232), 'rasa_core.utils.is_int', 'utils.is_int', (['(1.0)'], {}), '(1.0)\n', (227, 232), False, 'from rasa_core import utils\n'), ((424, 484), 'rasa_core.utils.subsample_array', 'utils.subsample_array', (['t', '(5)'], {'can_modify_incoming_array': '(False)'}), '(t, 5, can_modify_incoming_array=False)\n', (445, 484), False, 'from rasa_core import utils\n'), ((704, 731), 'rasa_core.utils.subsample_array', 'utils.subsample_array', (['t', '(5)'], {}), '(t, 5)\n', (725, 731), False, 'from rasa_core import utils\n'), ((815, 834), 'rasa_core.utils.one_hot', 'utils.one_hot', (['(4)', '(6)'], {}), '(4, 6)\n', (828, 834), False, 'from rasa_core import utils\n'), ((1079, 1128), 'rasa_core.server.create_app', 'server.create_app', (['default_agent'], {'auth_token': 'None'}), '(default_agent, auth_token=None)\n', (1096, 1128), False, 'from rasa_core import server\n'), ((1143, 1165), 'rasa_core.utils.list_routes', 'utils.list_routes', (['app'], {}), '(app)\n', (1160, 1165), False, 'from rasa_core import utils\n'), ((1543, 1635), 'rasa_core.utils.read_lines', 'utils.read_lines', (['"""data/test_stories/stories.md"""'], {'max_line_limit': '(2)', 'line_pattern': '"""\\\\*.*"""'}), "('data/test_stories/stories.md', max_line_limit=2,\n line_pattern='\\\\*.*')\n", (1559, 1635), False, 'from rasa_core import utils\n'), ((1787, 1982), 'rasa_core.utils.EndpointConfig', 'EndpointConfig', (['"""https://abc.defg/"""'], {'params': "{'A': 'B'}", 'headers': "{'X-Powered-By': 'Rasa'}", 'basic_auth': "{'username': 'user', 'password': '<PASSWORD>'}", 'token': '"""mytoken"""', 'token_name': '"""letoken"""'}), "('https://abc.defg/', params={'A': 'B'}, headers={\n 'X-Powered-By': 'Rasa'}, basic_auth={'username': 'user', 'password':\n '<PASSWORD>'}, token='mytoken', token_name='letoken')\n", (1801, 1982), False, 'from rasa_core.utils import EndpointConfig\n'), ((2081, 2169), 'httpretty.httpretty.register_uri', 'httpretty.register_uri', (['httpretty.POST', '"""https://abc.defg/test"""'], {'status': '(500)', 'body': '""""""'}), "(httpretty.POST, 'https://abc.defg/test', status=500,\n body='')\n", (2103, 2169), False, 'from httpretty import httpretty\n'), ((2220, 2238), 'httpretty.httpretty.enable', 'httpretty.enable', ([], {}), '()\n', (2236, 2238), False, 'from httpretty import httpretty\n'), ((2420, 2439), 'httpretty.httpretty.disable', 'httpretty.disable', ([], {}), '()\n', (2437, 2439), False, 'from httpretty import httpretty\n'), ((248, 266), 'rasa_core.utils.is_int', 'utils.is_int', (['None'], {}), '(None)\n', (260, 266), False, 'from rasa_core import utils\n'), ((282, 299), 'rasa_core.utils.is_int', 'utils.is_int', (['(1.2)'], {}), '(1.2)\n', (294, 299), False, 'from rasa_core import utils\n'), ((315, 335), 'rasa_core.utils.is_int', 'utils.is_int', (['"""test"""'], {}), "('test')\n", (327, 335), False, 'from rasa_core import utils\n'), ((942, 967), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (955, 967), False, 'import pytest\n'), ((977, 996), 'rasa_core.utils.one_hot', 'utils.one_hot', (['(4)', '(3)'], {}), '(4, 3)\n', (990, 996), False, 'from rasa_core import utils\n'), ((1229, 1260), 'rasa_core.utils.cap_length', 'utils.cap_length', (['"""mystring"""', '(6)'], {}), "('mystring', 6)\n", (1245, 1260), False, 'from rasa_core import utils\n'), ((1326, 1380), 'rasa_core.utils.cap_length', 'utils.cap_length', (['"""mystring"""', '(3)'], {'append_ellipsis': 
'(False)'}), "('mystring', 3, append_ellipsis=False)\n", (1342, 1380), False, 'from rasa_core import utils\n'), ((1472, 1497), 'rasa_core.utils.cap_length', 'utils.cap_length', (['"""my"""', '(3)'], {}), "('my', 3)\n", (1488, 1497), False, 'from rasa_core import utils\n')]
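The EndpointConfig constructed in test_endpoint_config can also be used directly against a live service; the URL, token, and payload here are placeholders, and the call is assumed to return the underlying HTTP response object:

from rasa_core.utils import EndpointConfig

endpoint = EndpointConfig("https://example.org/api/",  # hypothetical service
                          token="mytoken", token_name="token")
response = endpoint.request("post", subpath="webhook", json={"event": "ping"})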
"""Monitor Docker main component.""" import asyncio import logging import time import threading import voluptuous as vol from datetime import timedelta import homeassistant.helpers.config_validation as cv from .helpers import DockerAPI from homeassistant.const import ( CONF_MONITORED_CONDITIONS, CONF_NAME, CONF_SCAN_INTERVAL, CONF_URL, ) from .const import ( API, CONF_CERTPATH, CONF_CONTAINERS, CONF_CONTAINERS_EXCLUDE, CONF_MEMORYCHANGE, CONF_PRECISION_CPU, CONF_PRECISION_MEMORY_MB, CONF_PRECISION_MEMORY_PERCENTAGE, CONF_PRECISION_NETWORK_KB, CONF_PRECISION_NETWORK_MB, CONF_PREFIX, CONF_RENAME, CONF_RETRY, CONF_SENSORNAME, CONF_SWITCHENABLED, CONF_SWITCHNAME, CONFIG, CONTAINER_INFO_ALLINONE, DOMAIN, DEFAULT_NAME, DEFAULT_RETRY, DEFAULT_SENSORNAME, DEFAULT_SWITCHNAME, MONITORED_CONDITIONS_LIST, PRECISION, ) _LOGGER = logging.getLogger(__name__) DEFAULT_SCAN_INTERVAL = timedelta(seconds=10) DOCKER_SCHEMA = vol.Schema( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PREFIX, default=""): cv.string, vol.Optional(CONF_URL, default=None): vol.Any(cv.string, None), vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): cv.time_period, vol.Optional( CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS_LIST ): vol.All( cv.ensure_list, [vol.In(MONITORED_CONDITIONS_LIST + list([CONTAINER_INFO_ALLINONE]))], ), vol.Optional(CONF_CONTAINERS, default=[]): cv.ensure_list, vol.Optional(CONF_CONTAINERS_EXCLUDE, default=[]): cv.ensure_list, vol.Optional(CONF_RENAME, default={}): dict, vol.Optional(CONF_SENSORNAME, default=DEFAULT_SENSORNAME): cv.string, vol.Optional(CONF_SWITCHENABLED, default=True): cv.boolean, vol.Optional(CONF_SWITCHNAME, default=DEFAULT_SWITCHNAME): cv.string, vol.Optional(CONF_CERTPATH, default=""): cv.string, vol.Optional(CONF_RETRY, default=DEFAULT_RETRY): cv.positive_int, vol.Optional(CONF_MEMORYCHANGE, default=100): cv.positive_int, vol.Optional(CONF_PRECISION_CPU, default=PRECISION): cv.positive_int, vol.Optional(CONF_PRECISION_MEMORY_MB, default=PRECISION): cv.positive_int, vol.Optional( CONF_PRECISION_MEMORY_PERCENTAGE, default=PRECISION ): cv.positive_int, vol.Optional(CONF_PRECISION_NETWORK_KB, default=PRECISION): cv.positive_int, vol.Optional(CONF_PRECISION_NETWORK_MB, default=PRECISION): cv.positive_int, } ) CONFIG_SCHEMA = vol.Schema( {DOMAIN: vol.All(cv.ensure_list, [vol.Any(DOCKER_SCHEMA)])}, extra=vol.ALLOW_EXTRA ) ################################################################# async def async_setup(hass, config): """Will setup the Monitor Docker platform.""" def RunDocker(hass, entry): """Wrapper around function for a separated thread.""" # Create out asyncio loop, because we are already inside # a def (not main) we need to do create/set loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) # Create docker instance, it will have asyncio threads hass.data[DOMAIN][entry[CONF_NAME]] = {} hass.data[DOMAIN][entry[CONF_NAME]][CONFIG] = entry startCount = 0 while True: doLoop = True try: hass.data[DOMAIN][entry[CONF_NAME]][API] = DockerAPI( hass, entry, startCount ) except Exception as err: doLoop = False if entry[CONF_RETRY] == 0: raise else: _LOGGER.error("Failed Docker connect: %s", str(err)) _LOGGER.error("Retry in %d seconds", entry[CONF_RETRY]) time.sleep(entry[CONF_RETRY]) startCount += 1 if doLoop: # Now run forever in this separated thread loop.run_forever() # We only get here if a docker instance disconnected or HASS is stopping if not hass.data[DOMAIN][entry[CONF_NAME]][API]._dockerStopped: # If HASS stopped, do not 
retry break # Create domain monitor_docker data variable hass.data[DOMAIN] = {} # Now go through all possible entries, we support 1 or more docker hosts (untested) for entry in config[DOMAIN]: # Check if CONF_MONITORED_CONDITIONS has only ALLINONE, then expand to all if ( len(entry[CONF_MONITORED_CONDITIONS]) == 1 and CONTAINER_INFO_ALLINONE in entry[CONF_MONITORED_CONDITIONS] ): entry[CONF_MONITORED_CONDITIONS] = list(MONITORED_CONDITIONS_LIST) + list( [CONTAINER_INFO_ALLINONE] ) if entry[CONF_NAME] in hass.data[DOMAIN]: _LOGGER.error( "Instance %s is duplicate, please assign an unique name", entry[CONF_NAME], ) return False # Each docker hosts runs in its own thread. We need to pass hass too, for the load_platform thread = threading.Thread( target=RunDocker, kwargs={"hass": hass, "entry": entry} ) thread.start() return True
[ "threading.Thread", "voluptuous.Optional", "voluptuous.Any", "asyncio.set_event_loop", "logging.getLogger", "time.sleep", "datetime.timedelta", "asyncio.new_event_loop" ]
[((951, 978), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (968, 978), False, 'import logging\n'), ((1004, 1025), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (1013, 1025), False, 'from datetime import timedelta\n'), ((1069, 1114), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (1081, 1114), True, 'import voluptuous as vol\n'), ((1135, 1172), 'voluptuous.Optional', 'vol.Optional', (['CONF_PREFIX'], {'default': '""""""'}), "(CONF_PREFIX, default='')\n", (1147, 1172), True, 'import voluptuous as vol\n'), ((1193, 1229), 'voluptuous.Optional', 'vol.Optional', (['CONF_URL'], {'default': 'None'}), '(CONF_URL, default=None)\n', (1205, 1229), True, 'import voluptuous as vol\n'), ((1265, 1328), 'voluptuous.Optional', 'vol.Optional', (['CONF_SCAN_INTERVAL'], {'default': 'DEFAULT_SCAN_INTERVAL'}), '(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL)\n', (1277, 1328), True, 'import voluptuous as vol\n'), ((1354, 1428), 'voluptuous.Optional', 'vol.Optional', (['CONF_MONITORED_CONDITIONS'], {'default': 'MONITORED_CONDITIONS_LIST'}), '(CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS_LIST)\n', (1366, 1428), True, 'import voluptuous as vol\n'), ((1591, 1632), 'voluptuous.Optional', 'vol.Optional', (['CONF_CONTAINERS'], {'default': '[]'}), '(CONF_CONTAINERS, default=[])\n', (1603, 1632), True, 'import voluptuous as vol\n'), ((1658, 1707), 'voluptuous.Optional', 'vol.Optional', (['CONF_CONTAINERS_EXCLUDE'], {'default': '[]'}), '(CONF_CONTAINERS_EXCLUDE, default=[])\n', (1670, 1707), True, 'import voluptuous as vol\n'), ((1733, 1770), 'voluptuous.Optional', 'vol.Optional', (['CONF_RENAME'], {'default': '{}'}), '(CONF_RENAME, default={})\n', (1745, 1770), True, 'import voluptuous as vol\n'), ((1786, 1843), 'voluptuous.Optional', 'vol.Optional', (['CONF_SENSORNAME'], {'default': 'DEFAULT_SENSORNAME'}), '(CONF_SENSORNAME, default=DEFAULT_SENSORNAME)\n', (1798, 1843), True, 'import voluptuous as vol\n'), ((1864, 1910), 'voluptuous.Optional', 'vol.Optional', (['CONF_SWITCHENABLED'], {'default': '(True)'}), '(CONF_SWITCHENABLED, default=True)\n', (1876, 1910), True, 'import voluptuous as vol\n'), ((1932, 1989), 'voluptuous.Optional', 'vol.Optional', (['CONF_SWITCHNAME'], {'default': 'DEFAULT_SWITCHNAME'}), '(CONF_SWITCHNAME, default=DEFAULT_SWITCHNAME)\n', (1944, 1989), True, 'import voluptuous as vol\n'), ((2010, 2049), 'voluptuous.Optional', 'vol.Optional', (['CONF_CERTPATH'], {'default': '""""""'}), "(CONF_CERTPATH, default='')\n", (2022, 2049), True, 'import voluptuous as vol\n'), ((2070, 2117), 'voluptuous.Optional', 'vol.Optional', (['CONF_RETRY'], {'default': 'DEFAULT_RETRY'}), '(CONF_RETRY, default=DEFAULT_RETRY)\n', (2082, 2117), True, 'import voluptuous as vol\n'), ((2144, 2188), 'voluptuous.Optional', 'vol.Optional', (['CONF_MEMORYCHANGE'], {'default': '(100)'}), '(CONF_MEMORYCHANGE, default=100)\n', (2156, 2188), True, 'import voluptuous as vol\n'), ((2215, 2266), 'voluptuous.Optional', 'vol.Optional', (['CONF_PRECISION_CPU'], {'default': 'PRECISION'}), '(CONF_PRECISION_CPU, default=PRECISION)\n', (2227, 2266), True, 'import voluptuous as vol\n'), ((2293, 2350), 'voluptuous.Optional', 'vol.Optional', (['CONF_PRECISION_MEMORY_MB'], {'default': 'PRECISION'}), '(CONF_PRECISION_MEMORY_MB, default=PRECISION)\n', (2305, 2350), True, 'import voluptuous as vol\n'), ((2377, 2442), 'voluptuous.Optional', 'vol.Optional', (['CONF_PRECISION_MEMORY_PERCENTAGE'], 
{'default': 'PRECISION'}), '(CONF_PRECISION_MEMORY_PERCENTAGE, default=PRECISION)\n', (2389, 2442), True, 'import voluptuous as vol\n'), ((2491, 2549), 'voluptuous.Optional', 'vol.Optional', (['CONF_PRECISION_NETWORK_KB'], {'default': 'PRECISION'}), '(CONF_PRECISION_NETWORK_KB, default=PRECISION)\n', (2503, 2549), True, 'import voluptuous as vol\n'), ((2576, 2634), 'voluptuous.Optional', 'vol.Optional', (['CONF_PRECISION_NETWORK_MB'], {'default': 'PRECISION'}), '(CONF_PRECISION_NETWORK_MB, default=PRECISION)\n', (2588, 2634), True, 'import voluptuous as vol\n'), ((1231, 1255), 'voluptuous.Any', 'vol.Any', (['cv.string', 'None'], {}), '(cv.string, None)\n', (1238, 1255), True, 'import voluptuous as vol\n'), ((3162, 3186), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (3184, 3186), False, 'import asyncio\n'), ((3195, 3223), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (3217, 3223), False, 'import asyncio\n'), ((5294, 5367), 'threading.Thread', 'threading.Thread', ([], {'target': 'RunDocker', 'kwargs': "{'hass': hass, 'entry': entry}"}), "(target=RunDocker, kwargs={'hass': hass, 'entry': entry})\n", (5310, 5367), False, 'import threading\n'), ((2728, 2750), 'voluptuous.Any', 'vol.Any', (['DOCKER_SCHEMA'], {}), '(DOCKER_SCHEMA)\n', (2735, 2750), True, 'import voluptuous as vol\n'), ((3946, 3975), 'time.sleep', 'time.sleep', (['entry[CONF_RETRY]'], {}), '(entry[CONF_RETRY])\n', (3956, 3975), False, 'import time\n')]
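Since every key in DOCKER_SCHEMA is optional, a host entry can be validated in isolation; the values below are illustrative and assume the usual key spellings ('name', 'url', 'containers', 'retry', 'monitored_conditions') behind the constants in .const:

# Illustrative only: validate a single host entry against DOCKER_SCHEMA.
entry = DOCKER_SCHEMA(
    {
        "name": "Docker",                     # CONF_NAME
        "url": "unix://var/run/docker.sock",  # CONF_URL
        "containers": ["homeassistant"],      # CONF_CONTAINERS
    }
)
print(entry["retry"], entry["monitored_conditions"])  # defaults filled in by voluptuous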
import os


def get_base_dir():
    return os.path.dirname(__file__)


def get_path_to_file_in_assets(*args):
    return os.path.join(get_base_dir(), 'assets', *args)


def get_path_to_image_file_in_assets(*args):
    return os.path.join(get_base_dir(), 'assets', 'images', *args)


def get_path_to_model_dir_in_assets(*args):
    return os.path.join(get_base_dir(), 'assets', 'models', *args)


if __name__ == "__main__":
    pass
[ "os.path.dirname" ]
[((43, 68), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (58, 68), False, 'import os\n')]
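The helpers simply anchor relative asset paths to the module's own directory; example usage (the file and directory names are hypothetical):

print(get_path_to_file_in_assets('config.json'))     # <module dir>/assets/config.json
print(get_path_to_image_file_in_assets('logo.png'))  # <module dir>/assets/images/logo.png
print(get_path_to_model_dir_in_assets('yolo'))       # <module dir>/assets/models/yolo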
from data_wizard.signals import progress
from data_wizard.settings import get_setting, import_from_string


def create_backend():
    backend_path = get_setting('BACKEND')
    backend_class = import_from_string(backend_path + '.Backend', 'BACKEND')
    backend = backend_class()
    progress.connect(backend.progress, weak=False)
    return backend
[ "data_wizard.signals.progress.connect", "data_wizard.settings.import_from_string", "data_wizard.settings.get_setting" ]
[((149, 171), 'data_wizard.settings.get_setting', 'get_setting', (['"""BACKEND"""'], {}), "('BACKEND')\n", (160, 171), False, 'from data_wizard.settings import get_setting, import_from_string\n'), ((192, 248), 'data_wizard.settings.import_from_string', 'import_from_string', (["(backend_path + '.Backend')", '"""BACKEND"""'], {}), "(backend_path + '.Backend', 'BACKEND')\n", (210, 248), False, 'from data_wizard.settings import get_setting, import_from_string\n'), ((283, 329), 'data_wizard.signals.progress.connect', 'progress.connect', (['backend.progress'], {'weak': '(False)'}), '(backend.progress, weak=False)\n', (299, 329), False, 'from data_wizard.signals import progress\n')]
import math

import force_connectivity
import thinkstats


def Mean(t):
    return float(sum(t)) / len(t)  # true division even on Python 2


def Var(t, mu=None):
    if mu is None:
        mu = Mean(t)
    dev2 = [(x - mu) ** 2 for x in t]
    var = Mean(dev2)
    return var


def std(variance):
    return math.sqrt(variance)


def PartitionRecords(table):
    accepted_sms = force_connectivity.Cases()
    others = force_connectivity.Cases()
    for case in table.records:
        if case.accepted_sms:
            accepted_sms.AddRecord(case)
        else:
            others.AddRecord(case)
    return accepted_sms, others


def Process(table):
    table.percent_outcomes = [c.percent_outcomes_submitted for c in table.records]
    table.n = len(table.percent_outcomes)
    table.mu = Mean(table.percent_outcomes)


def MakeTables(data_dir='.'):
    table = force_connectivity.Cases()
    table.ReadRecords(data_dir)
    accepted_sms, others = PartitionRecords(table)
    return table, accepted_sms, others


def ProcessTables(*tables):
    for table in tables:
        Process(table)


def Summarize(data_dir):
    table, accepted_sms, others = MakeTables(data_dir)
    ProcessTables(accepted_sms, others)
    print('Number accepted sms: %s' % accepted_sms.n)
    print('Number not accepted sms: %s' % others.n)
    mu1, mu2 = accepted_sms.mu, others.mu
    var1, var2 = Var(accepted_sms.percent_outcomes, mu1), Var(others.percent_outcomes, mu2)
    std1, std2 = std(var1), std(var2)
    print("mean outcomes submitted")
    print("accepted sms: %s" % mu1)
    print("others: %s" % mu2)
    print('Std Deviation:')
    print('accepted sms %s' % std1)
    print('others %s' % std2)
    print('difference in std dev: %s' % (std1 - std2))


def main(name, data_dir='.'):
    Summarize(data_dir)


if __name__ == '__main__':
    import sys
    main(*sys.argv)
[ "force_connectivity.Cases", "math.sqrt" ]
[((245, 264), 'math.sqrt', 'math.sqrt', (['variance'], {}), '(variance)\n', (254, 264), False, 'import math\n'), ((312, 338), 'force_connectivity.Cases', 'force_connectivity.Cases', ([], {}), '()\n', (336, 338), False, 'import force_connectivity\n'), ((349, 375), 'force_connectivity.Cases', 'force_connectivity.Cases', ([], {}), '()\n', (373, 375), False, 'import force_connectivity\n'), ((746, 772), 'force_connectivity.Cases', 'force_connectivity.Cases', ([], {}), '()\n', (770, 772), False, 'import force_connectivity\n')]
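Mean() and Var() compute the population (biased) statistics, so they can be cross-checked against the standard library's pvariance/pstdev; the sample list is arbitrary:

import statistics

t = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
assert Mean(t) == statistics.mean(t) == 5.0
assert Var(t) == statistics.pvariance(t) == 4.0
assert std(Var(t)) == statistics.pstdev(t) == 2.0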
#!/usr/bin/env python3

import sys
import re
import json


class CAliasTable:
    """ Provides context to alias mapping """

    def __init__(self):
        self.current_alias = 0
        self.context_to_alias = {}
        self.alias_to_context = {}
        self.alias_description = {}

    def base_add(self, key):
        self.context_to_alias[key] = self.current_alias
        self.alias_to_context[self.current_alias] = key
        self.alias_description[self.current_alias] = "unknown"
        self.current_alias = self.current_alias + 1

    def add(self, pointer_string):
        if pointer_string in self.context_to_alias:
            raise Exception("Context already in use, cannot alias: %s" % pointer_string)
        self.base_add(pointer_string)

    def remove(self, pointer_string):
        if pointer_string in self.context_to_alias:
            del self.context_to_alias[pointer_string]
        else:
            print("Warning: freeing context without clone/init: %s" % pointer_string)

    def clone(self, dest_pointer, source_pointer):
        if dest_pointer in self.context_to_alias:
            print("Warning: cloning existing context: %s into %s" % (source_pointer, dest_pointer))
        self.base_add(dest_pointer)

    def get_alias(self, context):
        if context in self.context_to_alias:
            return self.context_to_alias[context]
        return None

    def get_context(self, alias):
        if alias in self.alias_to_context:
            return self.alias_to_context[alias]
        return None

    def description(self, alias, description=None):
        if description:
            self.alias_description[alias] = description
        elif alias in self.alias_description:
            return self.alias_description[alias]
        return None


# TODO: Need a "validate payload" function
class CParserLibrary:
    def __init__(self, trace_processor):
        self.trace_processor = trace_processor
        self.block_sha = False

    def parse(self, payload):
        function_name = payload['prim']
        function = getattr(self, function_name)  # TODO: Handle when there IS NO function!
        function(payload)

    # Helper functions to make the code smaller
    def helper_add_ctx(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        self.trace_processor.aliases.add(ctx)

    def helper_remove_ctx(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        self.trace_processor.aliases.remove(ctx)

    # SSL State change probe
    def mbedtls_ssl_handshake_client_step(self, payload):
        if payload['dir'] != 'enter':
            return
        self.trace_processor.current_state = int(payload['arg0'])

    # AES ECB functions
    def mbedtls_aes_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_aes_free(self, payload):
        self.helper_remove_ctx(payload)

    def mbedtls_internal_aes_encrypt(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        alias = self.trace_processor.aliases.get_alias(ctx)
        self.trace_processor.post_event(alias, 16, "aes/E")

    def mbedtls_internal_aes_decrypt(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        alias = self.trace_processor.aliases.get_alias(ctx)
        self.trace_processor.post_event(alias, 16, "aes/D")

    # AES/CCM functions
    def mbedtls_ccm_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_ccm_free(self, payload):
        self.helper_remove_ctx(payload)

    def mbedtls_ccm_star_encrypt_and_tag(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        numbytes = int(payload['arg1'], 16)
        alias = self.trace_processor.aliases.get_alias(ctx)
        self.trace_processor.post_event(alias, numbytes, "ccm/E")

    def mbedtls_ccm_star_auth_decrypt(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        numbytes = int(payload['arg1'], 16)
        alias = self.trace_processor.aliases.get_alias(ctx)
        self.trace_processor.post_event(alias, numbytes, "ccm/D")

    # ECDH functions
    def mbedtls_ecdh_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_ecdh_free(self, payload):
        self.helper_remove_ctx(payload)

    def mbedtls_ecdh_calc_secret(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        alias = self.trace_processor.aliases.get_alias(ctx)
        self.trace_processor.post_event(alias, 1, 'ecdh')

    # ECDSA functions
    def mbedtls_ecdsa_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_ecdsa_free(self, payload):
        self.helper_remove_ctx(payload)

    def mbedtls_ecdsa_write_signature(self, payload):
        # Ignore all SHAs that occur in read/write ECDSA
        if payload['dir'] == 'enter':
            self.block_sha = True
            ctx = payload['arg0']
            alias = self.trace_processor.aliases.get_alias(ctx)
            self.trace_processor.post_event(alias, 1, 'ecdsa/s')
        else:
            self.block_sha = False

    def mbedtls_ecdsa_write_signature_det(self, payload):
        # Ignore all SHAs that occur in read/write ECDSA
        if payload['dir'] == 'enter':
            self.block_sha = True
            ctx = payload['arg0']
            alias = self.trace_processor.aliases.get_alias(ctx)
            self.trace_processor.post_event(alias, 1, 'ecdsa/s')
        else:
            self.block_sha = False

    def mbedtls_ecdsa_read_signature(self, payload):
        # Ignore all SHAs that occur in read/write ECDSA
        if payload['dir'] == 'enter':
            self.block_sha = True
            ctx = payload['arg0']
            alias = self.trace_processor.aliases.get_alias(ctx)
            self.trace_processor.post_event(alias, 1, 'ecdsa/v')
        else:
            self.block_sha = False

    # GCM (WIP)
    def mbedtls_gcm_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_gcm_free(self, payload):
        self.helper_remove_ctx(payload)

    # SHA256
    def mbedtls_sha256_init(self, payload):
        self.helper_add_ctx(payload)

    def mbedtls_sha256_free(self, payload):
        self.helper_remove_ctx(payload)

    def mbedtls_sha256_clone(self, payload):
        if payload['dir'] != 'enter':
            return
        # mbedtls_sha256_clone(dst, src): arg0 is the destination context
        dst = payload['arg0']
        src = payload['arg1']
        self.trace_processor.aliases.clone(dst, src)

    def mbedtls_sha256_update_ret(self, payload):
        if payload['dir'] != 'enter':
            return
        ctx = payload['arg0']
        numbytes = int(payload['arg2'], 16)
        alias = self.trace_processor.aliases.get_alias(ctx)
        shortname = "sha"
        if self.block_sha is True:
            shortname += '/BLOCK'
        self.trace_processor.post_event(alias, numbytes, shortname)

    # MISC
    # TODO: We may decide to NOT ignore AES/ECB that occurs inside these
    def block_cipher_df(self, payload):
        pass

    def ctr_drbg_update_internal(self, payload):
        pass

    def mbedtls_ctr_drbg_random_with_add(self, payload):
        pass


class CTraceProcessor:
    """ Processes an mbedTLS TRACE file. """

    def __init__(self):
        self.aliases = CAliasTable()
        self.parsers = CParserLibrary(self)
        self.current_state = -1
        self.scoreboard = {}

    def process_file(self, file_name):
        with open(file_name, 'r') as file:
            for line in file:
                self.process_line(line.strip())

    def process_line(self, text):
        """ Call the correct parser based on the type of trace """
        trace = json.loads(text)
        # print(trace)
        self.parsers.parse(trace)

    def post_event(self, alias, n, tag):
        """ Add an event to the scoreboard, incrementing its 'n' value. """
        self.aliases.description(alias, tag)
        if alias not in self.scoreboard:
            self.scoreboard[alias] = {}
        slot = self.scoreboard[alias]
        if self.current_state in slot:
            slot[self.current_state] += n
        else:
            slot[self.current_state] = n


def main():
    if len(sys.argv) < 2:
        raise Exception("Please specify the input file to process.")

    trace_processor = CTraceProcessor()
    trace_processor.process_file(sys.argv[1])

    print("% 5s,% 30s,% 15s:," % ("alias", "type", "context"), end="")
    for i in range(-1, 20):
        print("% 6d," % i, end="")
    print("")

    for alias in sorted(trace_processor.scoreboard):
        print("%05d,% 30s,% 16s," % (
            int(alias),
            trace_processor.aliases.description(alias),
            trace_processor.aliases.get_context(alias)), end="")
        for i in range(-1, 20):
            if i in trace_processor.scoreboard[alias]:
                print("% 6s," % str(trace_processor.scoreboard[alias][i]), end="")
            else:
                print("% 6s," % " ", end="")
        print()


if __name__ == '__main__':
    main()
[ "json.loads" ]
[((7909, 7925), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (7919, 7925), False, 'import json\n')]
""" Geopoints - Plot Text Labels at Geopoints Locations """ # (C) Copyright 2017- ECMWF. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. # # In applying this licence, ECMWF does not waive the privileges and immunities # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. # import metview as mv # read geopoints containing 4 city locations filename = "city_loc.gpt" if mv.exist(filename): gpt = mv.read(filename) else: gpt = mv.gallery.load_dataset(filename) # define text labels # - the geopoints have values of 1, 2, 3 and 4 txt_lst = ["London", "New York", "Rio de Janeiro", "Tokyo"] # define text plotting # the first interval is 1(inclusive)->2(exclusive), etc sym_txt = mv.msymb( legend="off", symbol_type = "text", symbol_table_mode = "advanced", symbol_advanced_table_selection_type = "list", symbol_advanced_table_level_list = [1, 2, 3, 4, 5], symbol_advanced_table_text_list = txt_lst, symbol_advanced_table_text_font_size = 1, symbol_advanced_table_text_display_type = "top", symbol_text_blanking = "on", # requires Magics >=4.8.1 ) # define location plotting sym_loc = mv.msymb( legend="off", symbol_type="marker", symbol_colour="coral", symbol_height=0.4, symbol_marker_index=15, ) # define coastlines coast = mv.mcoast( map_coastline_colour="charcoal", map_coastline_thickness=2, map_coastline_land_shade="on", map_coastline_land_shade_colour="grey", map_coastline_sea_shade="on", map_coastline_sea_shade_colour="RGB(0.7475,0.8504,0.9466)", map_grid="off", map_label="off", map_layer_mode="background", ) # define the output plot file mv.setoutput(mv.pdf_output(output_name="text_at_geopoints_locations")) # generate plot mv.plot(coast, gpt, sym_txt, sym_loc)
[ "metview.gallery.load_dataset", "metview.pdf_output", "metview.msymb", "metview.exist", "metview.plot", "metview.read", "metview.mcoast" ]
[((540, 558), 'metview.exist', 'mv.exist', (['filename'], {}), '(filename)\n', (548, 558), True, 'import metview as mv\n'), ((857, 1199), 'metview.msymb', 'mv.msymb', ([], {'legend': '"""off"""', 'symbol_type': '"""text"""', 'symbol_table_mode': '"""advanced"""', 'symbol_advanced_table_selection_type': '"""list"""', 'symbol_advanced_table_level_list': '[1, 2, 3, 4, 5]', 'symbol_advanced_table_text_list': 'txt_lst', 'symbol_advanced_table_text_font_size': '(1)', 'symbol_advanced_table_text_display_type': '"""top"""', 'symbol_text_blanking': '"""on"""'}), "(legend='off', symbol_type='text', symbol_table_mode='advanced',\n symbol_advanced_table_selection_type='list',\n symbol_advanced_table_level_list=[1, 2, 3, 4, 5],\n symbol_advanced_table_text_list=txt_lst,\n symbol_advanced_table_text_font_size=1,\n symbol_advanced_table_text_display_type='top', symbol_text_blanking='on')\n", (865, 1199), True, 'import metview as mv\n'), ((1373, 1487), 'metview.msymb', 'mv.msymb', ([], {'legend': '"""off"""', 'symbol_type': '"""marker"""', 'symbol_colour': '"""coral"""', 'symbol_height': '(0.4)', 'symbol_marker_index': '(15)'}), "(legend='off', symbol_type='marker', symbol_colour='coral',\n symbol_height=0.4, symbol_marker_index=15)\n", (1381, 1487), True, 'import metview as mv\n'), ((1536, 1845), 'metview.mcoast', 'mv.mcoast', ([], {'map_coastline_colour': '"""charcoal"""', 'map_coastline_thickness': '(2)', 'map_coastline_land_shade': '"""on"""', 'map_coastline_land_shade_colour': '"""grey"""', 'map_coastline_sea_shade': '"""on"""', 'map_coastline_sea_shade_colour': '"""RGB(0.7475,0.8504,0.9466)"""', 'map_grid': '"""off"""', 'map_label': '"""off"""', 'map_layer_mode': '"""background"""'}), "(map_coastline_colour='charcoal', map_coastline_thickness=2,\n map_coastline_land_shade='on', map_coastline_land_shade_colour='grey',\n map_coastline_sea_shade='on', map_coastline_sea_shade_colour=\n 'RGB(0.7475,0.8504,0.9466)', map_grid='off', map_label='off',\n map_layer_mode='background')\n", (1545, 1845), True, 'import metview as mv\n'), ((1987, 2024), 'metview.plot', 'mv.plot', (['coast', 'gpt', 'sym_txt', 'sym_loc'], {}), '(coast, gpt, sym_txt, sym_loc)\n', (1994, 2024), True, 'import metview as mv\n'), ((570, 587), 'metview.read', 'mv.read', (['filename'], {}), '(filename)\n', (577, 587), True, 'import metview as mv\n'), ((604, 637), 'metview.gallery.load_dataset', 'mv.gallery.load_dataset', (['filename'], {}), '(filename)\n', (627, 637), True, 'import metview as mv\n'), ((1912, 1968), 'metview.pdf_output', 'mv.pdf_output', ([], {'output_name': '"""text_at_geopoints_locations"""'}), "(output_name='text_at_geopoints_locations')\n", (1925, 1968), True, 'import metview as mv\n')]
from skfda.representation.basis import (Basis, FDataBasis, Constant,
                                        Monomial, BSpline, Fourier)
from skfda.representation.grid import FDataGrid
from skfda import concatenate

import unittest

import numpy as np


class TestBasis(unittest.TestCase):

    # def setUp(self): could be defined for set up before any test

    def test_from_data_cholesky(self):
        t = np.linspace(0, 1, 5)
        x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
        basis = BSpline((0, 1), n_basis=5)
        np.testing.assert_array_almost_equal(
            FDataBasis.from_data(x, t, basis,
                                 method='cholesky').coefficients.round(2),
            np.array([[1., 2.78, -3., -0.78, 1.]])
        )

    def test_from_data_qr(self):
        t = np.linspace(0, 1, 5)
        x = np.sin(2 * np.pi * t) + np.cos(2 * np.pi * t)
        basis = BSpline((0, 1), n_basis=5)
        np.testing.assert_array_almost_equal(
            FDataBasis.from_data(x, t, basis,
                                 method='qr').coefficients.round(2),
            np.array([[1., 2.78, -3., -0.78, 1.]])
        )

    def test_basis_product_generic(self):
        monomial = Monomial(n_basis=5)
        fourier = Fourier(n_basis=3)
        prod = BSpline(n_basis=9, order=8)
        self.assertEqual(Basis.default_basis_of_product(monomial, fourier),
                         prod)

    def test_basis_constant_product(self):
        constant = Constant()
        monomial = Monomial()
        fourier = Fourier()
        bspline = BSpline(n_basis=5, order=3)
        self.assertEqual(constant.basis_of_product(monomial), monomial)
        self.assertEqual(constant.basis_of_product(fourier), fourier)
        self.assertEqual(constant.basis_of_product(bspline), bspline)
        self.assertEqual(monomial.basis_of_product(constant), monomial)
        self.assertEqual(fourier.basis_of_product(constant), fourier)
        self.assertEqual(bspline.basis_of_product(constant), bspline)

    def test_basis_fourier_product(self):
        # Test when periods are the same
        fourier = Fourier(n_basis=5)
        fourier2 = Fourier(n_basis=3)
        prod = Fourier(n_basis=7)
        self.assertEqual(fourier.basis_of_product(fourier2), prod)

        # Test when periods are different
        fourier2 = Fourier(n_basis=3, period=2)
        prod = BSpline(n_basis=9, order=8)
        self.assertEqual(fourier.basis_of_product(fourier2), prod)

    def test_basis_monomial_product(self):
        monomial = Monomial(n_basis=5)
        monomial2 = Monomial(n_basis=3)
        prod = Monomial(n_basis=8)
        self.assertEqual(monomial.basis_of_product(monomial2), prod)

    def test_basis_bspline_product(self):
        bspline = BSpline(n_basis=6, order=4)
        bspline2 = BSpline(domain_range=(0, 1), n_basis=6, order=4,
                           knots=[0, 0.3, 1 / 3, 1])
        prod = BSpline(domain_range=(0, 1), n_basis=10, order=7,
                       knots=[0, 0.3, 1 / 3, 2 / 3, 1])
        self.assertEqual(bspline.basis_of_product(bspline2), prod)

    def test_basis_inner_matrix(self):
        np.testing.assert_array_almost_equal(
            Monomial(n_basis=3)._inner_matrix(),
            [[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])

        np.testing.assert_array_almost_equal(
            Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=3)),
            [[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])

        np.testing.assert_array_almost_equal(
            Monomial(n_basis=3)._inner_matrix(Monomial(n_basis=4)),
            [[1, 1 / 2, 1 / 3, 1 / 4],
             [1 / 2, 1 / 3, 1 / 4, 1 / 5],
             [1 / 3, 1 / 4, 1 / 5, 1 / 6]])

        # TODO testing with other basis

    def test_basis_gram_matrix(self):
        np.testing.assert_allclose(
            Monomial(n_basis=3).gram_matrix(),
            [[1, 1 / 2, 1 / 3], [1 / 2, 1 / 3, 1 / 4], [1 / 3, 1 / 4, 1 / 5]])
        np.testing.assert_allclose(
            Fourier(n_basis=3).gram_matrix(),
            np.identity(3))
        np.testing.assert_allclose(
            BSpline(n_basis=6).gram_matrix().round(4),
            np.array(
                [[4.760e-02, 2.920e-02, 6.200e-03, 4.000e-04, 0.000e+00, 0.000e+00],
                 [2.920e-02, 7.380e-02, 5.210e-02, 1.150e-02, 1.000e-04, 0.000e+00],
                 [6.200e-03, 5.210e-02, 1.089e-01, 7.100e-02, 1.150e-02, 4.000e-04],
                 [4.000e-04, 1.150e-02, 7.100e-02, 1.089e-01, 5.210e-02, 6.200e-03],
                 [0.000e+00, 1.000e-04, 1.150e-02, 5.210e-02, 7.380e-02, 2.920e-02],
                 [0.000e+00, 0.000e+00, 4.000e-04, 6.200e-03, 2.920e-02, 4.760e-02]]))

    def test_basis_basis_inprod(self):
        monomial = Monomial(n_basis=4)
        bspline = BSpline(n_basis=5, order=4)
        np.testing.assert_array_almost_equal(
            monomial.inner_product(bspline).round(3),
            np.array(
                [[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983],
                 [0.01249991, 0.07500017, 0.12499983, 0.17500017, 0.11249991],
                 [0.00208338, 0.02916658, 0.07083342, 0.12916658, 0.10208338],
                 [0.00044654, 0.01339264, 0.04375022, 0.09910693, 0.09330368]]
            ).round(3)
        )
        np.testing.assert_array_almost_equal(
            monomial.inner_product(bspline),
            bspline.inner_product(monomial).T
        )

    def test_basis_fdatabasis_inprod(self):
        monomial = Monomial(n_basis=4)
        bspline = BSpline(n_basis=5, order=3)
        bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))

        np.testing.assert_array_almost_equal(
            monomial.inner_product(bsplinefd).round(3),
            np.array([[2., 7., 12.],
                      [1.29626206, 3.79626206, 6.29626206],
                      [0.96292873, 2.62959539, 4.29626206],
                      [0.7682873, 2.0182873, 3.2682873]]).round(3)
        )

    def test_fdatabasis_fdatabasis_inprod(self):
        monomial = Monomial(n_basis=4)
        monomialfd = FDataBasis(monomial, [[5, 4, 1, 0],
                                           [4, 2, 1, 0],
                                           [4, 1, 6, 4],
                                           [4, 5, 0, 1],
                                           [5, 6, 2, 0]])
        bspline = BSpline(n_basis=5, order=3)
        bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))

        np.testing.assert_array_almost_equal(
            monomialfd.inner_product(bsplinefd).round(3),
            np.array([[16.14797697, 52.81464364, 89.4813103],
                      [11.55565285, 38.22211951, 64.88878618],
                      [18.14698361, 55.64698361, 93.14698361],
                      [15.2495976, 48.9995976, 82.7495976],
                      [19.70392982, 63.03676315, 106.37009648]]).round(3)
        )
        np.testing.assert_array_almost_equal(
            monomialfd._inner_product_integrate(
                bsplinefd, None, None).round(3),
            np.array([[16.14797697, 52.81464364, 89.4813103],
                      [11.55565285, 38.22211951, 64.88878618],
                      [18.14698361, 55.64698361, 93.14698361],
                      [15.2495976, 48.9995976, 82.7495976],
                      [19.70392982, 63.03676315, 106.37009648]]).round(3)
        )

    def test_comutativity_inprod(self):
        monomial = Monomial(n_basis=4)
        bspline = BSpline(n_basis=5, order=3)
        bsplinefd = FDataBasis(bspline, np.arange(0, 15).reshape(3, 5))

        np.testing.assert_array_almost_equal(
            bsplinefd.inner_product(monomial).round(3),
            np.transpose(monomial.inner_product(bsplinefd).round(3))
        )

    def test_fdatabasis_times_fdatabasis_fdatabasis(self):
        monomial = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
        bspline = FDataBasis(BSpline(n_basis=6, order=4), [1, 2, 4, 1, 0, 1])
        times_fdar = monomial.times(bspline)

        prod_basis = BSpline(n_basis=9, order=6, knots=[0, 0.25, 0.5, 0.75, 1])
        prod_coefs = np.array([[0.9788352, 1.6289955, 2.7004969, 6.2678739,
                                8.7636441, 4.0069960, 0.7126961, 2.8826708,
                                6.0052311]])

        self.assertEqual(prod_basis, times_fdar.basis)
        np.testing.assert_array_almost_equal(
            prod_coefs, times_fdar.coefficients)

    def test_fdatabasis_times_fdatabasis_list(self):
        monomial = FDataBasis(Monomial(n_basis=3),
                              [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        result = monomial.times([3, 2, 1])

        expec_basis = Monomial(n_basis=3)
        expec_coefs = np.array([[3, 6, 9], [8, 10, 12], [7, 8, 9]])

        self.assertEqual(expec_basis, result.basis)
        np.testing.assert_array_almost_equal(expec_coefs,
                                             result.coefficients)

    def test_fdatabasis_times_fdatabasis_int(self):
        monomial = FDataBasis(Monomial(n_basis=3),
                              [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        result = monomial.times(3)

        expec_basis = Monomial(n_basis=3)
        expec_coefs = np.array([[3, 6, 9], [12, 15, 18], [21, 24, 27]])

        self.assertEqual(expec_basis, result.basis)
        np.testing.assert_array_almost_equal(expec_coefs,
                                             result.coefficients)

    def test_fdatabasis__add__(self):
        monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
        monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])

        np.testing.assert_equal(
            monomial1 + monomial2,
            FDataBasis(Monomial(n_basis=3), [[2, 4, 6], [4, 6, 8]]))
        np.testing.assert_equal(
            monomial2 + 1,
            FDataBasis(Monomial(n_basis=3), [[2, 2, 3], [4, 4, 5]]))
        np.testing.assert_equal(
            1 + monomial2,
            FDataBasis(Monomial(n_basis=3), [[2, 2, 3], [4, 4, 5]]))
        np.testing.assert_equal(
            monomial2 + [1, 2],
            FDataBasis(Monomial(n_basis=3), [[2, 2, 3], [5, 4, 5]]))
        np.testing.assert_equal(
            [1, 2] + monomial2,
            FDataBasis(Monomial(n_basis=3), [[2, 2, 3], [5, 4, 5]]))

        np.testing.assert_raises(
            NotImplementedError, monomial2.__add__,
            FDataBasis(Fourier(n_basis=3), [[2, 2, 3], [5, 4, 5]]))

    def test_fdatabasis__sub__(self):
        monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
        monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])

        np.testing.assert_equal(
            monomial1 - monomial2,
            FDataBasis(Monomial(n_basis=3), [[0, 0, 0], [-2, -2, -2]]))
        np.testing.assert_equal(
            monomial2 - 1,
            FDataBasis(Monomial(n_basis=3), [[0, 2, 3], [2, 4, 5]]))
        np.testing.assert_equal(
            1 - monomial2,
            FDataBasis(Monomial(n_basis=3), [[0, -2, -3], [-2, -4, -5]]))
        np.testing.assert_equal(
            monomial2 - [1, 2],
            FDataBasis(Monomial(n_basis=3), [[0, 2, 3], [1, 4, 5]]))
        np.testing.assert_equal(
            [1, 2] - monomial2,
            FDataBasis(Monomial(n_basis=3), [[0, -2, -3], [-1, -4, -5]]))

        np.testing.assert_raises(
            NotImplementedError, monomial2.__sub__,
            FDataBasis(Fourier(n_basis=3), [[2, 2, 3], [5, 4, 5]]))

    def test_fdatabasis__mul__(self):
        monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
        monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])

        np.testing.assert_equal(
            monomial1 * 2,
            FDataBasis(Monomial(n_basis=3), [[2, 4, 6]]))
        np.testing.assert_equal(
            3 * monomial2,
            FDataBasis(Monomial(n_basis=3), [[3, 6, 9], [9, 12, 15]]))
        np.testing.assert_equal(3 * monomial2, monomial2 * 3)

        np.testing.assert_equal(
            monomial2 * [1, 2],
            FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [6, 8, 10]]))
        np.testing.assert_equal(
            [1, 2] * monomial2,
            FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [6, 8, 10]]))

        np.testing.assert_raises(
            NotImplementedError, monomial2.__mul__,
            FDataBasis(Fourier(n_basis=3), [[2, 2, 3], [5, 4, 5]]))
        np.testing.assert_raises(
            NotImplementedError, monomial2.__mul__, monomial2)

    def test_fdatabasis__mul__2(self):
        monomial1 = FDataBasis(Monomial(n_basis=3), [1, 2, 3])
        monomial2 = FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3, 4, 5]])

        np.testing.assert_equal(
            monomial1 / 2,
            FDataBasis(Monomial(n_basis=3), [[1 / 2, 1, 3 / 2]]))
        np.testing.assert_equal(
            monomial2 / 2,
            FDataBasis(Monomial(n_basis=3),
                       [[1 / 2, 1, 3 / 2], [3 / 2, 2, 5 / 2]]))
        np.testing.assert_equal(
            monomial2 / [1, 2],
            FDataBasis(Monomial(n_basis=3), [[1, 2, 3], [3 / 2, 2, 5 / 2]]))

    def test_fdatabasis_derivative_constant(self):
        monomial = FDataBasis(Monomial(n_basis=8), [1, 5, 8, 9, 7, 8, 4, 5])
        monomial2 = FDataBasis(Monomial(n_basis=5), [[4, 9, 7, 4, 3],
                                                    [1, 7, 9, 8, 5],
                                                    [4, 6, 6, 6, 8]])

        np.testing.assert_equal(
            monomial.derivative(),
            FDataBasis(Monomial(n_basis=7), [5, 16, 27, 28, 40, 24, 35]))
        np.testing.assert_equal(monomial.derivative(order=0), monomial)
        np.testing.assert_equal(
            monomial.derivative(order=6),
            FDataBasis(Monomial(n_basis=2), [2880, 25200]))
        np.testing.assert_equal(
            monomial2.derivative(),
            FDataBasis(Monomial(n_basis=4), [[9, 14, 12, 12],
                                             [7, 18, 24, 20],
                                             [6, 12, 18, 32]]))
        np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
        np.testing.assert_equal(
            monomial2.derivative(order=3),
            FDataBasis(Monomial(n_basis=2), [[24, 72], [48, 120], [36, 192]]))

    def test_fdatabasis_derivative_monomial(self):
        monomial = FDataBasis(Monomial(n_basis=8), [1, 5, 8, 9, 7, 8, 4, 5])
        monomial2 = FDataBasis(Monomial(n_basis=5), [[4, 9, 7, 4, 3],
                                                    [1, 7, 9, 8, 5],
                                                    [4, 6, 6, 6, 8]])

        np.testing.assert_equal(
            monomial.derivative(),
            FDataBasis(Monomial(n_basis=7), [5, 16, 27, 28, 40, 24, 35]))
        np.testing.assert_equal(monomial.derivative(order=0), monomial)
        np.testing.assert_equal(
            monomial.derivative(order=6),
            FDataBasis(Monomial(n_basis=2), [2880, 25200]))
        np.testing.assert_equal(
            monomial2.derivative(),
            FDataBasis(Monomial(n_basis=4), [[9, 14, 12, 12],
                                             [7, 18, 24, 20],
                                             [6, 12, 18, 32]]))
        np.testing.assert_equal(monomial2.derivative(order=0), monomial2)
        np.testing.assert_equal(
            monomial2.derivative(order=3),
            FDataBasis(Monomial(n_basis=2), [[24, 72], [48, 120], [36, 192]]))

    def test_fdatabasis_derivative_fourier(self):
        fourier = FDataBasis(Fourier(n_basis=7), [1, 5, 8, 9, 8, 4, 5])
        fourier2 = FDataBasis(Fourier(n_basis=5), [[4, 9, 7, 4, 3],
                                                  [1, 7, 9, 8, 5],
                                                  [4, 6, 6, 6, 8]])

        fou0 = fourier.derivative(order=0)
        fou1 = fourier.derivative()
        fou2 = fourier.derivative(order=2)

        np.testing.assert_equal(fou1.basis, fourier.basis)
        np.testing.assert_almost_equal(
            fou1.coefficients.round(5),
            np.atleast_2d([0, -50.26548, 31.41593, -100.53096,
                           113.09734, -94.24778, 75.39822]))
        np.testing.assert_equal(fou0, fourier)
        np.testing.assert_equal(fou2.basis, fourier.basis)
        np.testing.assert_almost_equal(
            fou2.coefficients.round(5),
            np.atleast_2d([0, -197.39209, -315.82734, -1421.22303,
                           -1263.30936, -1421.22303, -1776.52879]))

        fou0 = fourier2.derivative(order=0)
        fou1 = fourier2.derivative()
        fou2 = fourier2.derivative(order=2)

        np.testing.assert_equal(fou1.basis, fourier2.basis)
        np.testing.assert_almost_equal(
            fou1.coefficients.round(5),
            [[0, -43.98230, 56.54867, -37.69911, 50.26548],
             [0, -56.54867, 43.98230, -62.83185, 100.53096],
             [0, -37.69911, 37.69911, -100.53096, 75.39822]])
        np.testing.assert_equal(fou0, fourier2)
        np.testing.assert_equal(fou2.basis, fourier2.basis)
        np.testing.assert_almost_equal(
            fou2.coefficients.round(5),
            [[0, -355.30576, -276.34892, -631.65468, -473.74101],
             [0, -276.34892, -355.30576, -1263.30936, -789.56835],
             [0, -236.87051, -236.87051, -947.48202, -1263.30936]])

    def test_fdatabasis_derivative_bspline(self):
        bspline = FDataBasis(BSpline(n_basis=8), [1, 5, 8, 9, 7, 8, 4, 5])
        bspline2 = FDataBasis(BSpline(n_basis=5), [[4, 9, 7, 4, 3],
                                                  [1, 7, 9, 8, 5],
                                                  [4, 6, 6, 6, 8]])

        bs0 = bspline.derivative(order=0)
        bs1 = bspline.derivative()
        bs2 = bspline.derivative(order=2)

        np.testing.assert_equal(bs1.basis, BSpline(n_basis=7, order=3))
        np.testing.assert_almost_equal(
            bs1.coefficients,
            np.atleast_2d([60, 22.5, 5, -10, 5, -30, 15]))
        np.testing.assert_equal(bs0, bspline)
        np.testing.assert_equal(bs2.basis, BSpline(n_basis=6, order=2))
        np.testing.assert_almost_equal(
            bs2.coefficients,
            np.atleast_2d([-375, -87.5, -75, 75, -175, 450]))

        bs0 = bspline2.derivative(order=0)
        bs1 = bspline2.derivative()
        bs2 = bspline2.derivative(order=2)

        np.testing.assert_equal(bs1.basis, BSpline(n_basis=4, order=3))
        np.testing.assert_almost_equal(
            bs1.coefficients,
            [[30, -6, -9, -6], [36, 6, -3, -18], [12, 0, 0, 12]])
        np.testing.assert_equal(bs0, bspline2)
        np.testing.assert_equal(bs2.basis, BSpline(n_basis=3, order=2))
        np.testing.assert_almost_equal(
            bs2.coefficients,
            [[-144, -6, 12], [-120, -18, -60], [-48, 0, 48]])

    def test_concatenate(self):
        sample1 = np.arange(0, 10)
        sample2 = np.arange(10, 20)
        fd1 = FDataGrid([sample1]).to_basis(Fourier(n_basis=5))
        fd2 = FDataGrid([sample2]).to_basis(Fourier(n_basis=5))

        fd = concatenate([fd1, fd2])

        np.testing.assert_equal(fd.n_samples, 2)
        np.testing.assert_equal(fd.dim_codomain, 1)
        np.testing.assert_equal(fd.dim_domain, 1)
        np.testing.assert_array_equal(
            fd.coefficients,
            np.concatenate([fd1.coefficients, fd2.coefficients]))


if __name__ == '__main__':
    print()
    unittest.main()
[ "numpy.sin", "numpy.arange", "numpy.testing.assert_array_almost_equal", "numpy.atleast_2d", "unittest.main", "skfda.representation.basis.BSpline", "numpy.testing.assert_almost_equal", "numpy.identity", "numpy.linspace", "numpy.testing.assert_equal", "numpy.testing.assert_raises", "skfda.representation.basis.Fourier", "skfda.representation.basis.Constant", "skfda.concatenate", "numpy.cos", "skfda.representation.basis.Basis.default_basis_of_product", "numpy.concatenate", "skfda.representation.basis.FDataBasis.from_data", "skfda.representation.basis.Monomial", "numpy.array", "skfda.representation.grid.FDataGrid", "skfda.representation.basis.FDataBasis" ]
[((22203, 22218), 'unittest.main', 'unittest.main', ([], {}), '()\n', (22216, 22218), False, 'import unittest\n'), ((409, 429), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (420, 429), True, 'import numpy as np\n'), ((504, 530), 'skfda.representation.basis.BSpline', 'BSpline', (['(0, 1)'], {'n_basis': '(5)'}), '((0, 1), n_basis=5)\n', (511, 530), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((806, 826), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (817, 826), True, 'import numpy as np\n'), ((901, 927), 'skfda.representation.basis.BSpline', 'BSpline', (['(0, 1)'], {'n_basis': '(5)'}), '((0, 1), n_basis=5)\n', (908, 927), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1213, 1232), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (1221, 1232), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1251, 1269), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (1258, 1269), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1285, 1312), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(8)'}), '(n_basis=9, order=8)\n', (1292, 1312), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1471, 1481), 'skfda.representation.basis.Constant', 'Constant', ([], {}), '()\n', (1479, 1481), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1501, 1511), 'skfda.representation.basis.Monomial', 'Monomial', ([], {}), '()\n', (1509, 1511), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1530, 1539), 'skfda.representation.basis.Fourier', 'Fourier', ([], {}), '()\n', (1537, 1539), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((1558, 1585), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (1565, 1585), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2112, 2130), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (2119, 2130), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2150, 2168), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (2157, 2168), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2184, 2202), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (2191, 2202), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2332, 2360), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)', 'period': '(2)'}), '(n_basis=3, period=2)\n', (2339, 2360), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2376, 2403), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(8)'}), '(n_basis=9, order=8)\n', (2383, 2403), False, 'from 
skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2534, 2553), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (2542, 2553), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2574, 2593), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (2582, 2593), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2609, 2628), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (2617, 2628), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2759, 2786), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(4)'}), '(n_basis=6, order=4)\n', (2766, 2786), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2806, 2880), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(6)', 'order': '(4)', 'knots': '[0, 0.3, 1 / 3, 1]'}), '(domain_range=(0, 1), n_basis=6, order=4, knots=[0, 0.3, 1 / 3, 1])\n', (2813, 2880), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((2923, 3009), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'domain_range': '(0, 1)', 'n_basis': '(10)', 'order': '(7)', 'knots': '[0, 0.3, 1 / 3, 2 / 3, 1]'}), '(domain_range=(0, 1), n_basis=10, order=7, knots=[0, 0.3, 1 / 3, 2 /\n 3, 1])\n', (2930, 3009), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5244, 5263), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (5252, 5263), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5282, 5309), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(4)'}), '(n_basis=5, order=4)\n', (5289, 5309), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5992, 6011), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (6000, 6011), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6030, 6057), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (6037, 6057), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6536, 6555), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (6544, 6555), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6577, 6674), 'skfda.representation.basis.FDataBasis', 'FDataBasis', (['monomial', '[[5, 4, 1, 0], [4, 2, 1, 0], [4, 1, 6, 4], [4, 5, 0, 1], [5, 6, 2, 0]]'], {}), '(monomial, [[5, 4, 1, 0], [4, 2, 1, 0], [4, 1, 6, 4], [4, 5, 0, 1\n ], [5, 6, 2, 0]])\n', (6587, 6674), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((6860, 6887), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (6867, 6887), False, 'from skfda.representation.basis import Basis, 
FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((7934, 7953), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (7942, 7953), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((7972, 7999), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)', 'order': '(3)'}), '(n_basis=5, order=3)\n', (7979, 7999), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8521, 8579), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(9)', 'order': '(6)', 'knots': '[0, 0.25, 0.5, 0.75, 1]'}), '(n_basis=9, order=6, knots=[0, 0.25, 0.5, 0.75, 1])\n', (8528, 8579), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8601, 8715), 'numpy.array', 'np.array', (['[[0.9788352, 1.6289955, 2.7004969, 6.2678739, 8.7636441, 4.006996, \n 0.7126961, 2.8826708, 6.0052311]]'], {}), '([[0.9788352, 1.6289955, 2.7004969, 6.2678739, 8.7636441, 4.006996,\n 0.7126961, 2.8826708, 6.0052311]])\n', (8609, 8715), True, 'import numpy as np\n'), ((8847, 8920), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['prod_coefs', 'times_fdar.coefficients'], {}), '(prod_coefs, times_fdar.coefficients)\n', (8883, 8920), True, 'import numpy as np\n'), ((9170, 9189), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9178, 9189), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9212, 9257), 'numpy.array', 'np.array', (['[[3, 6, 9], [8, 10, 12], [7, 8, 9]]'], {}), '([[3, 6, 9], [8, 10, 12], [7, 8, 9]])\n', (9220, 9257), True, 'import numpy as np\n'), ((9319, 9389), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expec_coefs', 'result.coefficients'], {}), '(expec_coefs, result.coefficients)\n', (9355, 9389), True, 'import numpy as np\n'), ((9617, 9636), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9625, 9636), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9659, 9708), 'numpy.array', 'np.array', (['[[3, 6, 9], [12, 15, 18], [21, 24, 27]]'], {}), '([[3, 6, 9], [12, 15, 18], [21, 24, 27]])\n', (9667, 9708), True, 'import numpy as np\n'), ((9770, 9840), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['expec_coefs', 'result.coefficients'], {}), '(expec_coefs, result.coefficients)\n', (9806, 9840), True, 'import numpy as np\n'), ((12988, 13041), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['(3 * monomial2)', '(monomial2 * 3)'], {}), '(3 * monomial2, monomial2 * 3)\n', (13011, 13041), True, 'import numpy as np\n'), ((13660, 13735), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['NotImplementedError', 'monomial2.__mul__', 'monomial2'], {}), '(NotImplementedError, monomial2.__mul__, monomial2)\n', (13684, 13735), True, 'import numpy as np\n'), ((17956, 18006), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou1.basis', 'fourier.basis'], {}), '(fou1.basis, fourier.basis)\n', (17979, 18006), True, 'import numpy as np\n'), ((18314, 18352), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou0', 'fourier'], {}), '(fou0, fourier)\n', (18337, 18352), True, 'import numpy as np\n'), ((18361, 18411), 'numpy.testing.assert_equal', 
'np.testing.assert_equal', (['fou2.basis', 'fourier.basis'], {}), '(fou2.basis, fourier.basis)\n', (18384, 18411), True, 'import numpy as np\n'), ((18857, 18908), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou1.basis', 'fourier2.basis'], {}), '(fou1.basis, fourier2.basis)\n', (18880, 18908), True, 'import numpy as np\n'), ((19293, 19332), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou0', 'fourier2'], {}), '(fou0, fourier2)\n', (19316, 19332), True, 'import numpy as np\n'), ((19341, 19392), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fou2.basis', 'fourier2.basis'], {}), '(fou2.basis, fourier2.basis)\n', (19364, 19392), True, 'import numpy as np\n'), ((20534, 20571), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['bs0', 'bspline'], {}), '(bs0, bspline)\n', (20557, 20571), True, 'import numpy as np\n'), ((21048, 21154), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['bs1.coefficients', '[[30, -6, -9, -6], [36, 6, -3, -18], [12, 0, 0, 12]]'], {}), '(bs1.coefficients, [[30, -6, -9, -6], [36, 6,\n -3, -18], [12, 0, 0, 12]])\n', (21078, 21154), True, 'import numpy as np\n'), ((21278, 21316), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['bs0', 'bspline2'], {}), '(bs0, bspline2)\n', (21301, 21316), True, 'import numpy as np\n'), ((21397, 21500), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['bs2.coefficients', '[[-144, -6, 12], [-120, -18, -60], [-48, 0, 48]]'], {}), '(bs2.coefficients, [[-144, -6, 12], [-120, -\n 18, -60], [-48, 0, 48]])\n', (21427, 21500), True, 'import numpy as np\n'), ((21666, 21682), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (21675, 21682), True, 'import numpy as np\n'), ((21701, 21718), 'numpy.arange', 'np.arange', (['(10)', '(20)'], {}), '(10, 20)\n', (21710, 21718), True, 'import numpy as np\n'), ((21861, 21884), 'skfda.concatenate', 'concatenate', (['[fd1, fd2]'], {}), '([fd1, fd2])\n', (21872, 21884), False, 'from skfda import concatenate\n'), ((21894, 21934), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.n_samples', '(2)'], {}), '(fd.n_samples, 2)\n', (21917, 21934), True, 'import numpy as np\n'), ((21943, 21986), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.dim_codomain', '(1)'], {}), '(fd.dim_codomain, 1)\n', (21966, 21986), True, 'import numpy as np\n'), ((21995, 22036), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['fd.dim_domain', '(1)'], {}), '(fd.dim_domain, 1)\n', (22018, 22036), True, 'import numpy as np\n'), ((442, 463), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (448, 463), True, 'import numpy as np\n'), ((466, 487), 'numpy.cos', 'np.cos', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (472, 487), True, 'import numpy as np\n'), ((711, 752), 'numpy.array', 'np.array', (['[[1.0, 2.78, -3.0, -0.78, 1.0]]'], {}), '([[1.0, 2.78, -3.0, -0.78, 1.0]])\n', (719, 752), True, 'import numpy as np\n'), ((839, 860), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (845, 860), True, 'import numpy as np\n'), ((863, 884), 'numpy.cos', 'np.cos', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (869, 884), True, 'import numpy as np\n'), ((1102, 1143), 'numpy.array', 'np.array', (['[[1.0, 2.78, -3.0, -0.78, 1.0]]'], {}), '([[1.0, 2.78, -3.0, -0.78, 1.0]])\n', (1110, 1143), True, 'import numpy as np\n'), ((1338, 1387), 'skfda.representation.basis.Basis.default_basis_of_product', 'Basis.default_basis_of_product', (['monomial', 
'fourier'], {}), '(monomial, fourier)\n', (1368, 1387), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4135, 4149), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (4146, 4149), True, 'import numpy as np\n'), ((4264, 4573), 'numpy.array', 'np.array', (['[[0.0476, 0.0292, 0.0062, 0.0004, 0.0, 0.0], [0.0292, 0.0738, 0.0521, \n 0.0115, 0.0001, 0.0], [0.0062, 0.0521, 0.1089, 0.071, 0.0115, 0.0004],\n [0.0004, 0.0115, 0.071, 0.1089, 0.0521, 0.0062], [0.0, 0.0001, 0.0115, \n 0.0521, 0.0738, 0.0292], [0.0, 0.0, 0.0004, 0.0062, 0.0292, 0.0476]]'], {}), '([[0.0476, 0.0292, 0.0062, 0.0004, 0.0, 0.0], [0.0292, 0.0738, \n 0.0521, 0.0115, 0.0001, 0.0], [0.0062, 0.0521, 0.1089, 0.071, 0.0115, \n 0.0004], [0.0004, 0.0115, 0.071, 0.1089, 0.0521, 0.0062], [0.0, 0.0001,\n 0.0115, 0.0521, 0.0738, 0.0292], [0.0, 0.0, 0.0004, 0.0062, 0.0292, \n 0.0476]])\n', (4272, 4573), True, 'import numpy as np\n'), ((8344, 8363), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (8352, 8363), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((8405, 8432), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(4)'}), '(n_basis=6, order=4)\n', (8412, 8432), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9018, 9037), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9026, 9037), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9473, 9492), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9481, 9492), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9911, 9930), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9919, 9930), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((9974, 9993), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (9982, 9993), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11210, 11229), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11218, 11229), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11273, 11292), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11281, 11292), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12522, 12541), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12530, 12541), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12585, 12604), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12593, 12604), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13840, 13859), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13848, 13859), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), 
((13903, 13922), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13911, 13922), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14595, 14614), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (14603, 14614), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14703, 14722), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (14711, 14722), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16077, 16096), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (16085, 16096), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16185, 16204), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (16193, 16204), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17557, 17575), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (17564, 17575), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17659, 17677), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (17666, 17677), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((18113, 18201), 'numpy.atleast_2d', 'np.atleast_2d', (['[0, -50.26548, 31.41593, -100.53096, 113.09734, -94.24778, 75.39822]'], {}), '([0, -50.26548, 31.41593, -100.53096, 113.09734, -94.24778, \n 75.39822])\n', (18126, 18201), True, 'import numpy as np\n'), ((18518, 18617), 'numpy.atleast_2d', 'np.atleast_2d', (['[0, -197.39209, -315.82734, -1421.22303, -1263.30936, -1421.22303, -1776.52879]'], {}), '([0, -197.39209, -315.82734, -1421.22303, -1263.30936, -\n 1421.22303, -1776.52879])\n', (18531, 18617), True, 'import numpy as np\n'), ((19867, 19885), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(8)'}), '(n_basis=8)\n', (19874, 19885), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((19972, 19990), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (19979, 19990), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20300, 20327), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(7)', 'order': '(3)'}), '(n_basis=7, order=3)\n', (20307, 20327), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20425, 20470), 'numpy.atleast_2d', 'np.atleast_2d', (['[60, 22.5, 5, -10, 5, -30, 15]'], {}), '([60, 22.5, 5, -10, 5, -30, 15])\n', (20438, 20470), True, 'import numpy as np\n'), ((20615, 20642), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)', 'order': '(2)'}), '(n_basis=6, order=2)\n', (20622, 20642), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((20740, 20788), 'numpy.atleast_2d', 'np.atleast_2d', (['[-375, -87.5, -75, 75, -175, 450]'], {}), '([-375, -87.5, -75, 75, -175, 450])\n', (20753, 20788), True, 'import numpy as np\n'), ((21011, 21038), 
'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(4)', 'order': '(3)'}), '(n_basis=4, order=3)\n', (21018, 21038), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21360, 21387), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(3)', 'order': '(2)'}), '(n_basis=3, order=2)\n', (21367, 21387), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21763, 21781), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (21770, 21781), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21827, 21845), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(5)'}), '(n_basis=5)\n', (21834, 21845), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((22092, 22144), 'numpy.concatenate', 'np.concatenate', (['[fd1.coefficients, fd2.coefficients]'], {}), '([fd1.coefficients, fd2.coefficients])\n', (22106, 22144), True, 'import numpy as np\n'), ((3410, 3429), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3418, 3429), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3624, 3643), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (3632, 3643), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10118, 10137), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10126, 10137), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10297, 10316), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10305, 10316), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10476, 10495), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10484, 10495), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10660, 10679), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10668, 10679), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((10844, 10863), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (10852, 10863), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11051, 11069), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11058, 11069), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11417, 11436), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11425, 11436), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11599, 11618), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11607, 11618), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11778, 11797), 
'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11786, 11797), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((11967, 11986), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (11975, 11986), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12151, 12170), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12159, 12170), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12363, 12381), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12370, 12381), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12721, 12740), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12729, 12740), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((12889, 12908), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (12897, 12908), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13170, 13189), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13178, 13189), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13355, 13374), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13363, 13374), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((13563, 13581), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (13570, 13581), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14039, 14058), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14047, 14058), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14215, 14234), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14223, 14234), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14416, 14435), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (14424, 14435), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((14971, 14990), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (14979, 14990), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15242, 15261), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (15250, 15261), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15422, 15441), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (15430, 15441), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((15808, 15827), 
'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (15816, 15827), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16453, 16472), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(7)'}), '(n_basis=7)\n', (16461, 16472), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16724, 16743), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (16732, 16743), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((16904, 16923), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(4)'}), '(n_basis=4)\n', (16912, 16923), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((17290, 17309), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(2)'}), '(n_basis=2)\n', (17298, 17309), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((21733, 21753), 'skfda.representation.grid.FDataGrid', 'FDataGrid', (['[sample1]'], {}), '([sample1])\n', (21742, 21753), False, 'from skfda.representation.grid import FDataGrid\n'), ((21797, 21817), 'skfda.representation.grid.FDataGrid', 'FDataGrid', (['[sample2]'], {}), '([sample2])\n', (21806, 21817), False, 'from skfda.representation.grid import FDataGrid\n'), ((3181, 3200), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3189, 3200), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3376, 3395), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3384, 3395), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3590, 3609), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3598, 3609), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((3894, 3913), 'skfda.representation.basis.Monomial', 'Monomial', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (3902, 3913), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4066, 4084), 'skfda.representation.basis.Fourier', 'Fourier', ([], {'n_basis': '(3)'}), '(n_basis=3)\n', (4073, 4084), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((5422, 5695), 'numpy.array', 'np.array', (['[[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983], [0.01249991,\n 0.07500017, 0.12499983, 0.17500017, 0.11249991], [0.00208338, \n 0.02916658, 0.07083342, 0.12916658, 0.10208338], [0.00044654, \n 0.01339264, 0.04375022, 0.09910693, 0.09330368]]'], {}), '([[0.12499983, 0.25000035, 0.24999965, 0.25000035, 0.12499983], [\n 0.01249991, 0.07500017, 0.12499983, 0.17500017, 0.11249991], [\n 0.00208338, 0.02916658, 0.07083342, 0.12916658, 0.10208338], [\n 0.00044654, 0.01339264, 0.04375022, 0.09910693, 0.09330368]])\n', (5430, 5695), True, 'import numpy as np\n'), ((6098, 6114), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (6107, 6114), True, 'import numpy as np\n'), ((6245, 6389), 'numpy.array', 'np.array', (['[[2.0, 7.0, 12.0], [1.29626206, 3.79626206, 6.29626206], [0.96292873, \n 2.62959539, 
4.29626206], [0.7682873, 2.0182873, 3.2682873]]'], {}), '([[2.0, 7.0, 12.0], [1.29626206, 3.79626206, 6.29626206], [\n 0.96292873, 2.62959539, 4.29626206], [0.7682873, 2.0182873, 3.2682873]])\n', (6253, 6389), True, 'import numpy as np\n'), ((6928, 6944), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (6937, 6944), True, 'import numpy as np\n'), ((7077, 7298), 'numpy.array', 'np.array', (['[[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951, \n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]]'], {}), '([[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951,\n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]])\n', (7085, 7298), True, 'import numpy as np\n'), ((7554, 7775), 'numpy.array', 'np.array', (['[[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951, \n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]]'], {}), '([[16.14797697, 52.81464364, 89.4813103], [11.55565285, 38.22211951,\n 64.88878618], [18.14698361, 55.64698361, 93.14698361], [15.2495976, \n 48.9995976, 82.7495976], [19.70392982, 63.03676315, 106.37009648]])\n', (7562, 7775), True, 'import numpy as np\n'), ((8040, 8056), 'numpy.arange', 'np.arange', (['(0)', '(15)'], {}), '(0, 15)\n', (8049, 8056), True, 'import numpy as np\n'), ((589, 641), 'skfda.representation.basis.FDataBasis.from_data', 'FDataBasis.from_data', (['x', 't', 'basis'], {'method': '"""cholesky"""'}), "(x, t, basis, method='cholesky')\n", (609, 641), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((986, 1032), 'skfda.representation.basis.FDataBasis.from_data', 'FDataBasis.from_data', (['x', 't', 'basis'], {'method': '"""qr"""'}), "(x, t, basis, method='qr')\n", (1006, 1032), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n'), ((4186, 4204), 'skfda.representation.basis.BSpline', 'BSpline', ([], {'n_basis': '(6)'}), '(n_basis=6)\n', (4193, 4204), False, 'from skfda.representation.basis import Basis, FDataBasis, Constant, Monomial, BSpline, Fourier\n')]
import board
import bot


class Game:
    def __init__(self):
        self.width = 7
        self.height = 6
        self.connect = 4
        self.bots = [False, True]
        self.score = [0, 0]
        self.grid = board.Board(self.width, self.height, self.connect)

    def setBoard(self, width, height, connect):
        self.width = width
        self.height = height
        self.connect = connect
        self.grid = board.Board(width, height, connect)

    def setFirst(self, isBot):
        self.bots[0] = isBot

    def setSecond(self, isBot):
        self.bots[1] = isBot

    def firstWins(self):
        self.score[0] += 1

    def secondWins(self):
        self.score[1] += 1

    def resetScore(self):
        self.score[0] = 0
        self.score[1] = 0

    def reset(self):
        self.grid = board.Board(self.width, self.height, self.connect)
[ "board.Board" ]
[((215, 265), 'board.Board', 'board.Board', (['self.width', 'self.height', 'self.connect'], {}), '(self.width, self.height, self.connect)\n', (226, 265), False, 'import board\n'), ((422, 457), 'board.Board', 'board.Board', (['width', 'height', 'connect'], {}), '(width, height, connect)\n', (433, 457), False, 'import board\n'), ((817, 867), 'board.Board', 'board.Board', (['self.width', 'self.height', 'self.connect'], {}), '(self.width, self.height, self.connect)\n', (828, 867), False, 'import board\n')]
from paradigm import catalog

from tests.utils import pack
from .utils import (identifiers,
                    modules,
                    to_homogeneous_tuples)

objects_paths = to_homogeneous_tuples(identifiers).map(pack(catalog.Path))
non_empty_objects_paths = (to_homogeneous_tuples(identifiers,
                                              min_size=1)
                           .map(pack(catalog.Path)))
modules_paths = modules.map(catalog.from_module)
[ "tests.utils.pack" ]
[((219, 237), 'tests.utils.pack', 'pack', (['catalog.Path'], {}), '(catalog.Path)\n', (223, 237), False, 'from tests.utils import pack\n'), ((394, 412), 'tests.utils.pack', 'pack', (['catalog.Path'], {}), '(catalog.Path)\n', (398, 412), False, 'from tests.utils import pack\n')]
# coding: utf-8

# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest

from azure.communication.administration import CommunicationIdentityClient
from _shared.helper import URIIdentityReplacer
from _shared.testcase import (
    CommunicationTestCase,
    BodyReplacerProcessor
)
from devtools_testutils import ResourceGroupPreparer
from _shared.communication_service_preparer import CommunicationServicePreparer


class CommunicationIdentityClientTest(CommunicationTestCase):
    def setUp(self):
        super(CommunicationIdentityClientTest, self).setUp()
        self.recording_processors.extend([
            BodyReplacerProcessor(keys=["id", "token"]),
            URIIdentityReplacer()])

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    def test_create_user(self, connection_string):
        identity_client = CommunicationIdentityClient.from_connection_string(
            connection_string)
        user = identity_client.create_user()

        assert user.identifier is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    def test_issue_token(self, connection_string):
        identity_client = CommunicationIdentityClient.from_connection_string(
            connection_string)
        user = identity_client.create_user()

        token_response = identity_client.issue_token(user, scopes=["chat"])

        assert user.identifier is not None
        assert token_response.token is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    def test_revoke_tokens(self, connection_string):
        identity_client = CommunicationIdentityClient.from_connection_string(
            connection_string)
        user = identity_client.create_user()

        token_response = identity_client.issue_token(user, scopes=["chat"])
        identity_client.revoke_tokens(user)

        assert user.identifier is not None
        assert token_response.token is not None

    @ResourceGroupPreparer(random_name_enabled=True)
    @CommunicationServicePreparer()
    def test_delete_user(self, connection_string):
        identity_client = CommunicationIdentityClient.from_connection_string(
            connection_string)
        user = identity_client.create_user()

        identity_client.delete_user(user)

        assert user.identifier is not None
[ "_shared.testcase.BodyReplacerProcessor", "_shared.communication_service_preparer.CommunicationServicePreparer", "_shared.helper.URIIdentityReplacer", "devtools_testutils.ResourceGroupPreparer", "azure.communication.administration.CommunicationIdentityClient.from_connection_string" ]
[((969, 1016), 'devtools_testutils.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'random_name_enabled': '(True)'}), '(random_name_enabled=True)\n', (990, 1016), False, 'from devtools_testutils import ResourceGroupPreparer\n'), ((1022, 1052), '_shared.communication_service_preparer.CommunicationServicePreparer', 'CommunicationServicePreparer', ([], {}), '()\n', (1050, 1052), False, 'from _shared.communication_service_preparer import CommunicationServicePreparer\n'), ((1308, 1355), 'devtools_testutils.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'random_name_enabled': '(True)'}), '(random_name_enabled=True)\n', (1329, 1355), False, 'from devtools_testutils import ResourceGroupPreparer\n'), ((1361, 1391), '_shared.communication_service_preparer.CommunicationServicePreparer', 'CommunicationServicePreparer', ([], {}), '()\n', (1389, 1391), False, 'from _shared.communication_service_preparer import CommunicationServicePreparer\n'), ((1776, 1823), 'devtools_testutils.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'random_name_enabled': '(True)'}), '(random_name_enabled=True)\n', (1797, 1823), False, 'from devtools_testutils import ResourceGroupPreparer\n'), ((1829, 1859), '_shared.communication_service_preparer.CommunicationServicePreparer', 'CommunicationServicePreparer', ([], {}), '()\n', (1857, 1859), False, 'from _shared.communication_service_preparer import CommunicationServicePreparer\n'), ((2290, 2337), 'devtools_testutils.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'random_name_enabled': '(True)'}), '(random_name_enabled=True)\n', (2311, 2337), False, 'from devtools_testutils import ResourceGroupPreparer\n'), ((2343, 2373), '_shared.communication_service_preparer.CommunicationServicePreparer', 'CommunicationServicePreparer', ([], {}), '()\n', (2371, 2373), False, 'from _shared.communication_service_preparer import CommunicationServicePreparer\n'), ((1130, 1199), 'azure.communication.administration.CommunicationIdentityClient.from_connection_string', 'CommunicationIdentityClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (1180, 1199), False, 'from azure.communication.administration import CommunicationIdentityClient\n'), ((1469, 1538), 'azure.communication.administration.CommunicationIdentityClient.from_connection_string', 'CommunicationIdentityClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (1519, 1538), False, 'from azure.communication.administration import CommunicationIdentityClient\n'), ((1939, 2008), 'azure.communication.administration.CommunicationIdentityClient.from_connection_string', 'CommunicationIdentityClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (1989, 2008), False, 'from azure.communication.administration import CommunicationIdentityClient\n'), ((2451, 2520), 'azure.communication.administration.CommunicationIdentityClient.from_connection_string', 'CommunicationIdentityClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (2501, 2520), False, 'from azure.communication.administration import CommunicationIdentityClient\n'), ((882, 925), '_shared.testcase.BodyReplacerProcessor', 'BodyReplacerProcessor', ([], {'keys': "['id', 'token']"}), "(keys=['id', 'token'])\n", (903, 925), False, 'from _shared.testcase import CommunicationTestCase, BodyReplacerProcessor\n'), ((939, 960), '_shared.helper.URIIdentityReplacer', 'URIIdentityReplacer', ([], {}), '()\n', (958, 960), False, 'from _shared.helper import URIIdentityReplacer\n')]
from django.urls import path
from rest_framework_simplejwt import views as jwt_views
from django_react_template.users.api.views import UserCreate, HelloWorld, LogoutAndBlacklistRefreshTokenForUserView

app_name = "users"
urlpatterns = [
    path('create/', UserCreate.as_view(), name="create_user"),
    path('token/obtain/', jwt_views.TokenObtainPairView.as_view(), name='token_create'),
    path('token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
    path('blacklist/', LogoutAndBlacklistRefreshTokenForUserView.as_view(), name='token_blacklist'),
    path('hello/', HelloWorld.as_view(), name='hello_world'),
]
[ "django_react_template.users.api.views.LogoutAndBlacklistRefreshTokenForUserView.as_view", "django_react_template.users.api.views.UserCreate.as_view", "rest_framework_simplejwt.views.TokenRefreshView.as_view", "django_react_template.users.api.views.HelloWorld.as_view", "rest_framework_simplejwt.views.TokenObtainPairView.as_view" ]
[((260, 280), 'django_react_template.users.api.views.UserCreate.as_view', 'UserCreate.as_view', ([], {}), '()\n', (278, 280), False, 'from django_react_template.users.api.views import UserCreate, HelloWorld, LogoutAndBlacklistRefreshTokenForUserView\n'), ((329, 368), 'rest_framework_simplejwt.views.TokenObtainPairView.as_view', 'jwt_views.TokenObtainPairView.as_view', ([], {}), '()\n', (366, 368), True, 'from rest_framework_simplejwt import views as jwt_views\n'), ((419, 455), 'rest_framework_simplejwt.views.TokenRefreshView.as_view', 'jwt_views.TokenRefreshView.as_view', ([], {}), '()\n', (453, 455), True, 'from rest_framework_simplejwt import views as jwt_views\n'), ((503, 554), 'django_react_template.users.api.views.LogoutAndBlacklistRefreshTokenForUserView.as_view', 'LogoutAndBlacklistRefreshTokenForUserView.as_view', ([], {}), '()\n', (552, 554), False, 'from django_react_template.users.api.views import UserCreate, HelloWorld, LogoutAndBlacklistRefreshTokenForUserView\n'), ((600, 620), 'django_react_template.users.api.views.HelloWorld.as_view', 'HelloWorld.as_view', ([], {}), '()\n', (618, 620), False, 'from django_react_template.users.api.views import UserCreate, HelloWorld, LogoutAndBlacklistRefreshTokenForUserView\n')]
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-

import time
from bes.testing.unit_test import unit_test
import os, os.path as path, tempfile
from bes.fs.file_checksum_db import file_checksum_db
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.docker.docker import docker

class test_file_metadata_db(unit_test):

  @classmethod
  def setUpClass(clazz):
    docker.raise_skip_if_running_under_docker()

  def test_file_first_time(self):
    tmp_dir = self.make_temp_dir()
    db = file_checksum_db(tmp_dir)
    tmp_file = temp_file.make_temp_file(suffix = '.txt', content = 'this is foo\n')
    self.assertEqual( 0, db.count )
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 1, db.count )
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 1, db.count )

  def test_file_persistence(self):
    tmp_dir = self.make_temp_dir()
    db = file_checksum_db(tmp_dir)
    tmp_file = temp_file.make_temp_file(suffix = '.txt', content = 'this is foo\n')
    self.assertEqual( 0, db.count )
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 1, db.count )
    db = file_checksum_db(tmp_dir)
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 0, db.count )

  def test_file_changed(self):
    tmp_dir = self.make_temp_dir()
    db = file_checksum_db(tmp_dir)
    tmp_file = temp_file.make_temp_file(suffix = '.txt', content = 'this is foo\n')
    self.assertEqual( 0, db.count )
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 1, db.count )
    time.sleep(0.100) # need to sleep to let the mtime change
    with open(tmp_file, 'a') as fout:
      fout.write('changed')
      fout.flush()
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 2, db.count )
    self.assertEqual( file_util.checksum('sha256', tmp_file), db.checksum('sha256', tmp_file) )
    self.assertEqual( 2, db.count )

if __name__ == '__main__':
  unit_test.main()
[ "bes.fs.temp_file.temp_file.make_temp_file", "time.sleep", "bes.fs.file_util.file_util.checksum", "bes.testing.unit_test.unit_test.main", "bes.fs.file_checksum_db.file_checksum_db", "bes.docker.docker.docker.raise_skip_if_running_under_docker" ]
[((2328, 2344), 'bes.testing.unit_test.unit_test.main', 'unit_test.main', ([], {}), '()\n', (2342, 2344), False, 'from bes.testing.unit_test import unit_test\n'), ((461, 504), 'bes.docker.docker.docker.raise_skip_if_running_under_docker', 'docker.raise_skip_if_running_under_docker', ([], {}), '()\n', (502, 504), False, 'from bes.docker.docker import docker\n'), ((586, 611), 'bes.fs.file_checksum_db.file_checksum_db', 'file_checksum_db', (['tmp_dir'], {}), '(tmp_dir)\n', (602, 611), False, 'from bes.fs.file_checksum_db import file_checksum_db\n'), ((627, 691), 'bes.fs.temp_file.temp_file.make_temp_file', 'temp_file.make_temp_file', ([], {'suffix': '""".txt"""', 'content': '"""this is foo\n"""'}), "(suffix='.txt', content='this is foo\\n')\n", (651, 691), False, 'from bes.fs.temp_file import temp_file\n'), ((1080, 1105), 'bes.fs.file_checksum_db.file_checksum_db', 'file_checksum_db', (['tmp_dir'], {}), '(tmp_dir)\n', (1096, 1105), False, 'from bes.fs.file_checksum_db import file_checksum_db\n'), ((1121, 1185), 'bes.fs.temp_file.temp_file.make_temp_file', 'temp_file.make_temp_file', ([], {'suffix': '""".txt"""', 'content': '"""this is foo\n"""'}), "(suffix='.txt', content='this is foo\\n')\n", (1145, 1185), False, 'from bes.fs.temp_file import temp_file\n'), ((1367, 1392), 'bes.fs.file_checksum_db.file_checksum_db', 'file_checksum_db', (['tmp_dir'], {}), '(tmp_dir)\n', (1383, 1392), False, 'from bes.fs.file_checksum_db import file_checksum_db\n'), ((1605, 1630), 'bes.fs.file_checksum_db.file_checksum_db', 'file_checksum_db', (['tmp_dir'], {}), '(tmp_dir)\n', (1621, 1630), False, 'from bes.fs.file_checksum_db import file_checksum_db\n'), ((1646, 1710), 'bes.fs.temp_file.temp_file.make_temp_file', 'temp_file.make_temp_file', ([], {'suffix': '""".txt"""', 'content': '"""this is foo\n"""'}), "(suffix='.txt', content='this is foo\\n')\n", (1670, 1710), False, 'from bes.fs.temp_file import temp_file\n'), ((1887, 1902), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1897, 1902), False, 'import time\n'), ((754, 792), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (772, 792), False, 'from bes.fs.file_util import file_util\n'), ((886, 924), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (904, 924), False, 'from bes.fs.file_util import file_util\n'), ((1248, 1286), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (1266, 1286), False, 'from bes.fs.file_util import file_util\n'), ((1415, 1453), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (1433, 1453), False, 'from bes.fs.file_util import file_util\n'), ((1773, 1811), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (1791, 1811), False, 'from bes.fs.file_util import file_util\n'), ((2052, 2090), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (2070, 2090), False, 'from bes.fs.file_util import file_util\n'), ((2184, 2222), 'bes.fs.file_util.file_util.checksum', 'file_util.checksum', (['"""sha256"""', 'tmp_file'], {}), "('sha256', tmp_file)\n", (2202, 2222), False, 'from bes.fs.file_util import file_util\n')]
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import math
from copy import deepcopy

from compas.geometry import scale_vector
from compas.geometry import normalize_vector
from compas.geometry import subtract_vectors
from compas.geometry import cross_vectors
from compas.geometry import dot_vectors
from compas.geometry import multiply_matrix_vector
from compas.geometry import length_vector
from compas.geometry import allclose
from compas.geometry import multiply_matrices
from compas.geometry import transpose_matrix
from compas.geometry import norm_vector


_EPS = 1e-16
"""eps for testing whether a number is close to zero"""

_SPEC2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
"""used for Euler angles: to map rotation type and axes to tuples of inner
axis, parity, repetition, frame"""

_NEXT_SPEC = [1, 2, 0, 1]


__all__ = [
    'matrix_determinant',
    'matrix_inverse',
    'decompose_matrix',
    'compose_matrix',
    'identity_matrix',
    'matrix_from_frame',
    'matrix_from_frame_to_frame',
    'matrix_from_change_of_basis',
    'matrix_from_euler_angles',
    'matrix_from_axis_and_angle',
    'matrix_from_axis_angle_vector',
    'matrix_from_basis_vectors',
    'matrix_from_translation',
    'matrix_from_orthogonal_projection',
    'matrix_from_parallel_projection',
    'matrix_from_perspective_projection',
    'matrix_from_perspective_entries',
    'matrix_from_shear_entries',
    'matrix_from_shear',
    'matrix_from_scale_factors',
    'matrix_from_quaternion',
    'euler_angles_from_matrix',
    'euler_angles_from_quaternion',
    'axis_and_angle_from_matrix',
    'axis_angle_vector_from_matrix',
    'axis_angle_from_quaternion',
    'quaternion_from_matrix',
    'quaternion_from_euler_angles',
    'quaternion_from_axis_angle',
    'basis_vectors_from_matrix',
    'translation_from_matrix',
]


def is_matrix_square(M):
    """Verify that a matrix is square.

    Parameters
    ----------
    M : list[list[float]]
        The matrix.

    Returns
    -------
    bool
        True if the length of every row is equal to the number of rows.
        False otherwise.

    Examples
    --------
    >>> M = identity_matrix(4)
    >>> is_matrix_square(M)
    True

    """
    number_of_rows = len(M)
    for row in M:
        if len(row) != number_of_rows:
            return False
    return True


def matrix_minor(M, i, j):
    """Construct the minor corresponding to an element of a matrix.

    Parameters
    ----------
    M : list[list[float]]
        The matrix.
    i : int
        Row index of the minor.
    j : int
        Column index of the minor.

    Returns
    -------
    list[list[float]]
        The minor.

    """
    return [row[:j] + row[j + 1:] for row in (M[:i] + M[i + 1:])]


def matrix_determinant(M, check=True):
    """Calculates the determinant of a square matrix M.

    Parameters
    ----------
    M : list[list[float]]
        A square matrix of any dimension.
    check : bool
        If True, checks if the matrix is square.

    Raises
    ------
    ValueError
        If the matrix is not square.

    Returns
    -------
    float
        The determinant.
    Examples
    --------
    >>> M = identity_matrix(4)
    >>> matrix_determinant(M)
    1.0

    """
    dim = len(M)

    if check:
        if not is_matrix_square(M):
            raise ValueError("Not a square matrix")

    if dim == 2:
        return M[0][0] * M[1][1] - M[0][1] * M[1][0]

    D = 0
    for c in range(dim):
        D += (-1) ** c * M[0][c] * matrix_determinant(matrix_minor(M, 0, c), check=False)
    return D


def matrix_inverse(M):
    """Calculates the inverse of a square matrix M.

    Parameters
    ----------
    M : list[list[float]]
        A square matrix of any dimension.

    Returns
    -------
    list[list[float]]
        The inverted matrix.

    Raises
    ------
    ValueError
        If the matrix is not square.
    ValueError
        If the matrix is singular.
    ValueError
        If the matrix is not invertible.

    Examples
    --------
    >>> from compas.geometry import Frame
    >>> f = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> T = matrix_from_frame(f)
    >>> I = multiply_matrices(T, matrix_inverse(T))
    >>> I2 = identity_matrix(4)
    >>> allclose(I[0], I2[0])
    True
    >>> allclose(I[1], I2[1])
    True
    >>> allclose(I[2], I2[2])
    True
    >>> allclose(I[3], I2[3])
    True

    """
    D = matrix_determinant(M)

    if D == 0:
        raise ValueError("The matrix is singular.")

    if len(M) == 2:
        return [[M[1][1] / D, -1 * M[0][1] / D],
                [-1 * M[1][0] / D, M[0][0] / D]]

    cofactors = []
    for r in range(len(M)):
        cofactor_row = []
        for c in range(len(M)):
            cofactor_row.append((-1) ** (r + c) * matrix_determinant(matrix_minor(M, r, c)))
        cofactors.append(cofactor_row)
    cofactors = transpose_matrix(cofactors)

    for r in range(len(cofactors)):
        for c in range(len(cofactors)):
            cofactors[r][c] = cofactors[r][c] / D
    return cofactors


def decompose_matrix(M):
    """Calculates the components of rotation, translation, scale, shear,
    and perspective of a given transformation matrix M. [1]_

    Parameters
    ----------
    M : list[list[float]]
        The square matrix of any dimension.

    Raises
    ------
    ValueError
        If matrix is singular or degenerative.

    Returns
    -------
    scale : [float, float, float]
        The 3 scale factors in x-, y-, and z-direction.
    shear : [float, float, float]
        The 3 shear factors for x-y, x-z, and y-z axes.
    angles : [float, float, float]
        The rotation specified through the 3 Euler angles about static x, y, z axes.
    translation : [float, float, float]
        The 3 values of translation.
    perspective : [float, float, float, float]
        The 4 perspective entries of the matrix.

    Examples
    --------
    >>> trans1 = [1, 2, 3]
    >>> angle1 = [-2.142, 1.141, -0.142]
    >>> scale1 = [0.123, 2, 0.5]
    >>> T = matrix_from_translation(trans1)
    >>> R = matrix_from_euler_angles(angle1)
    >>> S = matrix_from_scale_factors(scale1)
    >>> M = multiply_matrices(multiply_matrices(T, R), S)
    >>> # M = compose_matrix(scale1, None, angle1, trans1, None)
    >>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
    >>> allclose(scale1, scale2)
    True
    >>> allclose(angle1, angle2)
    True
    >>> allclose(trans1, trans2)
    True

    References
    ----------
    .. [1] Slabaugh, 1999. *Computing Euler angles from a rotation matrix*.
           Available at: http://www.gregslabaugh.net/publications/euler.pdf

    """
    fabs = math.fabs
    cos = math.cos
    atan2 = math.atan2
    asin = math.asin
    pi = math.pi

    detM = matrix_determinant(M)  # raises ValueError if matrix is not square

    if detM == 0:
        raise ValueError("The matrix is singular.")

    Mt = transpose_matrix(M)

    if abs(Mt[3][3]) < _EPS:
        raise ValueError('The element [3,3] of the matrix is zero.')

    for i in range(4):
        for j in range(4):
            Mt[i][j] /= Mt[3][3]

    translation = [M[0][3], M[1][3], M[2][3]]

    # scale, shear, angles
    scale = [0.0, 0.0, 0.0]
    shear = [0.0, 0.0, 0.0]
    angles = [0.0, 0.0, 0.0]

    # copy Mt[:3, :3] into row
    row = [[0, 0, 0] for i in range(3)]
    for i in range(3):
        for j in range(3):
            row[i][j] = Mt[i][j]

    scale[0] = norm_vector(row[0])
    for i in range(3):
        row[0][i] /= scale[0]

    shear[0] = dot_vectors(row[0], row[1])
    for i in range(3):
        row[1][i] -= row[0][i] * shear[0]

    scale[1] = norm_vector(row[1])
    for i in range(3):
        row[1][i] /= scale[1]
    shear[0] /= scale[1]

    shear[1] = dot_vectors(row[0], row[2])
    for i in range(3):
        row[2][i] -= row[0][i] * shear[1]

    shear[2] = dot_vectors(row[1], row[2])
    for i in range(3):
        row[2][i] -= row[0][i] * shear[2]

    scale[2] = norm_vector(row[2])
    for i in range(3):
        row[2][i] /= scale[2]
    shear[1] /= scale[2]
    shear[2] /= scale[2]

    if dot_vectors(row[0], cross_vectors(row[1], row[2])) < 0:
        scale = [-x for x in scale]
        row = [[-x for x in y] for y in row]

    # angles
    if row[0][2] != -1. and row[0][2] != 1.:
        beta1 = asin(-row[0][2])
        # beta2 = pi - beta1
        alpha1 = atan2(row[1][2] / cos(beta1), row[2][2] / cos(beta1))
        # alpha2 = atan2(row[1][2] / cos(beta2), row[2][2] / cos(beta2))
        gamma1 = atan2(row[0][1] / cos(beta1), row[0][0] / cos(beta1))
        # gamma2 = atan2(row[0][1] / cos(beta2), row[0][0] / cos(beta2))
        angles = [alpha1, beta1, gamma1]
    else:
        gamma = 0.
        if row[0][2] == -1.:
            beta = pi / 2.
            alpha = gamma + atan2(row[1][0], row[2][0])
        else:  # row[0][2] == 1
            beta = -pi / 2.
            alpha = -gamma + atan2(-row[1][0], -row[2][0])
        angles = [alpha, beta, gamma]

    # perspective
    if fabs(Mt[0][3]) > _EPS and fabs(Mt[1][3]) > _EPS and fabs(Mt[2][3]) > _EPS:
        P = deepcopy(Mt)
        P[0][3], P[1][3], P[2][3], P[3][3] = 0.0, 0.0, 0.0, 1.0
        Ptinv = matrix_inverse(transpose_matrix(P))
        perspective = multiply_matrix_vector(Ptinv, [Mt[0][3], Mt[1][3], Mt[2][3], Mt[3][3]])
    else:
        perspective = [0.0, 0.0, 0.0, 1.0]

    return scale, shear, angles, translation, perspective


def compose_matrix(scale=None, shear=None, angles=None, translation=None, perspective=None):
    """Calculates a matrix from the components of scale, shear, euler_angles,
    translation and perspective.

    Parameters
    ----------
    scale : [float, float, float]
        The 3 scale factors in x-, y-, and z-direction.
    shear : [float, float, float]
        The 3 shear factors for x-y, x-z, and y-z axes.
    angles : [float, float, float]
        The rotation specified through the 3 Euler angles about static x, y, z axes.
    translation : [float, float, float]
        The 3 values of translation.
    perspective : [float, float, float, float]
        The 4 perspective entries of the matrix.

    Returns
    -------
    list[list[float]]
        The 4x4 matrix that combines the provided transformation components.

    Examples
    --------
    >>> trans1 = [1, 2, 3]
    >>> angle1 = [-2.142, 1.141, -0.142]
    >>> scale1 = [0.123, 2, 0.5]
    >>> M = compose_matrix(scale1, None, angle1, trans1, None)
    >>> scale2, shear2, angle2, trans2, persp2 = decompose_matrix(M)
    >>> allclose(scale1, scale2)
    True
    >>> allclose(angle1, angle2)
    True
    >>> allclose(trans1, trans2)
    True

    """
    M = [[1. if i == j else 0.
          for i in range(4)] for j in range(4)]

    if perspective is not None:
        P = matrix_from_perspective_entries(perspective)
        M = multiply_matrices(M, P)
    if translation is not None:
        T = matrix_from_translation(translation)
        M = multiply_matrices(M, T)
    if angles is not None:
        R = matrix_from_euler_angles(angles, static=True, axes="xyz")
        M = multiply_matrices(M, R)
    if shear is not None:
        H = matrix_from_shear_entries(shear)
        M = multiply_matrices(M, H)
    if scale is not None:
        S = matrix_from_scale_factors(scale)
        M = multiply_matrices(M, S)

    for i in range(4):
        for j in range(4):
            M[i][j] /= M[3][3]
    return M


def identity_matrix(dim):
    """Construct an identity matrix.

    Parameters
    ----------
    dim : int
        The number of rows and/or columns of the matrix.

    Returns
    -------
    list of list
        A list of `dim` lists, with each list containing `dim` elements.
        The items on the "diagonal" are one.
        All other items are zero.

    Examples
    --------
    >>> identity_matrix(4)
    [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]

    """
    return [[1. if i == j else 0. for i in range(dim)] for j in range(dim)]


def matrix_from_frame(frame):
    """Computes a change of basis transformation from world XY to the frame.

    Parameters
    ----------
    frame : :class:`compas.geometry.Frame`
        A frame describing the targeted Cartesian coordinate system

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing the transformation from
        world coordinates to frame coordinates.

    Examples
    --------
    >>> from compas.geometry import Frame
    >>> f = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> T = matrix_from_frame(f)

    """
    M = identity_matrix(4)
    M[0][0], M[1][0], M[2][0] = frame.xaxis
    M[0][1], M[1][1], M[2][1] = frame.yaxis
    M[0][2], M[1][2], M[2][2] = frame.zaxis
    M[0][3], M[1][3], M[2][3] = frame.point
    return M


def matrix_from_frame_to_frame(frame_from, frame_to):
    """Computes a transformation between two frames.

    This transformation allows to transform geometry from one Cartesian
    coordinate system defined by `frame_from` to another Cartesian
    coordinate system defined by `frame_to`.

    Parameters
    ----------
    frame_from : :class:`compas.geometry.Frame`
        A frame defining the original Cartesian coordinate system
    frame_to : :class:`compas.geometry.Frame`
        A frame defining the targeted Cartesian coordinate system

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing the transformation from
        one frame to another.

    Examples
    --------
    >>> from compas.geometry import Frame
    >>> f1 = Frame([2, 2, 2], [0.12, 0.58, 0.81], [-0.80, 0.53, -0.26])
    >>> f2 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> T = matrix_from_frame_to_frame(f1, f2)

    """
    T1 = matrix_from_frame(frame_from)
    T2 = matrix_from_frame(frame_to)
    return multiply_matrices(T2, matrix_inverse(T1))


def matrix_from_change_of_basis(frame_from, frame_to):
    """Computes a change of basis transformation between two frames.

    A basis change is essentially a remapping of geometry from one
    coordinate system to another.

    Parameters
    ----------
    frame_from : :class:`compas.geometry.Frame`
        A frame defining the original Cartesian coordinate system
    frame_to : :class:`compas.geometry.Frame`
        A frame defining the targeted Cartesian coordinate system

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing a change of basis.
    Examples
    --------
    >>> from compas.geometry import Point, Frame
    >>> f1 = Frame([2, 2, 2], [0.12, 0.58, 0.81], [-0.80, 0.53, -0.26])
    >>> f2 = Frame([1, 1, 1], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> T = matrix_from_change_of_basis(f1, f2)

    """
    T1 = matrix_from_frame(frame_from)
    T2 = matrix_from_frame(frame_to)
    return multiply_matrices(matrix_inverse(T2), T1)


def matrix_from_euler_angles(euler_angles, static=True, axes='xyz'):
    """Calculates a rotation matrix from Euler angles.

    In 3D space any orientation can be achieved by composing three elemental
    rotations, rotations about the axes (x, y, z) of a coordinate system. A
    triple of Euler angles can be interpreted in 24 ways, which depends on if
    the rotations are applied to a static (extrinsic) or rotating (intrinsic)
    frame and the order of axes.

    Parameters
    ----------
    euler_angles : [float, float, float]
        Three numbers that represent the angles of rotations about the defined axes.
    static : bool, optional
        If True the rotations are applied to a static frame.
        If False, to a rotational.
    axes : Literal['xyz', 'yzx', 'zxy'], optional
        A 3 character string specifying order of the axes.

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing a rotation.

    Examples
    --------
    >>> ea1 = 1.4, 0.5, 2.3
    >>> R = matrix_from_euler_angles(ea1)
    >>> ea2 = euler_angles_from_matrix(R)
    >>> allclose(ea1, ea2)
    True

    """
    global _SPEC2TUPLE
    global _NEXT_SPEC

    sin = math.sin
    cos = math.cos

    ai, aj, ak = euler_angles

    if static:
        firstaxis, parity, repetition, frame = _SPEC2TUPLE["s" + axes]
    else:
        firstaxis, parity, repetition, frame = _SPEC2TUPLE["r" + axes]

    i = firstaxis
    j = _NEXT_SPEC[i + parity]
    k = _NEXT_SPEC[i - parity + 1]

    if frame:
        ai, ak = ak, ai
    if parity:
        ai, aj, ak = -ai, -aj, -ak

    si, sj, sk = sin(ai), sin(aj), sin(ak)
    ci, cj, ck = cos(ai), cos(aj), cos(ak)
    cc, cs = ci * ck, ci * sk
    sc, ss = si * ck, si * sk

    M = [[1. if x == y else 0. for x in range(4)] for y in range(4)]
    if repetition:
        M[i][i] = cj
        M[i][j] = sj * si
        M[i][k] = sj * ci
        M[j][i] = sj * sk
        M[j][j] = -cj * ss + cc
        M[j][k] = -cj * cs - sc
        M[k][i] = -sj * ck
        M[k][j] = cj * sc + cs
        M[k][k] = cj * cc - ss
    else:
        M[i][i] = cj * ck
        M[i][j] = sj * sc - cs
        M[i][k] = sj * cc + ss
        M[j][i] = cj * sk
        M[j][j] = sj * ss + cc
        M[j][k] = sj * cs - sc
        M[k][i] = -sj
        M[k][j] = cj * si
        M[k][k] = cj * ci

    return M


def euler_angles_from_matrix(M, static=True, axes='xyz'):
    """Returns Euler angles from the rotation matrix M according to specified
    axis sequence and type of rotation.

    Parameters
    ----------
    M : list[list[float]]
        The 3x3 or 4x4 matrix in row-major order.
    static : bool, optional
        If True the rotations are applied to a static frame.
        If False, to a rotational.
    axes : str, optional
        A 3 character string specifying order of the axes.

    Returns
    -------
    list[float]
        The 3 Euler angles.
    Examples
    --------
    >>> ea1 = 1.4, 0.5, 2.3
    >>> R = matrix_from_euler_angles(ea1)
    >>> ea2 = euler_angles_from_matrix(R)
    >>> allclose(ea1, ea2)
    True

    """
    global _SPEC2TUPLE
    global _NEXT_SPEC
    global _EPS

    atan2 = math.atan2
    sqrt = math.sqrt

    if static:
        firstaxis, parity, repetition, frame = _SPEC2TUPLE["s" + axes]
    else:
        firstaxis, parity, repetition, frame = _SPEC2TUPLE["r" + axes]

    i = firstaxis
    j = _NEXT_SPEC[i + parity]
    k = _NEXT_SPEC[i - parity + 1]

    if repetition:
        sy = sqrt(M[i][j] * M[i][j] + M[i][k] * M[i][k])
        if sy > _EPS:
            ax = atan2(M[i][j], M[i][k])
            ay = atan2(sy, M[i][i])
            az = atan2(M[j][i], -M[k][i])
        else:
            ax = atan2(-M[j][k], M[j][j])
            ay = atan2(sy, M[i][i])
            az = 0.0
    else:
        cy = sqrt(M[i][i] * M[i][i] + M[j][i] * M[j][i])
        if cy > _EPS:
            ax = atan2(M[k][j], M[k][k])
            ay = atan2(-M[k][i], cy)
            az = atan2(M[j][i], M[i][i])
        else:
            ax = atan2(-M[j][k], M[j][j])
            ay = atan2(-M[k][i], cy)
            az = 0.0

    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return [ax, ay, az]


def matrix_from_axis_and_angle(axis, angle, point=None):
    """Calculates a rotation matrix from an rotation axis, an angle and an
    optional point of rotation.

    Parameters
    ----------
    axis : [float, float, float]
        Three numbers that represent the axis of rotation.
    angle : float
        The rotation angle in radians.
    point : [float, float, float] | :class:`compas.geometry.Point`, optional
        A point to perform a rotation around an origin other than [0, 0, 0].

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing a rotation.

    Notes
    -----
    The rotation is based on the right hand rule, i.e. anti-clockwise if the
    axis of rotation points towards the observer.

    Examples
    --------
    >>> axis1 = normalize_vector([-0.043, -0.254, 0.617])
    >>> angle1 = 0.1
    >>> R = matrix_from_axis_and_angle(axis1, angle1)
    >>> axis2, angle2 = axis_and_angle_from_matrix(R)
    >>> allclose(axis1, axis2)
    True
    >>> allclose([angle1], [angle2])
    True

    """
    if not point:
        point = [0.0, 0.0, 0.0]

    axis = list(axis)
    if length_vector(axis):
        axis = normalize_vector(axis)

    sina = math.sin(angle)
    cosa = math.cos(angle)

    R = [[cosa, 0.0, 0.0], [0.0, cosa, 0.0], [0.0, 0.0, cosa]]

    outer_product = [[axis[i] * axis[j] * (1.0 - cosa) for i in range(3)] for j in range(3)]
    R = [[R[i][j] + outer_product[i][j] for i in range(3)] for j in range(3)]

    axis = scale_vector(axis, sina)
    m = [[0.0, -axis[2], axis[1]],
         [axis[2], 0.0, -axis[0]],
         [-axis[1], axis[0], 0.0]]

    M = identity_matrix(4)
    for i in range(3):
        for j in range(3):
            R[i][j] += m[i][j]
            M[i][j] = R[i][j]

    # rotation about axis, angle AND point includes also translation
    t = subtract_vectors(point, multiply_matrix_vector(R, point))
    M[0][3] = t[0]
    M[1][3] = t[1]
    M[2][3] = t[2]
    return M


def matrix_from_axis_angle_vector(axis_angle_vector, point=[0, 0, 0]):
    """Calculates a rotation matrix from an axis-angle vector.

    Parameters
    ----------
    axis_angle_vector : [float, float, float]
        Three numbers that represent the axis of rotation and angle of
        rotation through the vector's magnitude.
    point : [float, float, float] | :class:`compas.geometry.Point`, optional
        A point to perform a rotation around an origin other than [0, 0, 0].

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing a rotation.
    Examples
    --------
    >>> aav1 = [-0.043, -0.254, 0.617]
    >>> R = matrix_from_axis_angle_vector(aav1)
    >>> aav2 = axis_angle_vector_from_matrix(R)
    >>> allclose(aav1, aav2)
    True

    """
    axis = list(axis_angle_vector)
    angle = length_vector(axis_angle_vector)
    return matrix_from_axis_and_angle(axis, angle, point)


def axis_and_angle_from_matrix(M):
    """Returns the axis and the angle of the rotation matrix M.

    Parameters
    ----------
    M : list[list[float]]
        The 4-by-4 transformation matrix.

    Returns
    -------
    [float, float, float]
        The rotation axis.
    float
        The rotation angle in radians.

    """
    fabs = math.fabs
    sqrt = math.sqrt

    eps = 0.01   # margin to allow for rounding errors
    eps2 = 0.1   # margin to distinguish between 0 and 180 degrees

    if all(fabs(M[i][j] - M[j][i]) < eps for i, j in [(0, 1), (0, 2), (1, 2)]):
        if (all(fabs(M[i][j] - M[j][i]) < eps2 for i, j in [(0, 1), (0, 2), (1, 2)]) and
                fabs(M[0][0] + M[1][1] + M[2][2] - 3) < eps2):
            return [0, 0, 0], 0

        angle = math.pi
        xx = (M[0][0] + 1) / 2
        yy = (M[1][1] + 1) / 2
        zz = (M[2][2] + 1) / 2
        xy = (M[0][1] + M[1][0]) / 4
        xz = (M[0][2] + M[2][0]) / 4
        yz = (M[1][2] + M[2][1]) / 4
        root_half = sqrt(0.5)

        if (xx > yy) and (xx > zz):
            if xx < eps:
                axis = [0, root_half, root_half]
            else:
                x = sqrt(xx)
                axis = [x, xy / x, xz / x]
        elif yy > zz:
            if yy < eps:
                axis = [root_half, 0, root_half]
            else:
                y = sqrt(yy)
                axis = [xy / y, y, yz / y]
        else:
            if zz < eps:
                axis = [root_half, root_half, 0]
            else:
                z = sqrt(zz)
                axis = [xz / z, yz / z, z]

        return axis, angle

    s = sqrt(
        (M[2][1] - M[1][2]) * (M[2][1] - M[1][2]) +
        (M[0][2] - M[2][0]) * (M[0][2] - M[2][0]) +
        (M[1][0] - M[0][1]) * (M[1][0] - M[0][1]))  # should this also be an eps?
    if fabs(s) < 0.001:
        s = 1
    angle = math.acos((M[0][0] + M[1][1] + M[2][2] - 1) / 2)

    x = (M[2][1] - M[1][2]) / s
    y = (M[0][2] - M[2][0]) / s
    z = (M[1][0] - M[0][1]) / s

    return [x, y, z], angle


def axis_angle_vector_from_matrix(M):
    """Returns the axis-angle vector of the rotation matrix M.

    Parameters
    ----------
    M : list[list[float]]
        The 4-by-4 transformation matrix.

    Returns
    -------
    [float, float, float]
        The axis-angle vector.

    """
    axis, angle = axis_and_angle_from_matrix(M)
    return scale_vector(axis, angle)


def matrix_from_quaternion(quaternion):
    """Calculates a rotation matrix from quaternion coefficients.

    Parameters
    ----------
    quaternion : [float, float, float, float]
        Four numbers that represents the four coefficient values of a quaternion.

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing a rotation.

    Raises
    ------
    ValueError
        If quaternion is invalid.

    Examples
    --------
    >>> q1 = [0.945, -0.021, -0.125, 0.303]
    >>> R = matrix_from_quaternion(q1)
    >>> q2 = quaternion_from_matrix(R)
    >>> allclose(q1, q2, tol=1e-03)
    True

    """
    sqrt = math.sqrt

    q = quaternion
    n = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2  # dot product

    # perhaps this should not be hard-coded?
    eps = 1.0e-15

    if n < eps:
        raise ValueError("Invalid quaternion, dot product must be != 0.")

    q = [v * sqrt(2.0 / n) for v in q]
    q = [[q[i] * q[j] for i in range(4)] for j in range(4)]  # outer_product

    rotation = [
        [1.0 - q[2][2] - q[3][3], q[1][2] - q[3][0], q[1][3] + q[2][0], 0.0],
        [q[1][2] + q[3][0], 1.0 - q[1][1] - q[3][3], q[2][3] - q[1][0], 0.0],
        [q[1][3] - q[2][0], q[2][3] + q[1][0], 1.0 - q[1][1] - q[2][2], 0.0],
        [0.0, 0.0, 0.0, 1.0]]
    return rotation


def quaternion_from_matrix(M):
    """Returns the 4 quaternion coefficients from a rotation matrix.

    Parameters
    ----------
    M : list[list[float]]
        The coefficients of the rotation matrix, row per row.
    Returns
    -------
    [float, float, float, float]
        The quaternion coefficients.

    Examples
    --------
    >>> q1 = [0.945, -0.021, -0.125, 0.303]
    >>> R = matrix_from_quaternion(q1)
    >>> q2 = quaternion_from_matrix(R)
    >>> allclose(q1, q2, tol=1e-03)
    True

    """
    sqrt = math.sqrt

    qw, qx, qy, qz = 0, 0, 0, 0
    trace = M[0][0] + M[1][1] + M[2][2]

    if trace > 0.0:
        s = 0.5 / sqrt(trace + 1.0)
        qw = 0.25 / s
        qx = (M[2][1] - M[1][2]) * s
        qy = (M[0][2] - M[2][0]) * s
        qz = (M[1][0] - M[0][1]) * s

    elif (M[0][0] > M[1][1]) and (M[0][0] > M[2][2]):
        s = 2.0 * sqrt(1.0 + M[0][0] - M[1][1] - M[2][2])
        qw = (M[2][1] - M[1][2]) / s
        qx = 0.25 * s
        qy = (M[0][1] + M[1][0]) / s
        qz = (M[0][2] + M[2][0]) / s

    elif M[1][1] > M[2][2]:
        s = 2.0 * sqrt(1.0 + M[1][1] - M[0][0] - M[2][2])
        qw = (M[0][2] - M[2][0]) / s
        qx = (M[0][1] + M[1][0]) / s
        qy = 0.25 * s
        qz = (M[1][2] + M[2][1]) / s

    else:
        s = 2.0 * sqrt(1.0 + M[2][2] - M[0][0] - M[1][1])
        qw = (M[1][0] - M[0][1]) / s
        qx = (M[0][2] + M[2][0]) / s
        qy = (M[1][2] + M[2][1]) / s
        qz = 0.25 * s

    return [qw, qx, qy, qz]


def matrix_from_basis_vectors(xaxis, yaxis):
    """Creates a rotation matrix from basis vectors (= orthonormal vectors).

    Parameters
    ----------
    xaxis : [float, float, float] | :class:`compas.geometry.Vector`
        The x-axis of the frame.
    yaxis : [float, float, float] | :class:`compas.geometry.Vector`
        The y-axis of the frame.

    Returns
    -------
    list[list[float]]
        A 4x4 transformation matrix representing a rotation.

    Notes
    -----
    .. code-block:: none

        [ x0 y0 z0 0 ]
        [ x1 y1 z1 0 ]
        [ x2 y2 z2 0 ]
        [ 0  0  0  1 ]

    Examples
    --------
    >>> xaxis = [0.68, 0.68, 0.27]
    >>> yaxis = [-0.67, 0.73, -0.15]
    >>> R = matrix_from_basis_vectors(xaxis, yaxis)

    """
    xaxis = normalize_vector(list(xaxis))
    yaxis = normalize_vector(list(yaxis))
    zaxis = cross_vectors(xaxis, yaxis)
    yaxis = cross_vectors(zaxis, xaxis)

    R = identity_matrix(4)
    R[0][0], R[1][0], R[2][0] = xaxis
    R[0][1], R[1][1], R[2][1] = yaxis
    R[0][2], R[1][2], R[2][2] = zaxis
    return R


def basis_vectors_from_matrix(R):
    """Returns the basis vectors from the rotation matrix R.

    Parameters
    ----------
    R : list[list[float]]
        A 4-by-4 transformation matrix, or a 3-by-3 rotation matrix.

    Returns
    -------
    [float, float, float]
        The first basis vector of the rotation.
    [float, float, float]
        The second basis vector of the rotation.

    Raises
    ------
    ValueError
        If rotation matrix is invalid.

    Examples
    --------
    >>> from compas.geometry import Frame
    >>> f = Frame([0, 0, 0], [0.68, 0.68, 0.27], [-0.67, 0.73, -0.15])
    >>> R = matrix_from_frame(f)
    >>> xaxis, yaxis = basis_vectors_from_matrix(R)

    """
    xaxis = [R[0][0], R[1][0], R[2][0]]
    yaxis = [R[0][1], R[1][1], R[2][1]]
    zaxis = [R[0][2], R[1][2], R[2][2]]

    if not allclose(zaxis, cross_vectors(xaxis, yaxis)):
        raise ValueError("Matrix is invalid rotation matrix.")

    return xaxis, yaxis


def matrix_from_translation(translation):
    """Returns a 4x4 translation matrix in row-major order.

    Parameters
    ----------
    translation : [float, float, float]
        The x, y and z components of the translation.

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing a translation.

    Notes
    -----
    .. code-block:: none

        [ . . . 0 ]
        [ . . . 1 ]
        [ . . . 2 ]
        [ . . . . ]

    Examples
    --------
    >>> T = matrix_from_translation([1, 2, 3])

    """
    M = identity_matrix(4)
    M[0][3] = float(translation[0])
    M[1][3] = float(translation[1])
    M[2][3] = float(translation[2])
    return M


def translation_from_matrix(M):
    """Returns the 3 values of translation from the matrix M.

    Parameters
    ----------
    M : list[list[float]]
        A 4-by-4 transformation matrix.
    Returns
    -------
    [float, float, float]
        The translation vector.

    """
    return [M[0][3], M[1][3], M[2][3]]


def matrix_from_orthogonal_projection(plane):
    """Returns an orthogonal projection matrix to project onto a plane.

    Parameters
    ----------
    plane : [point, normal] | :class:`compas.geometry.Plane`
        The plane to project onto.

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing an orthogonal projection.

    Examples
    --------
    >>> point = [0, 0, 0]
    >>> normal = [0, 0, 1]
    >>> plane = (point, normal)
    >>> P = matrix_from_orthogonal_projection(plane)

    """
    point, normal = plane
    T = identity_matrix(4)
    normal = normalize_vector(normal)

    for j in range(3):
        for i in range(3):
            T[i][j] -= normal[i] * normal[j]  # outer_product

    T[0][3], T[1][3], T[2][3] = scale_vector(normal, dot_vectors(point, normal))
    return T


def matrix_from_parallel_projection(plane, direction):
    """Returns an parallel projection matrix to project onto a plane.

    Parameters
    ----------
    plane : [point, normal] | :class:`compas.geometry.Plane`
        The plane to project onto.
    direction : [float, float, float] | :class:`compas.geometry.Vector`
        Direction of the projection.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Examples
    --------
    >>> point = [0, 0, 0]
    >>> normal = [0, 0, 1]
    >>> plane = (point, normal)
    >>> direction = [1, 1, 1]
    >>> P = matrix_from_parallel_projection(plane, direction)

    """
    point, normal = plane
    T = identity_matrix(4)
    normal = normalize_vector(normal)

    scale = dot_vectors(direction, normal)
    for j in range(3):
        for i in range(3):
            T[i][j] -= direction[i] * normal[j] / scale

    T[0][3], T[1][3], T[2][3] = scale_vector(direction, dot_vectors(point, normal) / scale)
    return T


def matrix_from_perspective_projection(plane, center_of_projection):
    """Returns a perspective projection matrix to project onto a plane along
    lines that emanate from a single point, called the center of projection.

    Parameters
    ----------
    plane : [point, normal] | :class:`compas.geometry.Plane`
        The plane to project onto.
    center_of_projection : [float, float, float] | :class:`compas.geometry.Point`
        The camera view point.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Examples
    --------
    >>> point = [0, 0, 0]
    >>> normal = [0, 0, 1]
    >>> plane = (point, normal)
    >>> center_of_projection = [1, 1, 0]
    >>> P = matrix_from_perspective_projection(plane, center_of_projection)

    """
    point, normal = plane
    T = identity_matrix(4)
    normal = normalize_vector(normal)

    T[0][0] = T[1][1] = T[2][2] = dot_vectors(subtract_vectors(center_of_projection, point), normal)

    for j in range(3):
        for i in range(3):
            T[i][j] -= center_of_projection[i] * normal[j]

    T[0][3], T[1][3], T[2][3] = scale_vector(center_of_projection, dot_vectors(point, normal))
    for i in range(3):
        T[3][i] -= normal[i]
    T[3][3] = dot_vectors(center_of_projection, normal)
    return T


def matrix_from_perspective_entries(perspective):
    """Returns a matrix from perspective entries.

    Parameters
    ----------
    values : [float, float, float, float]
        The 4 perspective entries of a matrix.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Notes
    -----
    .. code-block:: none

        [ . . . . ]
        [ . . . . ]
        [ . . . . ]
        [ 0 1 2 3 ]

    """
    M = identity_matrix(4)
    M[3][0] = float(perspective[0])
    M[3][1] = float(perspective[1])
    M[3][2] = float(perspective[2])
    M[3][3] = float(perspective[3])
    return M


def matrix_from_shear_entries(shear_entries):
    """Returns a shear matrix from the 3 factors for x-y, x-z, and y-z axes.
    Parameters
    ----------
    shear_entries : [float, float, float]
        The 3 shear factors for x-y, x-z, and y-z axes.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Notes
    -----
    .. code-block:: none

        [ . 0 1 . ]
        [ . . 2 . ]
        [ . . . . ]
        [ . . . . ]

    Examples
    --------
    >>> Sh = matrix_from_shear_entries([1, 2, 3])

    """
    M = identity_matrix(4)
    M[0][1] = float(shear_entries[0])
    M[0][2] = float(shear_entries[1])
    M[1][2] = float(shear_entries[2])
    return M


def matrix_from_shear(angle, direction, point, normal):
    """Constructs a shear matrix by an angle along the direction vector on
    the shear plane (defined by point and normal).

    Parameters
    ----------
    angle : float
        The angle in radians.
    direction : [float, float, float] | :class:`compas.geometry.Vector`
        The direction vector as list of 3 numbers.
        It must be orthogonal to the normal vector.
    point : [float, float, float] | :class:`compas.geometry.Point`
        The point of the shear plane as list of 3 numbers.
    normal : [float, float, float] | :class:`compas.geometry.Vector`
        The normal of the shear plane as list of 3 numbers.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Raises
    ------
    ValueError
        If direction and normal are not orthogonal.

    Notes
    -----
    A point P is transformed by the shear matrix into P" such that the
    vector P-P" is parallel to the direction vector and its extent is given
    by the angle of P-P'-P", where P' is the orthogonal projection of P onto
    the shear plane (defined by point and normal).

    Examples
    --------
    >>> angle = 0.1
    >>> direction = [0.1, 0.2, 0.3]
    >>> point = [4, 3, 1]
    >>> normal = cross_vectors(direction, [1, 0.3, -0.1])
    >>> S = matrix_from_shear(angle, direction, point, normal)

    """
    fabs = math.fabs

    normal = normalize_vector(normal)
    direction = normalize_vector(direction)

    if fabs(dot_vectors(normal, direction)) > _EPS:
        raise ValueError('Direction and normal vectors are not orthogonal')

    angle = math.tan(angle)
    M = identity_matrix(4)

    for j in range(3):
        for i in range(3):
            M[i][j] += angle * direction[i] * normal[j]

    M[0][3], M[1][3], M[2][3] = scale_vector(direction, -angle * dot_vectors(point, normal))

    return M


def matrix_from_scale_factors(scale_factors):
    """Returns a 4x4 scaling transformation.

    Parameters
    ----------
    scale_factors : [float, float, float]
        Three numbers defining the scaling factors in x, y, and z respectively.

    Returns
    -------
    list[list[float]]
        A 4-by-4 transformation matrix.

    Notes
    -----
    .. code-block:: python

        [ 0 . . . ]
        [ . 1 . . ]
        [ . . 2 . ]
        [ . . . . ]

    Examples
    --------
    >>> Sc = matrix_from_scale_factors([1, 2, 3])

    """
    M = identity_matrix(4)
    M[0][0] = float(scale_factors[0])
    M[1][1] = float(scale_factors[1])
    M[2][2] = float(scale_factors[2])
    return M


def quaternion_from_euler_angles(e, static=True, axes='xyz'):
    """Returns a quaternion from Euler angles.

    Parameters
    ----------
    euler_angles : [float, float, float]
        Three numbers that represent the angles of rotations about the specified axes.
    static : bool, optional
        If True, the rotations are applied to a static frame.
        If False, the rotations are applied to a rotational frame.
    axes : str, optional
        A three-character string specifying the order of the axes.

    Returns
    -------
    [float, float, float, float]
        Quaternion as a list of four real values ``[w, x, y, z]``.

    """
    m = matrix_from_euler_angles(e, static, axes)
    q = quaternion_from_matrix(m)
    return q


def euler_angles_from_quaternion(q, static=True, axes='xyz'):
    """Returns Euler angles from a quaternion.
    Parameters
    ----------
    quaternion : [float, float, float, float]
        Quaternion as a list of four real values ``[w, x, y, z]``.
    static : bool, optional
        If True, the rotations are applied to a static frame.
        If False, the rotations are applied to a rotational frame.
    axes : str, optional
        A three-character string specifying the order of the axes.

    Returns
    -------
    [float, float, float]
        Euler angles as a list of three real values ``[a, b, c]``.

    """
    m = matrix_from_quaternion(q)
    e = euler_angles_from_matrix(m, static, axes)
    return e


def quaternion_from_axis_angle(axis, angle):
    """Returns a quaternion describing a rotation around the given axis by
    the given angle.

    Parameters
    ----------
    axis : [float, float, float] | :class:`compas.geometry.Vector`
        XYZ coordinates of the rotation axis vector.
    angle : float
        Angle of rotation in radians.

    Returns
    -------
    [float, float, float, float]
        Quaternion as a list of four real values ``[qw, qx, qy, qz]``.

    Examples
    --------
    >>> axis = [1.0, 0.0, 0.0]
    >>> angle = math.pi/2
    >>> q = quaternion_from_axis_angle(axis, angle)
    >>> allclose(q, [math.sqrt(2)/2, math.sqrt(2)/2, 0, 0])
    True

    """
    m = matrix_from_axis_and_angle(axis, angle, None)
    q = quaternion_from_matrix(m)
    return q


def axis_angle_from_quaternion(q):
    """Returns an axis and an angle of rotation from the given quaternion.

    Parameters
    ----------
    q : [float, float, float, float]
        Quaternion as a list of four real values ``[qw, qx, qy, qz]``.

    Returns
    -------
    axis : [float, float, float]
        XYZ coordinates of the rotation axis vector.
    angle : float
        Angle of rotation in radians.

    Examples
    --------
    >>> q = [1., 1., 0., 0.]
    >>> axis, angle = axis_angle_from_quaternion(q)
    >>> allclose(axis, [1., 0., 0.])
    True
    >>> allclose([angle], [math.pi/2], 1e-6)
    True

    """
    m = matrix_from_quaternion(q)
    axis, angle = axis_and_angle_from_matrix(m)
    return axis, angle
[ "compas.geometry.multiply_matrix_vector", "copy.deepcopy", "compas.geometry.transpose_matrix", "compas.geometry.scale_vector", "math.tan", "compas.geometry.multiply_matrices", "compas.geometry.length_vector", "math.sin", "compas.geometry.cross_vectors", "math.acos", "compas.geometry.subtract_vectors", "math.cos", "compas.geometry.norm_vector", "compas.geometry.dot_vectors", "compas.geometry.normalize_vector" ]
[((5491, 5518), 'compas.geometry.transpose_matrix', 'transpose_matrix', (['cofactors'], {}), '(cofactors)\n', (5507, 5518), False, 'from compas.geometry import transpose_matrix\n'), ((7541, 7560), 'compas.geometry.transpose_matrix', 'transpose_matrix', (['M'], {}), '(M)\n', (7557, 7560), False, 'from compas.geometry import transpose_matrix\n'), ((8074, 8093), 'compas.geometry.norm_vector', 'norm_vector', (['row[0]'], {}), '(row[0])\n', (8085, 8093), False, 'from compas.geometry import norm_vector\n'), ((8163, 8190), 'compas.geometry.dot_vectors', 'dot_vectors', (['row[0]', 'row[1]'], {}), '(row[0], row[1])\n', (8174, 8190), False, 'from compas.geometry import dot_vectors\n'), ((8272, 8291), 'compas.geometry.norm_vector', 'norm_vector', (['row[1]'], {}), '(row[1])\n', (8283, 8291), False, 'from compas.geometry import norm_vector\n'), ((8386, 8413), 'compas.geometry.dot_vectors', 'dot_vectors', (['row[0]', 'row[2]'], {}), '(row[0], row[2])\n', (8397, 8413), False, 'from compas.geometry import dot_vectors\n'), ((8495, 8522), 'compas.geometry.dot_vectors', 'dot_vectors', (['row[1]', 'row[2]'], {}), '(row[1], row[2])\n', (8506, 8522), False, 'from compas.geometry import dot_vectors\n'), ((8604, 8623), 'compas.geometry.norm_vector', 'norm_vector', (['row[2]'], {}), '(row[2])\n', (8615, 8623), False, 'from compas.geometry import norm_vector\n'), ((20948, 20967), 'compas.geometry.length_vector', 'length_vector', (['axis'], {}), '(axis)\n', (20961, 20967), False, 'from compas.geometry import length_vector\n'), ((21019, 21034), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (21027, 21034), False, 'import math\n'), ((21046, 21061), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (21054, 21061), False, 'import math\n'), ((21310, 21334), 'compas.geometry.scale_vector', 'scale_vector', (['axis', 'sina'], {}), '(axis, sina)\n', (21322, 21334), False, 'from compas.geometry import scale_vector\n'), ((22640, 22672), 'compas.geometry.length_vector', 'length_vector', (['axis_angle_vector'], {}), '(axis_angle_vector)\n', (22653, 22672), False, 'from compas.geometry import length_vector\n'), ((24605, 24653), 'math.acos', 'math.acos', (['((M[0][0] + M[1][1] + M[2][2] - 1) / 2)'], {}), '((M[0][0] + M[1][1] + M[2][2] - 1) / 2)\n', (24614, 24653), False, 'import math\n'), ((25132, 25157), 'compas.geometry.scale_vector', 'scale_vector', (['axis', 'angle'], {}), '(axis, angle)\n', (25144, 25157), False, 'from compas.geometry import scale_vector\n'), ((28857, 28884), 'compas.geometry.cross_vectors', 'cross_vectors', (['xaxis', 'yaxis'], {}), '(xaxis, yaxis)\n', (28870, 28884), False, 'from compas.geometry import cross_vectors\n'), ((28897, 28924), 'compas.geometry.cross_vectors', 'cross_vectors', (['zaxis', 'xaxis'], {}), '(zaxis, xaxis)\n', (28910, 28924), False, 'from compas.geometry import cross_vectors\n'), ((31702, 31726), 'compas.geometry.normalize_vector', 'normalize_vector', (['normal'], {}), '(normal)\n', (31718, 31726), False, 'from compas.geometry import normalize_vector\n'), ((32664, 32688), 'compas.geometry.normalize_vector', 'normalize_vector', (['normal'], {}), '(normal)\n', (32680, 32688), False, 'from compas.geometry import normalize_vector\n'), ((32702, 32732), 'compas.geometry.dot_vectors', 'dot_vectors', (['direction', 'normal'], {}), '(direction, normal)\n', (32713, 32732), False, 'from compas.geometry import dot_vectors\n'), ((33797, 33821), 'compas.geometry.normalize_vector', 'normalize_vector', (['normal'], {}), '(normal)\n', (33813, 33821), False, 'from compas.geometry import normalize_vector\n'), ((34198, 34239), 'compas.geometry.dot_vectors', 'dot_vectors', (['center_of_projection', 'normal'], {}), '(center_of_projection, normal)\n', (34209, 34239), False, 'from compas.geometry import dot_vectors\n'), ((37055, 37079), 'compas.geometry.normalize_vector', 'normalize_vector', (['normal'], {}), '(normal)\n', (37071, 37079), False, 'from compas.geometry import normalize_vector\n'), ((37096, 37123), 'compas.geometry.normalize_vector', 'normalize_vector', (['direction'], {}), '(direction)\n', (37112, 37123), False, 'from compas.geometry import normalize_vector\n'), ((37266, 37281), 'math.tan', 'math.tan', (['angle'], {}), '(angle)\n', (37274, 37281), False, 'import math\n'), ((9734, 9746), 'copy.deepcopy', 'deepcopy', (['Mt'], {}), '(Mt)\n', (9742, 9746), False, 'from copy import deepcopy\n'), ((9885, 9956), 'compas.geometry.multiply_matrix_vector', 'multiply_matrix_vector', (['Ptinv', '[Mt[0][3], Mt[1][3], Mt[2][3], Mt[3][3]]'], {}), '(Ptinv, [Mt[0][3], Mt[1][3], Mt[2][3], Mt[3][3]])\n', (9907, 9956), False, 'from compas.geometry import multiply_matrix_vector\n'), ((11464, 11487), 'compas.geometry.multiply_matrices', 'multiply_matrices', (['M', 'P'], {}), '(M, P)\n', (11481, 11487), False, 'from compas.geometry import multiply_matrices\n'), ((11581, 11604), 'compas.geometry.multiply_matrices', 'multiply_matrices', (['M', 'T'], {}), '(M, T)\n', (11598, 11604), False, 'from compas.geometry import multiply_matrices\n'), ((11714, 11737), 'compas.geometry.multiply_matrices', 'multiply_matrices', (['M', 'R'], {}), '(M, R)\n', (11731, 11737), False, 'from compas.geometry import multiply_matrices\n'), ((11821, 11844), 'compas.geometry.multiply_matrices', 'multiply_matrices', (['M', 'H'], {}), '(M, H)\n', (11838, 11844), False, 'from compas.geometry import multiply_matrices\n'), ((11928, 11951), 'compas.geometry.multiply_matrices', 'multiply_matrices', (['M', 'S'], {}), '(M, S)\n', (11945, 11951), False, 'from compas.geometry import multiply_matrices\n'), ((20984, 21006), 'compas.geometry.normalize_vector', 'normalize_vector', (['axis'], {}), '(axis)\n', (21000, 21006), False, 'from compas.geometry import normalize_vector\n'), ((21682, 21714), 'compas.geometry.multiply_matrix_vector', 'multiply_matrix_vector', (['R', 'point'], {}), '(R, point)\n', (21704, 21714), False, 'from compas.geometry import multiply_matrix_vector\n'), ((31894, 31920), 'compas.geometry.dot_vectors', 'dot_vectors', (['point', 'normal'], {}), '(point, normal)\n', (31905, 31920), False, 'from compas.geometry import dot_vectors\n'), ((33869, 33914), 'compas.geometry.subtract_vectors', 'subtract_vectors', (['center_of_projection', 'point'], {}), '(center_of_projection, point)\n', (33885, 33914), False, 'from compas.geometry import subtract_vectors\n'), ((34102, 34128), 'compas.geometry.dot_vectors', 'dot_vectors', (['point', 'normal'], {}), '(point, normal)\n', (34113, 34128), False, 'from compas.geometry import dot_vectors\n'), ((8756, 8785), 'compas.geometry.cross_vectors', 'cross_vectors', (['row[1]', 'row[2]'], {}), '(row[1], row[2])\n', (8769, 8785), False, 'from compas.geometry import cross_vectors\n'), ((9842, 9861), 'compas.geometry.transpose_matrix', 'transpose_matrix', (['P'], {}), '(P)\n', (9858, 9861), False, 'from compas.geometry import transpose_matrix\n'), ((29936, 29963), 'compas.geometry.cross_vectors', 'cross_vectors', (['xaxis', 'yaxis'], {}), '(xaxis, yaxis)\n', (29949, 29963), False, 'from compas.geometry import cross_vectors\n'), ((32896, 32922), 'compas.geometry.dot_vectors', 'dot_vectors', (['point', 'normal'], {}), '(point, normal)\n', (32907, 32922), False, 'from compas.geometry import dot_vectors\n'), ((37137, 37167), 'compas.geometry.dot_vectors', 'dot_vectors', (['normal', 'direction'], {}), '(normal, direction)\n', (37148, 37167), False, 'from compas.geometry import dot_vectors\n'), ((37482, 37508), 'compas.geometry.dot_vectors', 'dot_vectors', (['point', 'normal'], {}), '(point, normal)\n', (37493, 37508), False, 'from compas.geometry import dot_vectors\n')]
""" This is an example how to train SentenceTransformers in a multi-task setup. The system trains BERT on the AllNLI and on the STSbenchmark dataset. """ from torch.utils.data import DataLoader import math from sentence_transformers import models, losses from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator from sentence_transformers.readers import * import logging from datetime import datetime import gzip import csv import os #### Just some code to print debug information to stdout logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO, handlers=[LoggingHandler()]) #### /print debug information to stdout # Read the dataset model_name = 'bert-base-uncased' batch_size = 16 model_save_path = 'output/training_multi-task_'+model_name+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S") #Check if dataset exsist. If not, download and extract it nli_dataset_path = 'datasets/AllNLI.tsv.gz' sts_dataset_path = 'datasets/stsbenchmark.tsv.gz' if not os.path.exists(nli_dataset_path): util.http_get('https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/AllNLI.tsv.gz', nli_dataset_path) if not os.path.exists(sts_dataset_path): util.http_get('https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/stsbenchmark.tsv.gz', sts_dataset_path) # Use BERT for mapping tokens to embeddings word_embedding_model = models.Transformer(model_name) # Apply mean pooling to get one fixed sized sentence vector pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False) model = SentenceTransformer(modules=[word_embedding_model, pooling_model]) # Convert the dataset to a DataLoader ready for training logging.info("Read AllNLI train dataset") label2int = {"contradiction": 0, "entailment": 1, "neutral": 2} train_nli_samples = [] with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn: reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: if row['split'] == 'train': label_id = label2int[row['label']] train_nli_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id)) train_data_nli = SentencesDataset(train_nli_samples, model=model) train_dataloader_nli = DataLoader(train_data_nli, shuffle=True, batch_size=batch_size) train_loss_nli = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int)) logging.info("Read STSbenchmark train dataset") train_sts_samples = [] dev_sts_samples = [] test_sts_samples = [] with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn: reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: score = float(row['score']) / 5.0 # Normalize score to range 0 ... 
1 inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score) if row['split'] == 'dev': dev_sts_samples.append(inp_example) elif row['split'] == 'test': test_sts_samples.append(inp_example) else: train_sts_samples.append(inp_example) train_data_sts = SentencesDataset(train_sts_samples, model=model) train_dataloader_sts = DataLoader(train_data_sts, shuffle=True, batch_size=batch_size) train_loss_sts = losses.CosineSimilarityLoss(model=model) logging.info("Read STSbenchmark dev dataset") evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_sts_samples, name='sts-dev') # Configure the training num_epochs = 4 warmup_steps = math.ceil(len(train_data_sts) * num_epochs / batch_size * 0.1) #10% of train data for warm-up logging.info("Warmup-steps: {}".format(warmup_steps)) # Here we define the two train objectives: train_dataloader_nli with train_loss_nli (i.e., SoftmaxLoss for NLI data) # and train_dataloader_sts with train_loss_sts (i.e., CosineSimilarityLoss for STSbenchmark data) # You can pass as many (dataloader, loss) tuples as you like. They are iterated in a round-robin way. train_objectives = [(train_dataloader_nli, train_loss_nli), (train_dataloader_sts, train_loss_sts)] # Train the model model.fit(train_objectives=train_objectives, evaluator=evaluator, epochs=num_epochs, evaluation_steps=1000, warmup_steps=warmup_steps, output_path=model_save_path ) ############################################################################## # # Load the stored model and evaluate its performance on STS benchmark dataset # ############################################################################## model = SentenceTransformer(model_save_path) test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_sts_samples, name='sts-test') test_evaluator(model, output_path=model_save_path)
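The round-robin scheduling mentioned in the comments above can be illustrated outside of sentence-transformers. The following is a minimal sketch under the assumption that shorter dataloaders are restarted when exhausted (the library's exact epoch-length policy may differ); the lists and loss names are toy stand-ins, not the real classes:

# Minimal round-robin sketch; "dataloaders" and "losses" here are toy
# stand-ins for the (DataLoader, Loss) tuples passed to model.fit above.
train_objectives_demo = [
    (["nli-batch-1", "nli-batch-2", "nli-batch-3"], "softmax-loss"),
    (["sts-batch-1", "sts-batch-2"], "cosine-loss"),
]
iterators = [iter(loader) for loader, _ in train_objectives_demo]
steps_per_epoch = max(len(loader) for loader, _ in train_objectives_demo)
for step in range(steps_per_epoch):
    for idx, (loader, loss_name) in enumerate(train_objectives_demo):
        try:
            batch = next(iterators[idx])
        except StopIteration:
            # Restart exhausted dataloaders so every objective gets one step per round.
            iterators[idx] = iter(loader)
            batch = next(iterators[idx])
        print("step {}: one optimizer step with {} on {}".format(step, loss_name, batch))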
[ "sentence_transformers.SentencesDataset", "sentence_transformers.util.http_get", "gzip.open", "torch.utils.data.DataLoader", "sentence_transformers.models.Transformer", "csv.DictReader", "os.path.exists", "sentence_transformers.LoggingHandler", "logging.info", "sentence_transformers.losses.CosineSimilarityLoss", "datetime.datetime.now", "sentence_transformers.SentenceTransformer", "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.from_input_examples" ]
[((1606, 1636), 'sentence_transformers.models.Transformer', 'models.Transformer', (['model_name'], {}), '(model_name)\n', (1624, 1636), False, 'from sentence_transformers import models, losses\n'), ((1976, 2042), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', ([], {'modules': '[word_embedding_model, pooling_model]'}), '(modules=[word_embedding_model, pooling_model])\n', (1995, 2042), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((2102, 2143), 'logging.info', 'logging.info', (['"""Read AllNLI train dataset"""'], {}), "('Read AllNLI train dataset')\n", (2114, 2143), False, 'import logging\n'), ((2603, 2651), 'sentence_transformers.SentencesDataset', 'SentencesDataset', (['train_nli_samples'], {'model': 'model'}), '(train_nli_samples, model=model)\n', (2619, 2651), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((2675, 2738), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data_nli'], {'shuffle': '(True)', 'batch_size': 'batch_size'}), '(train_data_nli, shuffle=True, batch_size=batch_size)\n', (2685, 2738), False, 'from torch.utils.data import DataLoader\n'), ((2887, 2934), 'logging.info', 'logging.info', (['"""Read STSbenchmark train dataset"""'], {}), "('Read STSbenchmark train dataset')\n", (2899, 2934), False, 'import logging\n'), ((3582, 3630), 'sentence_transformers.SentencesDataset', 'SentencesDataset', (['train_sts_samples'], {'model': 'model'}), '(train_sts_samples, model=model)\n', (3598, 3630), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((3654, 3717), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data_sts'], {'shuffle': '(True)', 'batch_size': 'batch_size'}), '(train_data_sts, shuffle=True, batch_size=batch_size)\n', (3664, 3717), False, 'from torch.utils.data import DataLoader\n'), ((3735, 3775), 'sentence_transformers.losses.CosineSimilarityLoss', 'losses.CosineSimilarityLoss', ([], {'model': 'model'}), '(model=model)\n', (3762, 3775), False, 'from sentence_transformers import models, losses\n'), ((3778, 3823), 'logging.info', 'logging.info', (['"""Read STSbenchmark dev dataset"""'], {}), "('Read STSbenchmark dev dataset')\n", (3790, 3823), False, 'import logging\n'), ((3836, 3922), 'sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.from_input_examples', 'EmbeddingSimilarityEvaluator.from_input_examples', (['dev_sts_samples'], {'name': '"""sts-dev"""'}), "(dev_sts_samples, name=\n 'sts-dev')\n", (3884, 3922), False, 'from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\n'), ((5038, 5074), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_save_path'], {}), '(model_save_path)\n', (5057, 5074), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((5092, 5180), 'sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.from_input_examples', 'EmbeddingSimilarityEvaluator.from_input_examples', (['test_sts_samples'], {'name': '"""sts-test"""'}), "(test_sts_samples, name=\n 'sts-test')\n", (5140, 5180), False, 'from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator\n'), ((1178, 1210), 'os.path.exists', 'os.path.exists', (['nli_dataset_path'], {}), '(nli_dataset_path)\n', (1192, 1210), False, 'import os\n'), ((1216, 1359), 'sentence_transformers.util.http_get', 'util.http_get', 
(['"""https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/AllNLI.tsv.gz"""', 'nli_dataset_path'], {}), "(\n 'https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/AllNLI.tsv.gz'\n , nli_dataset_path)\n", (1229, 1359), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((1358, 1390), 'os.path.exists', 'os.path.exists', (['sts_dataset_path'], {}), '(sts_dataset_path)\n', (1372, 1390), False, 'import os\n'), ((1396, 1545), 'sentence_transformers.util.http_get', 'util.http_get', (['"""https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/stsbenchmark.tsv.gz"""', 'sts_dataset_path'], {}), "(\n 'https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/stsbenchmark.tsv.gz'\n , sts_dataset_path)\n", (1409, 1545), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((2236, 2286), 'gzip.open', 'gzip.open', (['nli_dataset_path', '"""rt"""'], {'encoding': '"""utf8"""'}), "(nli_dataset_path, 'rt', encoding='utf8')\n", (2245, 2286), False, 'import gzip\n'), ((2308, 2367), 'csv.DictReader', 'csv.DictReader', (['fIn'], {'delimiter': '"""\t"""', 'quoting': 'csv.QUOTE_NONE'}), "(fIn, delimiter='\\t', quoting=csv.QUOTE_NONE)\n", (2322, 2367), False, 'import csv\n'), ((3006, 3056), 'gzip.open', 'gzip.open', (['sts_dataset_path', '"""rt"""'], {'encoding': '"""utf8"""'}), "(sts_dataset_path, 'rt', encoding='utf8')\n", (3015, 3056), False, 'import gzip\n'), ((3078, 3137), 'csv.DictReader', 'csv.DictReader', (['fIn'], {'delimiter': '"""\t"""', 'quoting': 'csv.QUOTE_NONE'}), "(fIn, delimiter='\\t', quoting=csv.QUOTE_NONE)\n", (3092, 3137), False, 'import csv\n'), ((779, 795), 'sentence_transformers.LoggingHandler', 'LoggingHandler', ([], {}), '()\n', (793, 795), False, 'from sentence_transformers import SentencesDataset, LoggingHandler, SentenceTransformer, util\n'), ((970, 984), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (982, 984), False, 'from datetime import datetime\n')]
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-04-10 08:25 from __future__ import unicode_literals import area_riservata.models from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('area_riservata', '0003_auto_20170408_1901'), ] operations = [ migrations.AddField( model_name='allegato', name='file', field=models.FileField(blank=True, null=True, upload_to=area_riservata.models.relURI_path), ), ]
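For context, Django invokes an `upload_to` callable with `(instance, filename)` and expects a storage-relative path in return. The real `relURI_path` lives in `area_riservata/models.py` and is not shown in this migration; the following is only a hypothetical sketch of its expected shape:

# Hypothetical shape of the upload_to callable referenced above; the actual
# implementation of area_riservata.models.relURI_path may differ.
def relURI_path(instance, filename):
    return "allegati/{}/{}".format(instance.pk, filename)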
[ "django.db.models.FileField" ]
[((435, 524), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': 'area_riservata.models.relURI_path'}), '(blank=True, null=True, upload_to=area_riservata.models.\n relURI_path)\n', (451, 524), False, 'from django.db import migrations, models\n')]
# Copyright 2019 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pcf.particle.aws.glacier.glacier_vault import GlacierVault from pcf.core import State import placebo import boto3 import os class TestGlacierVault: particle_definition = { "pcf_name": "pcf_glacier", "flavor": "glacier_vault", "aws_resource": { "vaultName": "pcf_test_glacier", # Required "custom_config": { "Tags": { "Name": "pcf-glacier-test" } } } } def test_create_vault(self): session = boto3.Session() dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 'replay') pill = placebo.attach(session, data_path=filename) pill.playback() # define particle particle = GlacierVault(self.particle_definition, session) # Test start particle.set_desired_state("running") particle.apply() assert particle.get_state() == State.running # # test tags tags = particle.client.list_tags_for_vault(vaultName=particle.vault_name, accountId=particle.account_id) assert self.particle_definition.get("aws_resource").get("custom_config").get("Tags") == tags.get("Tags") def test_terminate(self): session = boto3.Session() dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 'replay') pill = placebo.attach(session, data_path=filename) pill.playback() # define particle particle = GlacierVault(self.particle_definition, session) # Test Terminate particle.set_desired_state("terminated") particle.apply() assert particle.get_state() == State.terminated pill.stop()
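The JSON responses replayed from the `replay` directory above are produced by placebo's record mode. A one-off recording session might look like the following sketch; the AWS calls to exercise are elided, and the data path is the same `replay` directory consumed by the tests:

# Hypothetical recording session used to generate the files replayed above.
import os

import boto3
import placebo

session = boto3.Session()
pill = placebo.attach(session, data_path=os.path.join(os.path.dirname(__file__), "replay"))
pill.record()  # capture real AWS responses into data_path
# ... exercise GlacierVault against real AWS here ...
pill.stop()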
[ "pcf.particle.aws.glacier.glacier_vault.GlacierVault", "boto3.Session", "placebo.attach", "os.path.dirname", "os.path.join" ]
[((1125, 1140), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (1138, 1140), False, 'import boto3\n'), ((1159, 1184), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1174, 1184), False, 'import os\n'), ((1204, 1235), 'os.path.join', 'os.path.join', (['dirname', '"""replay"""'], {}), "(dirname, 'replay')\n", (1216, 1235), False, 'import os\n'), ((1251, 1294), 'placebo.attach', 'placebo.attach', (['session'], {'data_path': 'filename'}), '(session, data_path=filename)\n', (1265, 1294), False, 'import placebo\n'), ((1364, 1411), 'pcf.particle.aws.glacier.glacier_vault.GlacierVault', 'GlacierVault', (['self.particle_definition', 'session'], {}), '(self.particle_definition, session)\n', (1376, 1411), False, 'from pcf.particle.aws.glacier.glacier_vault import GlacierVault\n'), ((1859, 1874), 'boto3.Session', 'boto3.Session', ([], {}), '()\n', (1872, 1874), False, 'import boto3\n'), ((1893, 1918), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1908, 1918), False, 'import os\n'), ((1938, 1969), 'os.path.join', 'os.path.join', (['dirname', '"""replay"""'], {}), "(dirname, 'replay')\n", (1950, 1969), False, 'import os\n'), ((1985, 2028), 'placebo.attach', 'placebo.attach', (['session'], {'data_path': 'filename'}), '(session, data_path=filename)\n', (1999, 2028), False, 'import placebo\n'), ((2098, 2145), 'pcf.particle.aws.glacier.glacier_vault.GlacierVault', 'GlacierVault', (['self.particle_definition', 'session'], {}), '(self.particle_definition, session)\n', (2110, 2145), False, 'from pcf.particle.aws.glacier.glacier_vault import GlacierVault\n')]
""" Global fixtures and functions for pytest pytest can only share fixtures between modules if they are declared here. """ import logging import os import pytest from loguru import logger import genomepy.providers from genomepy.providers.base import BaseProvider from genomepy.providers.ensembl import EnsemblProvider from genomepy.providers.gencode import GencodeProvider from genomepy.providers.local import LocalProvider from genomepy.providers.ncbi import NcbiProvider from genomepy.providers.ucsc import UcscProvider from genomepy.providers.url import UrlProvider @pytest.fixture(scope="function") def caplog(caplog): """Fixture is necessary to be able to check loguru log messages""" class PropogateHandler(logging.Handler): def emit(self, record): logging.getLogger(record.name).handle(record) handler_id = logger.add(PropogateHandler(), format="{message} {extra}") yield caplog logger.remove(handler_id) def teardown(gprefix, skip=None): for ext in [ "fa.fai", "fa.sizes", "gaps.bed", "fa.gz.fai", "fa.gz.sizes", "annotation.gtf", "annotation.bed", ]: if skip and ext in skip: continue file = gprefix + ext if os.path.exists(file): os.remove(file) gdir = os.path.dirname(gprefix) readme = os.path.join(gdir, "README.txt") if os.path.exists(readme): os.remove(readme) @pytest.fixture(scope="function") def small_genome(): yield genomepy.Genome("tests/data/small_genome.fa.gz") teardown("tests/data/small_genome.") @pytest.fixture(scope="function") def gap_genome(): yield genomepy.Genome("tests/data/gap.fa") teardown("tests/data/gap.") @pytest.fixture(scope="function") def annot(): genome_file = "tests/data/regexp/regexp.fa" gtf_file = "tests/data/regexp/regexp.annotation.gtf" bed_file = "tests/data/regexp/regexp.annotation.bed" genomepy.Genome(genome_file) with open(gtf_file, "w") as f: f.write("# skip this line\n") f.write( """chrM\tvanHeeringen-lab\tNP_059343.1\t15307\t16448\t42\t+\t.\tattributes""" ) with open(bed_file, "w") as f: f.write( """chrM\t15307\t16448\tNP_059343.1\t42\t+\t15307\t16448\t0\t1\t1141,\t0,""" ) yield genomepy.Annotation("regexp", genomes_dir="tests/data") teardown("tests/data/regexp/regexp.") def validate_annot(fname, ftype): """fname = path, ftype = 'bed' or 'gtf'.""" assert os.path.exists(fname) columns = 12 if ftype == "bed" else 9 start, end = (3, 4) if ftype == "gtf" else (1, 2) with open(fname, "r") as f: for line in f: if line.startswith("#"): continue vals = line.split("\t") assert columns == len(vals) int(vals[start]), int(vals[end]) break @pytest.fixture(scope="function") def base(): return BaseProvider() @pytest.fixture(scope="function") def ensembl(): return EnsemblProvider() @pytest.fixture(scope="function") def ucsc(): return UcscProvider() @pytest.fixture(scope="function") def gencode(): return GencodeProvider() @pytest.fixture(scope="function") def ncbi(): return NcbiProvider() @pytest.fixture(scope="function") def local(): return LocalProvider() @pytest.fixture(scope="function") def url(): return UrlProvider() @pytest.fixture(scope="function") def provider(): return genomepy.Provider()
[ "genomepy.providers.url.UrlProvider", "os.remove", "genomepy.providers.local.LocalProvider", "os.path.dirname", "pytest.fixture", "os.path.exists", "genomepy.providers.ucsc.UcscProvider", "genomepy.providers.ncbi.NcbiProvider", "genomepy.providers.base.BaseProvider", "loguru.logger.remove", "genomepy.providers.gencode.GencodeProvider", "genomepy.providers.ensembl.EnsemblProvider", "os.path.join", "logging.getLogger" ]
[((574, 606), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (588, 606), False, 'import pytest\n'), ((1458, 1490), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1472, 1490), False, 'import pytest\n'), ((1615, 1647), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1629, 1647), False, 'import pytest\n'), ((1749, 1781), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1763, 1781), False, 'import pytest\n'), ((2912, 2944), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (2926, 2944), False, 'import pytest\n'), ((2986, 3018), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3000, 3018), False, 'import pytest\n'), ((3066, 3098), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3080, 3098), False, 'import pytest\n'), ((3140, 3172), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3154, 3172), False, 'import pytest\n'), ((3220, 3252), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3234, 3252), False, 'import pytest\n'), ((3294, 3326), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3308, 3326), False, 'import pytest\n'), ((3370, 3402), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3384, 3402), False, 'import pytest\n'), ((3442, 3474), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3456, 3474), False, 'import pytest\n'), ((932, 957), 'loguru.logger.remove', 'logger.remove', (['handler_id'], {}), '(handler_id)\n', (945, 957), False, 'from loguru import logger\n'), ((1327, 1351), 'os.path.dirname', 'os.path.dirname', (['gprefix'], {}), '(gprefix)\n', (1342, 1351), False, 'import os\n'), ((1365, 1397), 'os.path.join', 'os.path.join', (['gdir', '"""README.txt"""'], {}), "(gdir, 'README.txt')\n", (1377, 1397), False, 'import os\n'), ((1405, 1427), 'os.path.exists', 'os.path.exists', (['readme'], {}), '(readme)\n', (1419, 1427), False, 'import os\n'), ((2535, 2556), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2549, 2556), False, 'import os\n'), ((2968, 2982), 'genomepy.providers.base.BaseProvider', 'BaseProvider', ([], {}), '()\n', (2980, 2982), False, 'from genomepy.providers.base import BaseProvider\n'), ((3045, 3062), 'genomepy.providers.ensembl.EnsemblProvider', 'EnsemblProvider', ([], {}), '()\n', (3060, 3062), False, 'from genomepy.providers.ensembl import EnsemblProvider\n'), ((3122, 3136), 'genomepy.providers.ucsc.UcscProvider', 'UcscProvider', ([], {}), '()\n', (3134, 3136), False, 'from genomepy.providers.ucsc import UcscProvider\n'), ((3199, 3216), 'genomepy.providers.gencode.GencodeProvider', 'GencodeProvider', ([], {}), '()\n', (3214, 3216), False, 'from genomepy.providers.gencode import GencodeProvider\n'), ((3276, 3290), 'genomepy.providers.ncbi.NcbiProvider', 'NcbiProvider', ([], {}), '()\n', (3288, 3290), False, 'from genomepy.providers.ncbi import NcbiProvider\n'), ((3351, 3366), 'genomepy.providers.local.LocalProvider', 'LocalProvider', ([], {}), '()\n', (3364, 3366), False, 'from genomepy.providers.local import LocalProvider\n'), ((3425, 3438), 'genomepy.providers.url.UrlProvider', 'UrlProvider', 
([], {}), '()\n', (3436, 3438), False, 'from genomepy.providers.url import UrlProvider\n'), ((1266, 1286), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1280, 1286), False, 'import os\n'), ((1437, 1454), 'os.remove', 'os.remove', (['readme'], {}), '(readme)\n', (1446, 1454), False, 'import os\n'), ((1300, 1315), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1309, 1315), False, 'import os\n'), ((788, 818), 'logging.getLogger', 'logging.getLogger', (['record.name'], {}), '(record.name)\n', (805, 818), False, 'import logging\n')]
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name='aogl', version='0.1', author="<NAME>", author_email="<EMAIL>", description="A python package to retrieve the latest 10 blog posts from https://ao.gl", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/ao/aogl_pip", packages=["aogl"], entry_points = { "console_scripts": ['aogl = aogl.aogl:main'] }, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
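The `console_scripts` entry point maps the `aogl` command to the `main` callable in the `aogl.aogl` module. That module is not part of this file; the sketch below is a purely hypothetical minimal target, shown only to illustrate how the `'aogl = aogl.aogl:main'` mapping resolves:

# aogl/aogl.py -- hypothetical minimal entry-point target; the real module's
# contents are not shown here and may differ.
def main():
    print("Fetching the latest posts from https://ao.gl ...")

if __name__ == "__main__":
    main()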
[ "setuptools.setup" ]
[((88, 630), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""aogl"""', 'version': '"""0.1"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""A python package to retrieve the latest 10 blog posts from https://ao.gl"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/ao/aogl_pip"""', 'packages': "['aogl']", 'entry_points': "{'console_scripts': ['aogl = aogl.aogl:main']}", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']"}), "(name='aogl', version='0.1', author='<NAME>', author_email=\n '<EMAIL>', description=\n 'A python package to retrieve the latest 10 blog posts from https://ao.gl',\n long_description=long_description, long_description_content_type=\n 'text/markdown', url='https://github.com/ao/aogl_pip', packages=['aogl'\n ], entry_points={'console_scripts': ['aogl = aogl.aogl:main']},\n classifiers=['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'])\n", (104, 630), False, 'import setuptools\n')]
'''
img tools
Common image-processing utilities.
functions:
1. get_img(getpath, gray=False, scale_percent=100)
    Load an image from a path, with optional grayscale conversion and scaling.
2. save_img(savepath, img)
    Save an image.
3. plot_line_chart(y1, y2, y3)
    Plot a line chart (still incomplete).
4. cut_pic(img, pattern=0, up = 0, down = 0, left = 0, right = 0)
    Crop an image, by ratio or by pixel count.
5. plot_3d_line(x, z, y, over, x_max, y_max, z_max)
    Draw a line in 3D space.
6. plot_3d_dot(location, over, x_max, y_max, z_max)
    Draw a point in 3D space.
'''
import sys
sys.path.append(r'./')  # so that self-written helper modules can be found
import cv2
import matplotlib.pyplot as plt
import numpy as np


def get_img(getpath, gray=False, scale_percent=100):
    '''
    Load an image from a path.
    getpath: image path
    gray: whether to convert to grayscale; default=False
    scale_percent: scaling percentage; default=100
    '''
    img = cv2.imread(getpath)
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    if scale_percent != 100:
        # percent of original size
        dim = (width, height)
        # resize image
        img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
    if gray:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale (cv2 otherwise loads grayscale images as three channels)
        img = img.reshape(height, width, 1)  # numpy arrays are (rows=height, cols=width); reshape(width, height, 1) only worked for square images
    return img


def save_img(savepath, img):
    '''
    Save an image.
    '''
    cv2.imwrite(savepath, img)


def plot_line_chart(y1, y2, y3):
    '''
    Plot a line chart (still incomplete).
    '''
    plt.figure(figsize=(20,2))
    plt.title('太阳风速度预测')  # chart title: "solar wind speed prediction"
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese characters
    plt.xlabel('time')  # x-axis label
    plt.ylabel('太阳风速 km/s')  # y-axis label: "solar wind speed, km/s"
    plt.plot(y1, color='#800080', label='true', linewidth=1)  # draw the line chart, adding the data points
    # plt.plot(y2, color='#00a8e1', label='p1', linewidth=1)
    plt.plot(y3, color='#99cc00', label='p2', linewidth=1)
    plt.legend(['True', 'P2'])  # legend names
    # plt.legend(['True', 'P1', 'P2'])  # legend names
    plt.show()  # show the chart


def cut_pic(img, pattern=0, up = 0, down = 0, left = 0, right = 0):
    '''
    Crop an image.
    (img, crop mode (0: by ratio, 1: by pixels), top, bottom, left, right)
    eg.
    >>>(img, 1, 50, 50, 50, 50)
    crop 50 pixels from each side
    >>>(img, 0, 0.2, 0.2, 0.2, 0.2)
    crop 20% from each side
    '''
    h, w = len(img), len(img[0])
    if pattern:
        img = img[up:h - down, left:w-right]
    else:
        img = img[int(h*up):int(h*(1-down)), int(w*left):int(w*(1-right))]
    return img


def plot_3d_line(x, z, y, over, x_max, y_max, z_max):
    from matplotlib.font_manager import FontProperties  # moving these two lines to the caller would improve efficiency
    font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=12)
    # if x > x_max:x_max = x
    # if y > y_max:y_max = y
    # if z > z_max:z_max = z
    '''
    Draw a 3D plot.
    '''
    plt.ion()
    ax = plt.axes(projection='3d')
    # Axis setup: a right-handed coordinate system cannot be created directly,
    # so it is emulated by tuning the axis limits (worked out by trial and
    # error); this part is a little messy.
    ax.set_xlim(0, x_max)
    ax.set_zlim(0, y_max)
    ax.set_ylim(z_max, 0)
    ax.plot3D(x,z,y,'red')  # draw the space curve
    plt.title('导弹发射轨迹', fontproperties=font_set)  # title: "missile launch trajectory"
    plt.xlabel('x水平距离(米)', fontproperties=font_set)  # "x horizontal distance (m)"
    plt.ylabel('z水平距离(米)', fontproperties=font_set)  # "z horizontal distance (m)"
    # plt.zlabel('导弹运行高度(米)', fontproperties=font_set)  # "missile altitude (m)"
    plt.show()
    if not over:
        plt.pause(0.5)
        plt.clf()
    else:
        plt.pause(1000)


def plot_3d_dot(location, over, x_max, y_max, z_max):
    from matplotlib.font_manager import FontProperties
    font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=12)
    # if x > x_max:x_max = x
    # if y > y_max:y_max = y
    # if z > z_max:z_max = z
    '''
    Draw a 3D plot.
    '''
    plt.ion()
    ax = plt.axes(projection='3d')
    # Axis setup: a right-handed coordinate system cannot be created directly,
    # so it is emulated by tuning the axis limits; this part is a little messy.
    ax.set_xlim(0, x_max)
    ax.set_zlim(0, y_max)
    ax.set_ylim(z_max, 0)
    ax.plot(location[0],location[2],location[1],'red', marker='o')  # draw the point
    plt.title('导弹发射轨迹', fontproperties=font_set)  # title: "missile launch trajectory"
    plt.xlabel('x水平距离(米)', fontproperties=font_set)  # "x horizontal distance (m)"
    plt.ylabel('z水平距离(米)', fontproperties=font_set)  # "z horizontal distance (m)"
    # plt.zlabel('导弹运行高度(米)', fontproperties=font_set)  # "missile altitude (m)"
    plt.show()
    if not over:
        plt.pause(0.5)
        plt.clf()
    else:
        plt.pause(1000)


if __name__ == '__main__':
    print("Welcome to MyTools!")
    from utils.txtTool import *
    # l1 = txtReadNumArray("./example/data/true.txt")
    # l2 = txtReadNumArray("./example/data/p1.txt")
    # l3 = txtReadNumArray("./example/data/p2.txt")
    # plot_line_chart(l1[120:8000],l2[0:8000],l3[0:8000])
    # imgpath = "./example/test.jpg"
    # img = get_img(imgpath, gray=True, scale_percent=25)
    # print(img.shape)
    # img = img[50:462, 50:462]
    # img = cut_pic(img, 1, 50, 50, 50, 50)
    # img = cut_pic(img, 1, 14, 14, 14, 14)
    # img = 255.-img
    # from mathTool import *
    # img = standardization(img)
    # print(img[0:5,0:5])
    # print(img[40:50,40:50])
    # from circle import *
    # box = box_circle(100, (49,49), 50, 0., 1.)
    # img = img*box
    # print(img[0:5,0:5])
    # print(img[40:50,40:50])
    # cv2.imshow("img", img)
    # cv2.waitKey (0)
    # cv2.destroyAllWindows()
    plot_3d_line([1, 2, 3], [1, 2, 3], [1, 2, 3], True, 10, 10, 10)
    plot_3d_dot([1, 5, 10], True, 10, 10, 10)
[ "sys.path.append", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.axes", "cv2.imwrite", "matplotlib.pyplot.legend", "cv2.cvtColor", "matplotlib.pyplot.clf", "cv2.imread", "matplotlib.pyplot.figure", "matplotlib.pyplot.ion", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.pause", "cv2.resize" ]
[((412, 433), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (427, 433), False, 'import sys\n'), ((699, 718), 'cv2.imread', 'cv2.imread', (['getpath'], {}), '(getpath)\n', (709, 718), False, 'import cv2\n'), ((1227, 1253), 'cv2.imwrite', 'cv2.imwrite', (['savepath', 'img'], {}), '(savepath, img)\n', (1238, 1253), False, 'import cv2\n'), ((1325, 1352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 2)'}), '(figsize=(20, 2))\n', (1335, 1352), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1376), 'matplotlib.pyplot.title', 'plt.title', (['"""太阳风速度预测"""'], {}), "('太阳风速度预测')\n", (1365, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (1457, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""太阳风速 km/s"""'], {}), "('太阳风速 km/s')\n", (1488, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1570), 'matplotlib.pyplot.plot', 'plt.plot', (['y1'], {'color': '"""#800080"""', 'label': '"""true"""', 'linewidth': '(1)'}), "(y1, color='#800080', label='true', linewidth=1)\n", (1522, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1712), 'matplotlib.pyplot.plot', 'plt.plot', (['y3'], {'color': '"""#99cc00"""', 'label': '"""p2"""', 'linewidth': '(1)'}), "(y3, color='#99cc00', label='p2', linewidth=1)\n", (1666, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1744), 'matplotlib.pyplot.legend', 'plt.legend', (["['True', 'P2']"], {}), "(['True', 'P2'])\n", (1728, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1818), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1816, 1818), True, 'import matplotlib.pyplot as plt\n'), ((2429, 2492), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': '"""c:\\\\windows\\\\fonts\\\\simsun.ttc"""', 'size': '(12)'}), "(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=12)\n", (2443, 2492), False, 'from matplotlib.font_manager import FontProperties\n'), ((2612, 2621), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2619, 2621), True, 'import matplotlib.pyplot as plt\n'), ((2631, 2656), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (2639, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2870), 'matplotlib.pyplot.title', 'plt.title', (['"""导弹发射轨迹"""'], {'fontproperties': 'font_set'}), "('导弹发射轨迹', fontproperties=font_set)\n", (2835, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2922), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x水平距离(米)"""'], {'fontproperties': 'font_set'}), "('x水平距离(米)', fontproperties=font_set)\n", (2885, 2922), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2974), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z水平距离(米)"""'], {'fontproperties': 'font_set'}), "('z水平距离(米)', fontproperties=font_set)\n", (2937, 2974), True, 'import matplotlib.pyplot as plt\n'), ((3034, 3044), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3042, 3044), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3326), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': '"""c:\\\\windows\\\\fonts\\\\simsun.ttc"""', 'size': '(12)'}), "(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=12)\n", (3277, 3326), False, 'from matplotlib.font_manager import FontProperties\n'), ((3445, 3454), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3464, 
3489), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (3472, 3489), True, 'import matplotlib.pyplot as plt\n'), ((3699, 3743), 'matplotlib.pyplot.title', 'plt.title', (['"""导弹发射轨迹"""'], {'fontproperties': 'font_set'}), "('导弹发射轨迹', fontproperties=font_set)\n", (3708, 3743), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3795), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x水平距离(米)"""'], {'fontproperties': 'font_set'}), "('x水平距离(米)', fontproperties=font_set)\n", (3758, 3795), True, 'import matplotlib.pyplot as plt\n'), ((3800, 3847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z水平距离(米)"""'], {'fontproperties': 'font_set'}), "('z水平距离(米)', fontproperties=font_set)\n", (3810, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3917), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3915, 3917), True, 'import matplotlib.pyplot as plt\n'), ((960, 1010), 'cv2.resize', 'cv2.resize', (['img', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(img, dim, interpolation=cv2.INTER_AREA)\n', (970, 1010), False, 'import cv2\n'), ((1041, 1078), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1053, 1078), False, 'import cv2\n'), ((3070, 3084), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.5)'], {}), '(0.5)\n', (3079, 3084), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3102), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3100, 3102), True, 'import matplotlib.pyplot as plt\n'), ((3121, 3136), 'matplotlib.pyplot.pause', 'plt.pause', (['(1000)'], {}), '(1000)\n', (3130, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3957), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.5)'], {}), '(0.5)\n', (3952, 3957), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3975), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3973, 3975), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4009), 'matplotlib.pyplot.pause', 'plt.pause', (['(1000)'], {}), '(1000)\n', (4003, 4009), True, 'import matplotlib.pyplot as plt\n')]
from lxml import etree


def write_node_path(path_nodes, tree):
    """Create the chain of nodes named in `path_nodes` under the tree root, skipping nodes that already exist."""
    parent = tree.getroot()
    path = "."
    for node in path_nodes:
        path += "/" + node
        check_node = tree.find(path)
        if check_node is None:
            parent.insert(-1, etree.Element(node))
        parent = tree.find(path)


def write_value(node, parent_path, value, tree):
    """Write `value` as the text of the child `node` under `parent_path`, creating the child if it is missing."""
    path = "{}/{}".format(parent_path, node)
    if tree.find(path) is None:
        tree.find(parent_path).insert(-1, etree.Element(node))
    tree.find(path).text = "%f" % value
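A small usage sketch of the two helpers, reusing the `etree` import above; the node names and the value are hypothetical, chosen only for illustration:

tree = etree.ElementTree(etree.Element("root"))
write_node_path(["settings", "camera"], tree)
write_value("exposure", "./settings/camera", 0.25, tree)
print(etree.tostring(tree, pretty_print=True).decode())
# -> <root><settings><camera><exposure>0.250000</exposure></camera></settings></root>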
[ "lxml.etree.Element" ]
[((534, 553), 'lxml.etree.Element', 'etree.Element', (['node'], {}), '(node)\n', (547, 553), False, 'from lxml import etree\n'), ((310, 329), 'lxml.etree.Element', 'etree.Element', (['node'], {}), '(node)\n', (323, 329), False, 'from lxml import etree\n')]
import numpy as np import torch as torch import random import copy __all__ = ['WordDrop','NumericalChange','GaussianNoise','FragmentTransfer','Reverse'] class GaussianNoise(): def __init__(self): pass def __call__(self, sequence, mode): def get_parameter(m, mode): para = [[m*0.005 if m != 0 else 0.005, m/100 if m != 0 else 1/100, m/100 if m != 0 else 1/100, m/100 if m != 0 else 1/100, m/100 if m != 0 else 1/100, m/100 if m != 0 else 1/100], [m/100 if m != 0 else 1/100, None, None, None, None, None], [m/100 if m != 0 else 1/100, None, None, None, None, None], [m/100 if m != 0 else 1/100, None, None, None, None, None], [m/100 if m != 0 else 1/100, None, None, None, None, None], [m/100 if m != 0 else 1/100, None, None, None, None, None]] # scale #return para[mode[0]][mode[1]] return para[mode[0]][mode[1]] var, m = torch.var(sequence), torch.mean(sequence) gen = torch.Generator(device=var.device) gen.manual_seed(42) #gen.seed() gaussian = torch.normal(torch.tensor(0), torch.sqrt(var), size=sequence.shape, generator=gen) noise = torch.divide(gaussian, torch.max(gaussian) / get_parameter(m, mode)) #print("noise:", torch.sum(torch.pow(noise,2))) return torch.add(sequence, noise) # aug_sequence = torch.full_like(sequence, 0) # for i in range(sequence.shape[0]): # torch.seed() # var, m = torch.var_mean(sequence[i]) # gaussian = (torch.normal(0., float(torch.sqrt(var)), size=(1,sequence.shape[1])))[0] # noise = torch.divide(gaussian, torch.max(gaussian)/get_parameter(m, mode)) # aug_sequence[i] = torch.add(sequence[i], noise) # # if i % 10000 == 0: # # print(m, torch.max(noise)) # # print(torch.sum(torch.pow(torch.sub(aug_sequence,sequence), 2))) # return aug_sequence class NumericalChange(): def __init__(self): pass def __call__(self, sequence, mode): para = [[None, 0.008, None, None, None, None], [0.008, 0.008, 0.008, 0.008, 0.008, 0.008], [None, 0.008, None, None, None, None], [None, 0.008, None, None, None, None], [None, 0.008, None, None, None, None], [None, 0.008, None, None, None, None]] # alpha aug_sequence = torch.full_like(sequence, 0) for i in range(sequence.shape[0]): torch.seed() a1, a2 = torch.bernoulli(torch.full_like(sequence[i], 0.7)), torch.bernoulli(torch.full_like(sequence[i], 0.3)) a2 = torch.add(-a2,1.0) b = torch.mul(torch.sub(torch.add(a1,a2), 1),para[mode[0]][mode[1]]) aug_sequence[i] = torch.mul(sequence[i], torch.add(b,1.0)) return aug_sequence class WordDrop(): def __init__(self): pass def __call__(self, sequence, mode): para = [[None, None, 0.01, None, None, None], [None, None, 0.01, None, None, None], [0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [None, None, 0.01, None, None, None], [None, None, 0.01, None, None, None], [None, None, 0.01, None, None, None]] # cut aug_sequence = torch.full_like(sequence, 0) for i in range(sequence.shape[0]): gen = torch.Generator(device=sequence.device) gen.manual_seed(42) b = torch.bernoulli(torch.full_like(sequence[i], 1-para[mode[0]][mode[1]]), generator=gen) aug_sequence[i] = torch.mul(sequence[i], b) # print(torch.sum(aug_sequence[i]-sequence[i])) return aug_sequence class FragmentTransfer(): def __init__(self): pass def __call__(self, sequence, mode): # [left, right] para = [[None, None, None, 2, None, None], [None, None, None, 2, None, None], [None, None, None, 2, None, None], [2, 2, 2, 2, 2, 2], [None, None, None, 2, None, None], [None, None, None, 2, None, None]] # fragment dim = int(sequence.shape[1]) span = list(torch.chunk(sequence[0], chunks=dim // para[mode[0]][mode[1]] + 1, dim=0)) l = list(range(len(span))) random.shuffle(l) 
aug_sequence = torch.full_like(sequence, 0) for i in range(sequence.shape[0]): torch.seed() left, right = random.sample(l, 2) # for seq in sequence: seq=xxx cannot change the sequence span = list(torch.chunk(sequence[i], chunks=dim // para[mode[0]][mode[1]] + 1, dim=0)) # shuffle_span = [i.numpy() for i in span] # random.shuffle(shuffle_span) # seq = torch.from_numpy(np.concatenate(shuffle_span,axis=0)) # print(left,right) right = random.randint(left - para[mode[0]][mode[1]], left + para[mode[0]][mode[1]]) span.insert(right if left > right else max(right-1, 0), span.pop(left)) # print(span) aug_sequence[i] = torch.cat(span, dim=0) return aug_sequence class Reverse(): def __init__(self): pass def __call__(self, sequence, mode, windowsize=5): # [pos-left, pos+right] para = [[None, None, None, None, 1, None], [None, None, None, None, 1, None], [None, None, None, None, 1, None], [None, None, None, None, 1, None], [1, 1, 1, 1, 1, 1], [None, None, None, None, 1, None]] # windowsize dim = int(sequence.shape[1]) # l = list(range(dim)) # random.shuffle(l) # for k in range(sequence.shape[0]): # random.seed(k) # # left, right = random.sample(l, 2) # if left < right: # b = torch.cat((sequence[k][:left], torch.flip(sequence[k][left:right], dims=[0]), sequence[k][right:]), dim=0) # # print('b1 ',b - sequence[k],left,right) # sequence[k] = b # else: # b = torch.cat((torch.flip(sequence[k][:right], dims=[0]), sequence[k][right:left], torch.flip(sequence[k][left:], dims=[0])), dim=0) # # print('b2 ',b - sequence[k],left,right) # sequence[k] = b aug_sequence = torch.full_like(sequence, 0) for k in range(sequence.shape[0]): torch.seed() left = random.randint(0, para[mode[0]][mode[1]]) right = para[mode[0]][mode[1]] - left pos = random.randint(left,dim-right-1) # print(pos,right,left) aug_sequence[k] = torch.cat((sequence[k][:pos-left], torch.flip(sequence[k][pos-left:pos], dims=[0]), torch.flip(sequence[k][pos:pos+right+1], dims=[0]), sequence[k][pos+right+1:]), dim=0) return aug_sequence # class CropAndResize(): # def __init__(self): # pass # def __call__(self, sequence, mode): # # [left, right] # dim = int(sequence.shape[1]) # para = [[None, None, None, None, None, dim // 50 + 1], # [None, None, None, None, None, dim // 50 + 1], # [None, None, None, None, None, dim // 50 + 1], # [None, None, None, None, None, dim // 50 + 1], # [None, None, None, None, None, dim // 50 + 1], # [dim // 50 + 1, dim // 50 + 1, dim // 50 + 1, dim // 50 + 1, dim // 50 + 1, 1]] # croped dimensions # # if crop_dim % 2 == 0: # # left = crop_dim // 2 # # right = dim - 1 - crop_dim // 2 # # else: # # left = crop_dim // 2 # # right = dim - 1 - (crop_dim // 2 + 1) # # center = (left + right) / 2 # # around_seq = [center + float((i - center) * dim / (dim - crop_dim)) # # for i in range(dim)] # # crop_seq = [center + float((i - center) * dim / (dim - crop_dim)) # # for i in range(dim)] # # for i in range(dim): # # min_pos, min_dis = -1, dim # # for j in range(left, right+1): # # if abs(i - crop_seq[j]) < min_dis: # # min_pos, min_dis = j, abs(i - crop_seq[j]) # # around_seq[i] = min_pos # # for k in range(sequence.shape[0]): # # # print(sequence[k].dim(),sequence[k].shape) # # a = torch.gather(sequence[k],0,torch.tensor(around_seq)) # # sequence[k] = a # # return sequence # # return torch.gather(sequence,dim=0,index=torch.tensor(around_seq)) # around_seq_dict = {} # for left in range(0,para[mode[0]][mode[1]]+1): # right = dim - 1 - (para[mode[0]][mode[1]] - left) # center = (left + right) / 2 # around_seq_strech = [left + float((i - left) 
* (dim - left) / (dim - para[mode[0]][mode[1]])) # for i in range(dim)] # # print(left,':',around_seq_strech[left:right+1],around_seq_strech) # around_seq = [dim + float((i - dim) * dim / (dim - left)) # for i in around_seq_strech] # crop_seq = copy.deepcopy(around_seq) # for i in range(dim): # min_pos, min_dis = -1, 100000 # for j in range(left, right+1): # if abs(i - crop_seq[j]) < min_dis: # min_pos, min_dis = j, abs(i - crop_seq[j]) # around_seq[i] = min_pos # around_seq_dict[left] = around_seq # # print(left,':',around_seq) # # print(left,':',crop_seq[left:right+1],crop_seq) # aug_sequence = torch.full_like(sequence, 0) # for k in range(sequence.shape[0]): # torch.seed() # left = random.randint(0, para[mode[0]][mode[1]]) # a = torch.gather(sequence[k],0,torch.tensor(around_seq_dict[left])) # aug_sequence[k] = a # # print(around_seq_dict) # return aug_sequence # class GECA(): # def __init__(self): # pass # def __call__(self, sequence, crop_dim=7): # [left, right] # dim = int(sequence.shape[1]) # if crop_dim % 2 == 0: # left = crop_dim // 2 # right = dim - 1 - crop_dim // 2 # else: # left = crop_dim // 2 # right = dim - 1 - (crop_dim // 2 + 1) # center = (left + right) / 2 # around_seq = [center + float((i - center) * dim / (dim - crop_dim)) # for i in range(dim)] # crop_seq = [center + float((i - center) * dim / (dim - crop_dim)) # for i in range(dim)] # for i in range(dim): # min_pos, min_dis = -1, dim # for j in range(left, right+1): # if abs(i - crop_seq[j]) < min_dis: # min_pos, min_dis = j, abs(i - crop_seq[j]) # around_seq[i] = min_pos # for i in range(sequence.shape[0]): # a = torch.full_like(sequence[i], i) # for j in range(dim): # a[j] = sequence[i][around_seq[j]] # sequence[i] = a # return sequence # for a sequence, find a most similar, and replace others # use torch.where def cli_main(): a = FragmentTransfer() b = torch.randn((4,20),dtype=torch.float32) print(b) c = a(b,mode=(3,3)) # print(b) print(c) print(c-b) if __name__ == '__main__': cli_main()
[ "torch.mean", "torch.flip", "torch.full_like", "torch.var", "random.randint", "torch.sqrt", "random.shuffle", "torch.add", "random.sample", "torch.randn", "torch.cat", "torch.mul", "torch.seed", "torch.max", "torch.chunk", "torch.tensor", "torch.Generator" ]
[((11657, 11698), 'torch.randn', 'torch.randn', (['(4, 20)'], {'dtype': 'torch.float32'}), '((4, 20), dtype=torch.float32)\n', (11668, 11698), True, 'import torch as torch\n'), ((1058, 1092), 'torch.Generator', 'torch.Generator', ([], {'device': 'var.device'}), '(device=var.device)\n', (1073, 1092), True, 'import torch as torch\n'), ((1399, 1425), 'torch.add', 'torch.add', (['sequence', 'noise'], {}), '(sequence, noise)\n', (1408, 1425), True, 'import torch as torch\n'), ((2520, 2548), 'torch.full_like', 'torch.full_like', (['sequence', '(0)'], {}), '(sequence, 0)\n', (2535, 2548), True, 'import torch as torch\n'), ((3416, 3444), 'torch.full_like', 'torch.full_like', (['sequence', '(0)'], {}), '(sequence, 0)\n', (3431, 3444), True, 'import torch as torch\n'), ((4439, 4456), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (4453, 4456), False, 'import random\n'), ((4480, 4508), 'torch.full_like', 'torch.full_like', (['sequence', '(0)'], {}), '(sequence, 0)\n', (4495, 4508), True, 'import torch as torch\n'), ((6500, 6528), 'torch.full_like', 'torch.full_like', (['sequence', '(0)'], {}), '(sequence, 0)\n', (6515, 6528), True, 'import torch as torch\n'), ((1002, 1021), 'torch.var', 'torch.var', (['sequence'], {}), '(sequence)\n', (1011, 1021), True, 'import torch as torch\n'), ((1023, 1043), 'torch.mean', 'torch.mean', (['sequence'], {}), '(sequence)\n', (1033, 1043), True, 'import torch as torch\n'), ((1173, 1188), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (1185, 1188), True, 'import torch as torch\n'), ((1190, 1205), 'torch.sqrt', 'torch.sqrt', (['var'], {}), '(var)\n', (1200, 1205), True, 'import torch as torch\n'), ((2604, 2616), 'torch.seed', 'torch.seed', ([], {}), '()\n', (2614, 2616), True, 'import torch as torch\n'), ((2758, 2777), 'torch.add', 'torch.add', (['(-a2)', '(1.0)'], {}), '(-a2, 1.0)\n', (2767, 2777), True, 'import torch as torch\n'), ((3506, 3545), 'torch.Generator', 'torch.Generator', ([], {'device': 'sequence.device'}), '(device=sequence.device)\n', (3521, 3545), True, 'import torch as torch\n'), ((3711, 3736), 'torch.mul', 'torch.mul', (['sequence[i]', 'b'], {}), '(sequence[i], b)\n', (3720, 3736), True, 'import torch as torch\n'), ((4321, 4394), 'torch.chunk', 'torch.chunk', (['sequence[0]'], {'chunks': '(dim // para[mode[0]][mode[1]] + 1)', 'dim': '(0)'}), '(sequence[0], chunks=dim // para[mode[0]][mode[1]] + 1, dim=0)\n', (4332, 4394), True, 'import torch as torch\n'), ((4564, 4576), 'torch.seed', 'torch.seed', ([], {}), '()\n', (4574, 4576), True, 'import torch as torch\n'), ((4603, 4622), 'random.sample', 'random.sample', (['l', '(2)'], {}), '(l, 2)\n', (4616, 4622), False, 'import random\n'), ((5016, 5092), 'random.randint', 'random.randint', (['(left - para[mode[0]][mode[1]])', '(left + para[mode[0]][mode[1]])'], {}), '(left - para[mode[0]][mode[1]], left + para[mode[0]][mode[1]])\n', (5030, 5092), False, 'import random\n'), ((5233, 5255), 'torch.cat', 'torch.cat', (['span'], {'dim': '(0)'}), '(span, dim=0)\n', (5242, 5255), True, 'import torch as torch\n'), ((6584, 6596), 'torch.seed', 'torch.seed', ([], {}), '()\n', (6594, 6596), True, 'import torch as torch\n'), ((6616, 6657), 'random.randint', 'random.randint', (['(0)', 'para[mode[0]][mode[1]]'], {}), '(0, para[mode[0]][mode[1]])\n', (6630, 6657), False, 'import random\n'), ((6726, 6763), 'random.randint', 'random.randint', (['left', '(dim - right - 1)'], {}), '(left, dim - right - 1)\n', (6740, 6763), False, 'import random\n'), ((1282, 1301), 'torch.max', 'torch.max', (['gaussian'], 
{}), '(gaussian)\n', (1291, 1301), True, 'import torch as torch\n'), ((2911, 2928), 'torch.add', 'torch.add', (['b', '(1.0)'], {}), '(b, 1.0)\n', (2920, 2928), True, 'import torch as torch\n'), ((3610, 3666), 'torch.full_like', 'torch.full_like', (['sequence[i]', '(1 - para[mode[0]][mode[1]])'], {}), '(sequence[i], 1 - para[mode[0]][mode[1]])\n', (3625, 3666), True, 'import torch as torch\n'), ((4717, 4790), 'torch.chunk', 'torch.chunk', (['sequence[i]'], {'chunks': '(dim // para[mode[0]][mode[1]] + 1)', 'dim': '(0)'}), '(sequence[i], chunks=dim // para[mode[0]][mode[1]] + 1, dim=0)\n', (4728, 4790), True, 'import torch as torch\n'), ((2654, 2687), 'torch.full_like', 'torch.full_like', (['sequence[i]', '(0.7)'], {}), '(sequence[i], 0.7)\n', (2669, 2687), True, 'import torch as torch\n'), ((2706, 2739), 'torch.full_like', 'torch.full_like', (['sequence[i]', '(0.3)'], {}), '(sequence[i], 0.3)\n', (2721, 2739), True, 'import torch as torch\n'), ((2813, 2830), 'torch.add', 'torch.add', (['a1', 'a2'], {}), '(a1, a2)\n', (2822, 2830), True, 'import torch as torch\n'), ((6860, 6909), 'torch.flip', 'torch.flip', (['sequence[k][pos - left:pos]'], {'dims': '[0]'}), '(sequence[k][pos - left:pos], dims=[0])\n', (6870, 6909), True, 'import torch as torch\n'), ((6909, 6963), 'torch.flip', 'torch.flip', (['sequence[k][pos:pos + right + 1]'], {'dims': '[0]'}), '(sequence[k][pos:pos + right + 1], dims=[0])\n', (6919, 6963), True, 'import torch as torch\n')]
import sys sys.argv.pop(0) number = 1 for arg in sys.argv: print(f"{number}. {arg}") number += 1
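A hypothetical invocation, assuming the script is saved as args.py:

# $ python args.py apple banana cherry
# 1. apple
# 2. banana
# 3. cherry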
[ "sys.argv.pop" ]
[((12, 27), 'sys.argv.pop', 'sys.argv.pop', (['(0)'], {}), '(0)\n', (24, 27), False, 'import sys\n')]
# 3. Use `functools.wraps` to preserve the function attributes # including the docstring that you wrote. # 1. Write a function decorator that can be used to measure # the run time of a functions. Use `timeit.default_timer` to get time stamps. import functools from timeit import default_timer as timer def runtime1(func): @functools.wraps(func) def _runtime(*args, **kwargs): start = timer() x = func(*args, **kwargs) end = timer() print("Run Time is {}".format(end - start)) return x return _runtime @runtime1 def add(a, b): return a + b print(add(322, 3)) # 2. Make the decorator parameterized. It should take an integer # that specifies how often the function has to be run. # Make sure you divide the resulting run time by this number. def check(*argtypes): '''Function argument type checker.''' def _check(func): '''Takes the function.''' @functools.wraps(func) def __check(*args): '''Takes the arguments''' if len(args) != len(argtypes): msg = 'Expected %d but got %d arguments' % ( len(argtypes), len(args)) raise TypeError(msg) for arg, argtype in zip(args, argtypes): if not isinstance(arg, argtype): msg = 'Expected %s but got %s' % ( argtypes, tuple(type(arg) for arg in args)) raise TypeError(msg) return func(*args) return __check return _check @check(int) def runtime2(times): def _runtime(func): @functools.wraps(func) def __runtime(*args, **kwargs): x = [] start = timer() for _ in range(times): x.append(func(*args, **kwargs)) end = timer() print('Average runtime is {}'.format((end - start) / times)) return x return __runtime return _runtime @runtime2(5) def power(a, b): return pow(a, b) print(power(322, 3)) # 4. Make the time measurement optional by using a global switch in the # module that can be set to True or False to turn time measurement on or off. TIME_LOG = False @check(int) def runtime3(times): def _runtime3(func): @functools.wraps(func) def __runtime3(*args, **kwargs): x = [] if TIME_LOG: start = timer() for _ in range(times): x.append(func(*args, **kwargs)) end = timer() print('Average runtime is {}'.format((end - start) / times)) else: for _ in range(times): x.append(func(*args, **kwargs)) return x return __runtime3 return _runtime3 @runtime3(5) def power(a, b): return pow(a, b) print(power(4, 12)) TIME_LOG = True print(power(4, 12)) # 5. Write another decorator that can be used with a class and registers # every class that it decorates in a dictionary. registry = {} def decor_all_methods(decorator): def decorate(cls): '''for attr in cls.__dict__: # there's propably a better way to do this if callable(getattr(cls, attr)) and attr != '__init__': setattr(cls, attr, decorator(getattr(cls, attr))) registry.setdefault(cls, []).append(attr)''' callable_attributes = {k: v for k, v in cls.__dict__.items() if callable(v) and k != '__init__'} for name, func in callable_attributes.items(): decorated = decorator(func) setattr(cls, name, decorated) registry.setdefault(cls, []).append(name) return cls return decorate @decor_all_methods(runtime3(2)) class Math(): '''decor all methos of Math class (except __init__) with runtime3 decorator which make the method run 2 times''' def __init__(self, a, b): self.a = a self.b = b def power(self): return pow(self.a, self.b) m = Math(34, 12) print(m.power()) print(registry)
[ "timeit.default_timer", "functools.wraps" ]
[((330, 351), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (345, 351), False, 'import functools\n'), ((403, 410), 'timeit.default_timer', 'timer', ([], {}), '()\n', (408, 410), True, 'from timeit import default_timer as timer\n'), ((459, 466), 'timeit.default_timer', 'timer', ([], {}), '()\n', (464, 466), True, 'from timeit import default_timer as timer\n'), ((933, 954), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (948, 954), False, 'import functools\n'), ((1614, 1635), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1629, 1635), False, 'import functools\n'), ((2283, 2304), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (2298, 2304), False, 'import functools\n'), ((1715, 1722), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1720, 1722), True, 'from timeit import default_timer as timer\n'), ((1824, 1831), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1829, 1831), True, 'from timeit import default_timer as timer\n'), ((2414, 2421), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2419, 2421), True, 'from timeit import default_timer as timer\n'), ((2535, 2542), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2540, 2542), True, 'from timeit import default_timer as timer\n')]
import numpy as np import sys as sys import csv sys.path.insert(1, './../Tools') from DispersionRelationDeterminantFullConductivityZeff import VectorFinder_auto_Extensive #************Start of user block****************** #para= [nu, zeff,eta, shat, beta, ky, mu] para_min=[0.1, 1., 0.5, 0.001,0.0005, 0.01, 0.] para_max=[10., 5., 5., 0.05, 0.02, 0.2, 10.] path='.' Output_csv=path+'/0MTM_scan_PC.csv' xstar=10. ModIndex=1 #************End of user block****************** para_min=np.array(para_min) para_max=np.array(para_max) width=(para_max-para_min) with open(Output_csv, 'w', newline='') as csvfile: #clear all and then write a row csv_data = csv.writer(csvfile, delimiter=',') csv_data.writerow(['omega_omega_n','gamma_omega_n',\ 'nu','zeff','eta','shat','beta','ky',\ 'ModIndex','mu','xstar']) csvfile.close() while 1==1: param=para_min+width*np.random.rand(7) [nu,zeff,eta,shat,beta,ky,mu]=param w0=VectorFinder_auto_Extensive(nu,zeff,eta,shat,beta,ky,1,mu,xstar) #w0=1+1j omega=np.real(w0) gamma=np.imag(w0) print(str(omega)+','+str(gamma)+','+str(nu)+','+str(zeff)+','\ +str(eta)+','+str(shat)+','+str(beta)+','+str(ky)+','\ +str(ModIndex)+','+str(mu)+','+str(xstar)) with open(Output_csv, 'a+', newline='') as csvfile: #adding a row csv_data = csv.writer(csvfile, delimiter=',') csv_data.writerow([ omega,gamma,nu,zeff,eta,shat,beta,ky,\ ModIndex,mu,xstar ]) csvfile.close() print('******w*****') print('w='+str(w0))
[ "csv.writer", "sys.path.insert", "numpy.imag", "numpy.array", "DispersionRelationDeterminantFullConductivityZeff.VectorFinder_auto_Extensive", "numpy.real", "numpy.random.rand" ]
[((48, 80), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./../Tools"""'], {}), "(1, './../Tools')\n", (63, 80), True, 'import sys as sys\n'), ((505, 523), 'numpy.array', 'np.array', (['para_min'], {}), '(para_min)\n', (513, 523), True, 'import numpy as np\n'), ((533, 551), 'numpy.array', 'np.array', (['para_max'], {}), '(para_max)\n', (541, 551), True, 'import numpy as np\n'), ((681, 715), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (691, 715), False, 'import csv\n'), ((982, 1054), 'DispersionRelationDeterminantFullConductivityZeff.VectorFinder_auto_Extensive', 'VectorFinder_auto_Extensive', (['nu', 'zeff', 'eta', 'shat', 'beta', 'ky', '(1)', 'mu', 'xstar'], {}), '(nu, zeff, eta, shat, beta, ky, 1, mu, xstar)\n', (1009, 1054), False, 'from DispersionRelationDeterminantFullConductivityZeff import VectorFinder_auto_Extensive\n'), ((1071, 1082), 'numpy.real', 'np.real', (['w0'], {}), '(w0)\n', (1078, 1082), True, 'import numpy as np\n'), ((1093, 1104), 'numpy.imag', 'np.imag', (['w0'], {}), '(w0)\n', (1100, 1104), True, 'import numpy as np\n'), ((1392, 1426), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1402, 1426), False, 'import csv\n'), ((916, 933), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (930, 933), True, 'import numpy as np\n')]
from honeygrove.tests.testresources import __path__ as resources import uuid # HoneytokenDB configuration: honeytokendbProbabilities = {"TESTSERVICEA": 0.5, "TESTSERVICEB": 0.1, "LISTEN": 0.1} HPID = str(uuid.uuid4()) # Path to Filesystem all services are using path_to_filesys = resources._path[0] + '/test_filesys.xml' # Honeytoken Directory tokendir = 'testresources/honeytokenfiles' sshPort = 12222 sshName = "SSH" resources = 'testresources' # Honeyadaptertest tokendir_adapter = 'testresources/honeyadaptertest/tokens'
[ "uuid.uuid4" ]
[((206, 218), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (216, 218), False, 'import uuid\n')]
from PIL import Image, ImageDraw

MAP_BACKGROUND_FILE='input.png'
width=100;height=100
map_background = Image.open(MAP_BACKGROUND_FILE).convert('RGBA')

map_mesh = Image.new('RGBA', (width, height), (0, 0, 0, 0))
draw = ImageDraw.Draw(map_mesh)
# Create mesh using: draw.line([...], fill=(255, 255, 255, 50), width=1)
# Paste the (semi-transparent) mesh onto the background, using the mesh
# itself as the alpha mask, then stamp solid white through the same mask.
map_background.paste(map_mesh, (0, 0), map_mesh)
map_background.paste((255,255,255), (0, 0), map_mesh)
map_background.save('ss.png')

# Second variant, written against the old PIL 1.x module layout.
import Image, ImageDraw, random

background = Image.new('RGB', (100, 100), (255, 255, 255))
MAP_BACKGROUND_FILE='icoinput.png'
width=100;height=100
foreground = Image.open(MAP_BACKGROUND_FILE).convert('RGBA')
# foreground = Image.new('RGB', (100, 100), (255, 0, 0))
mask = Image.new('L', (100, 100), 0)
# mask=Image.new('RGB', (100, 100), (255, 0, 0))
draw = ImageDraw.Draw(mask)
for i in range(0, 100, 2):
    # Mode 'L' pixels are 8-bit, so the maximum ink value is 255 (the
    # original 256 overflowed the valid range).
    draw.line((i, 0, i, 100), fill=255)
    draw.line((0, i, 100, i), fill=255)
    # draw.line((i, 0, i, 100), fill=random.randrange(256))
    # draw.line((0, i, 100, i), fill=random.randrange(256))
# Image.composite takes `background` where the mask is 255 and `foreground`
# where it is 0, producing the striped result.
result = Image.composite(background, foreground, mask)
result.save('ss2.png')
[ "ImageDraw.Draw", "Image.new", "Image.open", "Image.composite" ]
[((163, 211), 'Image.new', 'Image.new', (['"""RGBA"""', '(width, height)', '(0, 0, 0, 0)'], {}), "('RGBA', (width, height), (0, 0, 0, 0))\n", (172, 211), False, 'import Image, ImageDraw, random\n'), ((219, 243), 'ImageDraw.Draw', 'ImageDraw.Draw', (['map_mesh'], {}), '(map_mesh)\n', (233, 243), False, 'import Image, ImageDraw, random\n'), ((500, 545), 'Image.new', 'Image.new', (['"""RGB"""', '(100, 100)', '(255, 255, 255)'], {}), "('RGB', (100, 100), (255, 255, 255))\n", (509, 545), False, 'import Image, ImageDraw, random\n'), ((729, 758), 'Image.new', 'Image.new', (['"""L"""', '(100, 100)', '(0)'], {}), "('L', (100, 100), 0)\n", (738, 758), False, 'import Image, ImageDraw, random\n'), ((815, 835), 'ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (829, 835), False, 'import Image, ImageDraw, random\n'), ((1065, 1110), 'Image.composite', 'Image.composite', (['background', 'foreground', 'mask'], {}), '(background, foreground, mask)\n', (1080, 1110), False, 'import Image, ImageDraw, random\n'), ((104, 135), 'Image.open', 'Image.open', (['MAP_BACKGROUND_FILE'], {}), '(MAP_BACKGROUND_FILE)\n', (114, 135), False, 'import Image, ImageDraw, random\n'), ((615, 646), 'Image.open', 'Image.open', (['MAP_BACKGROUND_FILE'], {}), '(MAP_BACKGROUND_FILE)\n', (625, 646), False, 'import Image, ImageDraw, random\n')]
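A self-contained sketch of the same composite-with-mask idea using modern Pillow imports (the record above mixes the legacy Python-2 'import Image' style); the stripe spacing and output filename are illustrative.

from PIL import Image, ImageDraw

background = Image.new('RGB', (100, 100), (255, 255, 255))
foreground = Image.new('RGB', (100, 100), (255, 0, 0))
mask = Image.new('L', (100, 100), 0)   # 0 everywhere: composite picks foreground
draw = ImageDraw.Draw(mask)
for i in range(0, 100, 2):
    draw.line((i, 0, i, 100), fill=255)  # 255 stripes: composite picks background
# Image.composite(im1, im2, mask): im1 where mask==255, im2 where mask==0
result = Image.composite(background, foreground, mask)
result.save('ss2_sketch.png')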
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Twitter API
#
# Copyright (C) 2001-2015 NLTK Project
# Author: <NAME> <<EMAIL>>
#         <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

"""
This module provides an interface for TweetHandlers, and support for timezone
handling.
"""

from datetime import tzinfo, timedelta, datetime
from nltk.compat import UTC
import time as _time


class LocalTimezoneOffsetWithUTC(tzinfo):
    """
    This is not intended to be a general purpose class for dealing with the
    local timezone. In particular:

    * it assumes that the date passed has been created using
      `datetime(..., tzinfo=Local)`, where `Local` is an instance of
      the object `LocalTimezoneOffsetWithUTC`;
    * for such an object, it returns the offset with UTC, used for date comparisons.

    Reference: https://docs.python.org/3/library/datetime.html
    """
    STDOFFSET = timedelta(seconds=-_time.timezone)

    if _time.daylight:
        DSTOFFSET = timedelta(seconds=-_time.altzone)
    else:
        DSTOFFSET = STDOFFSET

    def utcoffset(self, dt):
        """
        Access the relevant time offset.
        """
        return self.DSTOFFSET

LOCAL = LocalTimezoneOffsetWithUTC()


class BasicTweetHandler(object):
    """
    Minimal implementation of `TweetHandler`.

    Counts the number of Tweets and decides when the client should stop
    fetching them.
    """
    def __init__(self, limit=20):
        self.limit = limit
        self.counter = 0

        """
        A flag to indicate to the client whether to stop fetching data given
        some condition (e.g., reaching a date limit).
        """
        self.do_stop = False

        """
        Stores the id of the last fetched Tweet to handle pagination.
        """
        self.max_id = None

    def do_continue(self):
        """
        Returns `False` if the client should stop fetching Tweets.
        """
        return self.counter < self.limit and not self.do_stop


class TweetHandlerI(BasicTweetHandler):
    """
    Interface class whose subclasses should implement a handle method that
    Twitter clients can delegate to.
    """
    def __init__(self, limit=20, upper_date_limit=None, lower_date_limit=None):
        """
        :param int limit: The number of data items to process in the current\
        round of processing.

        :param tuple upper_date_limit: The date at which to stop collecting\
        new data. This should be entered as a tuple which can serve as the\
        argument to `datetime.datetime`.\
        E.g. `date_limit=(2015, 4, 1, 12, 40)` for 12:40 pm on April 1 2015.

        :param tuple lower_date_limit: The date at which to stop collecting\
        new data. See `upper_date_limit` for formatting.
        """
        BasicTweetHandler.__init__(self, limit)

        self.upper_date_limit = None
        self.lower_date_limit = None
        if upper_date_limit:
            self.upper_date_limit = datetime(*upper_date_limit, tzinfo=LOCAL)
        if lower_date_limit:
            self.lower_date_limit = datetime(*lower_date_limit, tzinfo=LOCAL)

        self.startingup = True

    def handle(self, data):
        """
        Deal appropriately with data returned by the Twitter API
        """
        raise NotImplementedError

    def on_finish(self):
        """
        Actions when the tweet limit has been reached
        """
        raise NotImplementedError

    def check_date_limit(self, data, verbose=False):
        """
        Validate date limits.
        """
        if self.upper_date_limit or self.lower_date_limit:
            date_fmt = '%a %b %d %H:%M:%S +0000 %Y'
            tweet_date = \
                datetime.strptime(data['created_at'], date_fmt).replace(tzinfo=UTC)
            if (self.upper_date_limit and tweet_date > self.upper_date_limit) or \
               (self.lower_date_limit and tweet_date < self.lower_date_limit):
                if self.upper_date_limit:
                    message = "earlier"
                    date_limit = self.upper_date_limit
                else:
                    message = "later"
                    date_limit = self.lower_date_limit
                if verbose:
                    print("Date limit {0} is {1} than date of current tweet {2}".\
                          format(date_limit, message, tweet_date))
                self.do_stop = True
[ "datetime.datetime.strptime", "datetime.timedelta", "datetime.datetime" ]
[((969, 1003), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-_time.timezone)'}), '(seconds=-_time.timezone)\n', (978, 1003), False, 'from datetime import tzinfo, timedelta, datetime\n'), ((1051, 1084), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-_time.altzone)'}), '(seconds=-_time.altzone)\n', (1060, 1084), False, 'from datetime import tzinfo, timedelta, datetime\n'), ((3089, 3130), 'datetime.datetime', 'datetime', (['*upper_date_limit'], {'tzinfo': 'LOCAL'}), '(*upper_date_limit, tzinfo=LOCAL)\n', (3097, 3130), False, 'from datetime import tzinfo, timedelta, datetime\n'), ((3198, 3239), 'datetime.datetime', 'datetime', (['*lower_date_limit'], {'tzinfo': 'LOCAL'}), '(*lower_date_limit, tzinfo=LOCAL)\n', (3206, 3239), False, 'from datetime import tzinfo, timedelta, datetime\n'), ((3847, 3894), 'datetime.datetime.strptime', 'datetime.strptime', (["data['created_at']", 'date_fmt'], {}), "(data['created_at'], date_fmt)\n", (3864, 3894), False, 'from datetime import tzinfo, timedelta, datetime\n')]
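A standalone check of the date-limit comparison above, using only the stdlib (no Twitter client); the sample 'created_at' string is illustrative, in Twitter's classic timestamp format.

from datetime import datetime

date_fmt = '%a %b %d %H:%M:%S +0000 %Y'
tweet_date = datetime.strptime('Wed Apr 01 12:40:00 +0000 2015', date_fmt)
upper = datetime(2015, 4, 1, 12, 40)
print(tweet_date > upper)  # False -> tweet is within the upper date limit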
#!/usr/bin/env python3

import glob
import regex as re
import os.path
import nltk.data
from nltk.tokenize.nist import NISTTokenizer


def collapse(s):
    s = re.sub(r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?', 'URLTOKEN', s)
    s = s.replace('...', 'ELIPSISTOKEN')
    return s


def main():
    nist = NISTTokenizer()
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    for fn in glob.glob('../data/hmong/sch_corpus2_raw/sch-*.txt'):
        fnout = '../data/hmong/sch_corpus2_tok/' + os.path.basename(fn)
        with open(fn, encoding='utf-8') as f, open(fnout, 'w', encoding='utf-8') as fout:
            for sent in sent_detector.tokenize(collapse(f.read())):
                print(' '.join(nist.tokenize(sent)), file=fout)


if __name__ == '__main__':
    main()
[ "regex.sub", "nltk.tokenize.nist.NISTTokenizer", "glob.glob" ]
[((159, 282), 'regex.sub', 're.sub', (['"""(http|ftp|https)://([\\\\w_-]+(?:(?:\\\\.[\\\\w_-]+)+))([\\\\w.,@?^=%&:/~+#-]*[\\\\w@?^=%&/~+#-])?"""', '"""URLTOKEN"""', 's'], {}), "(\n '(http|ftp|https)://([\\\\w_-]+(?:(?:\\\\.[\\\\w_-]+)+))([\\\\w.,@?^=%&:/~+#-]*[\\\\w@?^=%&/~+#-])?'\n , 'URLTOKEN', s)\n", (165, 282), True, 'import regex as re\n'), ((347, 362), 'nltk.tokenize.nist.NISTTokenizer', 'NISTTokenizer', ([], {}), '()\n', (360, 362), False, 'from nltk.tokenize.nist import NISTTokenizer\n'), ((447, 499), 'glob.glob', 'glob.glob', (['"""../data/hmong/sch_corpus2_raw/sch-*.txt"""'], {}), "('../data/hmong/sch_corpus2_raw/sch-*.txt')\n", (456, 499), False, 'import glob\n')]
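A quick check of the collapse() idea using the stdlib 're' module (the script above uses the third-party 'regex' package, but this pattern uses no regex-only features); the sample sentence is illustrative.

import re

pat = r'(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?'
s = 'see http://example.com/a_b ... done'
print(re.sub(pat, 'URLTOKEN', s).replace('...', 'ELIPSISTOKEN'))
# -> 'see URLTOKEN ELIPSISTOKEN done'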
from spada.methods import method


class GetSwitches(method.Method):

    def __init__(self, annotation = 'annotation.pklz'):

        method.Method.__init__(self, __name__, annotation)

        self._genes.flushSwitches()

    def run(self, switchesFile):

        self.logger.info("Reading switches.")
        self._genes.readSwitches(switchesFile, self._txs)


if __name__ == '__main__':
    pass
[ "spada.methods.method.Method.__init__" ]
[((123, 173), 'spada.methods.method.Method.__init__', 'method.Method.__init__', (['self', '__name__', 'annotation'], {}), '(self, __name__, annotation)\n', (145, 173), False, 'from spada.methods import method\n')]
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch.nn as nn
import numpy as np
import torchvision
import math
import torch.nn.functional as F
from torch.autograd import Variable


def attention(query, key, value, mask=None, dropout=None):
    "Compute 'Scaled Dot Product Attention'"
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    # print('scores', scores)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    scores.retain_grad()
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    res = torch.matmul(p_attn, value)
    return res, p_attn


Q = torch.tensor(
    [[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]], requires_grad=True
)
mask = torch.tensor([[[1, 1], [1, 1]]])
res, p_attn = attention(Q, Q, Q, mask)
print("res", res)
print()
print("p_attn", p_attn)
p_attn.retain_grad()
res.backward(torch.tensor([[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]]))
print(Q.grad)
[ "math.sqrt", "torch.matmul", "torch.tensor", "torch.nn.functional.softmax" ]
[((1772, 1867), 'torch.tensor', 'torch.tensor', (['[[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]]'], {'requires_grad': '(True)'}), '([[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]],\n requires_grad=True)\n', (1784, 1867), False, 'import torch\n'), ((1878, 1910), 'torch.tensor', 'torch.tensor', (['[[[1, 1], [1, 1]]]'], {}), '([[[1, 1], [1, 1]]])\n', (1890, 1910), False, 'import torch\n'), ((1618, 1643), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (1627, 1643), True, 'import torch.nn.functional as F\n'), ((1715, 1742), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (1727, 1742), False, 'import torch\n'), ((2036, 2107), 'torch.tensor', 'torch.tensor', (['[[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]]'], {}), '([[[1.0, 1.0, 2.0, 0.0, 5.0], [-1.0, 2.0, 2.0, 0.0, 5.0]]])\n', (2048, 2107), False, 'import torch\n'), ((1457, 1471), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (1466, 1471), False, 'import math\n')]
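A cross-check of the attention math above in plain NumPy (no autograd): the softmax rows must sum to 1, so the output is a convex combination of the value rows.

import numpy as np

def np_attention(q, k, v):
    d_k = q.shape[-1]
    scores = q @ k.transpose(0, 2, 1) / np.sqrt(d_k)
    # numerically stable softmax over the last axis
    e = np.exp(scores - scores.max(axis=-1, keepdims=True))
    p = e / e.sum(axis=-1, keepdims=True)
    return p @ v, p

Q = np.array([[[1., 1., 2., 0., 5.], [-1., 2., 2., 0., 5.]]])
out, p = np_attention(Q, Q, Q)
print(p.sum(axis=-1))  # each row ~1.0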
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import biosteam as bst
from ._lle_unit import LLEUnit
from ._splitter import Splitter
from .design_tools import PressureVessel

__all__ = ('LiquidsSettler', 'LLESettler', 'LiquidsSplitSettler')


class LiquidsSettler(bst.Unit, PressureVessel, isabstract=True):
    _N_ins = 1
    _N_outs = 2
    _N_heat_utilities = 0

    def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
                 area_to_feed=0.1,
                 length_to_diameter=4,
                 vessel_material='Carbon steel',
                 vessel_type='Horizontal'):
        bst.Unit.__init__(self, ID, ins, outs, thermo)
        self.vessel_material = vessel_material
        self.vessel_type = vessel_type
        self.length_to_diameter = length_to_diameter #: Length to diameter ratio
        self.area_to_feed = area_to_feed #: [ft2/gpm] Diameter * length per gpm of feed

    @staticmethod
    def _default_vessel_type():
        return 'Horizontal'

    def _design(self):
        feed = self.ins[0]
        F_vol_gpm = feed.get_total_flow('gpm')
        area = self.area_to_feed * F_vol_gpm
        length_to_diameter = self.length_to_diameter
        P = feed.get_property('P', 'psi')
        D = (area / length_to_diameter) ** 0.5
        L = length_to_diameter * D
        self.design_results.update(self._vessel_design(P, D, L))

    def _cost(self):
        D = self.design_results
        self.purchase_costs.update(
            self._vessel_purchase_cost(D['Weight'], D['Diameter'], D['Length'])
        )


class LLESettler(LLEUnit, LiquidsSettler):
    line = 'Settler'

    def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
                 area_to_feed=0.1,
                 length_to_diameter=4,
                 vessel_material='Carbon steel',
                 vessel_type='Horizontal',
                 top_chemical=None,
                 efficiency=1.0):
        LLEUnit.__init__(self, ID, ins, outs, thermo, top_chemical, efficiency)
        self.vessel_material = vessel_material
        self.vessel_type = vessel_type
        self.length_to_diameter = length_to_diameter
        self.area_to_feed = area_to_feed


class LiquidsSplitSettler(LiquidsSettler):
    line = 'Settler'

    def __init__(self, ID='', ins=None, outs=(), thermo=None, *,
                 split, order=None,
                 area_to_feed=0.1,
                 length_to_diameter=4,
                 vessel_material='Carbon steel',
                 vessel_type='Horizontal'):
        bst.Unit.__init__(self, ID, ins, outs, thermo)
        self.vessel_material = vessel_material
        self.vessel_type = vessel_type
        self.length_to_diameter = length_to_diameter
        self.area_to_feed = area_to_feed
        self._isplit = self.chemicals.isplit(split, order)

    split = Splitter.split
    isplit = Splitter.isplit
    _run = Splitter._run
[ "biosteam.Unit.__init__" ]
[((865, 911), 'biosteam.Unit.__init__', 'bst.Unit.__init__', (['self', 'ID', 'ins', 'outs', 'thermo'], {}), '(self, ID, ins, outs, thermo)\n', (882, 911), True, 'import biosteam as bst\n'), ((2808, 2854), 'biosteam.Unit.__init__', 'bst.Unit.__init__', (['self', 'ID', 'ins', 'outs', 'thermo'], {}), '(self, ID, ins, outs, thermo)\n', (2825, 2854), True, 'import biosteam as bst\n')]
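The sizing rule in _design() above, isolated as plain arithmetic: a feed of F gpm gives area = area_to_feed*F = D*L with L = length_to_diameter*D, so D = sqrt(area/length_to_diameter). The feed rate below is an illustrative number, not from the record.

area_to_feed = 0.1        # ft2/gpm
length_to_diameter = 4
F_vol_gpm = 400.          # assumed feed rate
area = area_to_feed * F_vol_gpm
D = (area / length_to_diameter) ** 0.5   # vessel diameter, ft
L = length_to_diameter * D               # vessel length, ft
print(D, L)  # ~3.16 ft diameter, ~12.6 ft length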
from cosmic_ray.operators import zero_iteration_loop
from cosmic_ray.plugins import get_test_runner
from cosmic_ray.work_item import WorkItem
from cosmic_ray.worker import worker, WorkerOutcome

from path_utils import DATA_DIR, excursion, extend_path


def test_no_test_return_value():
    with extend_path(DATA_DIR), excursion(DATA_DIR):
        test_runner = get_test_runner("unittest", ".")
        result = worker("a.b",
                        zero_iteration_loop.ZeroIterationLoop,
                        100,
                        test_runner)
        expected = WorkItem(
            data=None,
            test_outcome=None,
            worker_outcome=WorkerOutcome.NO_TEST,
            diff=None,
            module=None,
            operator=None,
            occurrence=None,
            line_number=None,
            command_line=None,
            job_id=None)
        assert result == expected
[ "path_utils.extend_path", "cosmic_ray.work_item.WorkItem", "cosmic_ray.plugins.get_test_runner", "cosmic_ray.worker.worker", "path_utils.excursion" ]
[((295, 316), 'path_utils.extend_path', 'extend_path', (['DATA_DIR'], {}), '(DATA_DIR)\n', (306, 316), False, 'from path_utils import DATA_DIR, excursion, extend_path\n'), ((318, 337), 'path_utils.excursion', 'excursion', (['DATA_DIR'], {}), '(DATA_DIR)\n', (327, 337), False, 'from path_utils import DATA_DIR, excursion, extend_path\n'), ((361, 393), 'cosmic_ray.plugins.get_test_runner', 'get_test_runner', (['"""unittest"""', '"""."""'], {}), "('unittest', '.')\n", (376, 393), False, 'from cosmic_ray.plugins import get_test_runner\n'), ((411, 481), 'cosmic_ray.worker.worker', 'worker', (['"""a.b"""', 'zero_iteration_loop.ZeroIterationLoop', '(100)', 'test_runner'], {}), "('a.b', zero_iteration_loop.ZeroIterationLoop, 100, test_runner)\n", (417, 481), False, 'from cosmic_ray.worker import worker, WorkerOutcome\n'), ((525, 716), 'cosmic_ray.work_item.WorkItem', 'WorkItem', ([], {'data': 'None', 'test_outcome': 'None', 'worker_outcome': 'WorkerOutcome.NO_TEST', 'diff': 'None', 'module': 'None', 'operator': 'None', 'occurrence': 'None', 'line_number': 'None', 'command_line': 'None', 'job_id': 'None'}), '(data=None, test_outcome=None, worker_outcome=WorkerOutcome.NO_TEST,\n diff=None, module=None, operator=None, occurrence=None, line_number=\n None, command_line=None, job_id=None)\n', (533, 716), False, 'from cosmic_ray.work_item import WorkItem\n')]
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""
Class describing a flow configuration object
"""

import datetime
import logging as log
import pprint
from shutil import which

from .utils import *


# Interface class for extensions.
class FlowCfg():
    def __str__(self):
        return pprint.pformat(self.__dict__)

    def __repr__(self):
        return pprint.pformat(self.__dict__)

    def __init__(self, flow_cfg_file, proj_root, args):
        # Options set from command line
        self.items = []
        self.items.extend(args.items)
        self.list_items = []
        self.list_items.extend(args.list)
        self.flow_cfg_file = flow_cfg_file
        self.proj_root = proj_root
        self.args = args
        self.scratch_root = args.scratch_root
        self.branch = args.branch
        self.job_prefix = args.job_prefix

        # Imported cfg files using 'import_cfgs' keyword
        self.imported_cfg_files = []
        self.imported_cfg_files.append(flow_cfg_file)

        # Add exports using 'exports' keyword - these are exported to the child
        # process' environment.
        self.exports = []

        # Add overrides using the overrides keyword - existing attributes
        # are overridden with the override values.
        self.overrides = []

        # List of cfgs if the parsed cfg is a master cfg list
        self.cfgs = []

        # Add a notion of "master" cfg - this is indicated using
        # a special key 'use_cfgs' within the hjson cfg.
        self.is_master_cfg = False

        # Set the partial path to the IP's DV area.
        self.rel_path = os.path.dirname(flow_cfg_file).replace(
            self.proj_root + '/', '')

        # Timestamp
        self.ts_format_long = args.ts_format_long
        self.timestamp_long = args.timestamp_long
        self.ts_format = args.ts_format
        self.timestamp = args.timestamp

    def __post_init__(self):
        # Run some post init checks
        if not self.is_master_cfg:
            # Check if self.cfgs is a list of exactly 1 item (self)
            if not (len(self.cfgs) == 1 and self.cfgs[0].name == self.name):
                log.error("Parse error!\n%s", self.cfgs)
                sys.exit(1)

    @staticmethod
    def create_instance(flow_cfg_file, proj_root, args):
        '''Create a new instance of this class with the given parameters.
        '''
        return FlowCfg(flow_cfg_file, proj_root, args)

    def parse_flow_cfg(self, flow_cfg_file, is_entry_point=True):
        '''
        Parse the flow cfg hjson file. This is a private API used within the
        extended class' __init__ function. This parses the hjson cfg (and
        imports / use cfgs) and builds an initial dictionary.

        This method takes 2 args.
        flow_cfg_file: This is the flow cfg file to be parsed.
        is_entry_point: the cfg file that is passed on the command line is the
        entry point cfg. If the cfg file is a part of an import_cfgs or
        use_cfgs key, then it is not an entry point.
        '''
        hjson_dict = parse_hjson(flow_cfg_file)

        # Check if this is the master cfg, if this is the entry point cfg file
        if is_entry_point:
            self.is_master_cfg = self.check_if_master_cfg(hjson_dict)

            # If not a master cfg, then register self with self.cfgs
            if self.is_master_cfg is False:
                self.cfgs.append(self)

        # Resolve the raw hjson dict to build this object
        self.resolve_hjson_raw(hjson_dict)

    def check_if_master_cfg(self, hjson_dict):
        # This is a master cfg only if it has a single key called "use_cfgs"
        # which contains a list of actual flow cfgs.
        hjson_cfg_dict_keys = hjson_dict.keys()
        return (len(hjson_cfg_dict_keys) == 1 and \
                "use_cfgs" in hjson_cfg_dict_keys and \
                type(hjson_dict["use_cfgs"]) is list)

    def resolve_hjson_raw(self, hjson_dict):
        attrs = self.__dict__.keys()
        rm_hjson_dict_keys = []
        import_cfgs = []
        use_cfgs = []
        for key in hjson_dict.keys():
            if key in attrs:
                hjson_dict_val = hjson_dict[key]
                self_val = getattr(self, key)
                scalar_types = {str: [""], int: [0, -1], bool: [False]}

                # Case 1: key value in class and hjson_dict differ - error!
                if type(hjson_dict_val) != type(self_val):
                    log.error("Conflicting key types: \"%s\" {\"%s, \"%s\"}",
                              key,
                              type(hjson_dict_val).__name__,
                              type(self_val).__name__)
                    sys.exit(1)

                # Case 2: key value in class and hjson_dict are strs - set if
                # not already set, else error!
                elif type(hjson_dict_val) in scalar_types.keys():
                    defaults = scalar_types[type(hjson_dict_val)]
                    if self_val == hjson_dict_val:
                        rm_hjson_dict_keys.append(key)
                    elif self_val in defaults and not hjson_dict_val in defaults:
                        setattr(self, key, hjson_dict_val)
                        rm_hjson_dict_keys.append(key)
                    elif not self_val in defaults and not hjson_dict_val in defaults:
                        log.error(
                            "Conflicting values {\"%s\", \"%s\"} encountered for key \"%s\"",
                            str(self_val), str(hjson_dict_val), key)
                        sys.exit(1)

                # Case 3: key value in class and hjson_dict are lists - merge'em
                elif type(hjson_dict_val) is list and type(self_val) is list:
                    self_val.extend(hjson_dict_val)
                    setattr(self, key, self_val)
                    rm_hjson_dict_keys.append(key)

                # Case 4: unknown issue
                else:
                    log.error(
                        "Type of \"%s\" (%s) in %s appears to be invalid (should be %s)",
                        key, type(hjson_dict_val).__name__, hjson_dict,
                        type(self_val).__name__)
                    sys.exit(1)

            # If key is 'import_cfgs' then add to the list of cfgs to
            # process
            elif key == 'import_cfgs':
                import_cfgs.extend(hjson_dict[key])
                rm_hjson_dict_keys.append(key)

            # If this is a master cfg list and the key is 'use_cfgs'
            elif self.is_master_cfg and key == "use_cfgs":
                use_cfgs.extend(hjson_dict[key])

            # If this is not a master cfg list and the key is 'use_cfgs'
            elif not self.is_master_cfg and key == "use_cfgs":
                # Throw an error and exit
                log.error(
                    "Key \"use_cfgs\" encountered in a non-master cfg file list \"%s\"",
                    self.flow_cfg_file)
                sys.exit(1)

            else:
                # add key-value to class
                setattr(self, key, hjson_dict[key])
                rm_hjson_dict_keys.append(key)

        # Parse imported cfgs
        for cfg_file in import_cfgs:
            if not cfg_file in self.imported_cfg_files:
                self.imported_cfg_files.append(cfg_file)
                # Substitute wildcards in cfg_file files since we need to process
                # them right away.
                cfg_file = subst_wildcards(cfg_file, self.__dict__)
                self.parse_flow_cfg(cfg_file, False)
            else:
                log.error("Cfg file \"%s\" has already been parsed", cfg_file)

        # Parse master cfg files
        if self.is_master_cfg:
            for cfg_file in use_cfgs:
                # Substitute wildcards in cfg_file files since we need to process
                # them right away.
                cfg_file = subst_wildcards(cfg_file, self.__dict__)
                self.cfgs.append(
                    self.create_instance(cfg_file, self.proj_root, self.args))

    def _process_overrides(self):
        # Look through the dict and find available overrides.
        # If override is available, check if the type of the value for existing
        # and the overridden keys are the same.
        overrides_dict = {}

        if hasattr(self, "overrides"):
            overrides = getattr(self, "overrides")
            if type(overrides) is not list:
                log.error(
                    "The type of key \"overrides\" is %s - it should be a list",
                    type(overrides))
                sys.exit(1)

            # Process override one by one
            for item in overrides:
                if type(item) is dict and set(item.keys()) == set(
                    ["name", "value"]):
                    ov_name = item["name"]
                    ov_value = item["value"]
                    if ov_name not in overrides_dict.keys():
                        overrides_dict[ov_name] = ov_value
                        self._do_override(ov_name, ov_value)
                    else:
                        log.error(
                            "Override for key \"%s\" already exists!\nOld: %s\nNew: %s",
                            ov_name, overrides_dict[ov_name], ov_value)
                        sys.exit(1)
                else:
                    log.error("\"overrides\" is a list of dicts with {\"name\": <name>, " \
                              "\"value\": <value>} pairs. Found this instead:\n%s",
                              str(item))
                    sys.exit(1)

    def _do_override(self, ov_name, ov_value):
        # Go through self attributes and replace with overrides
        if hasattr(self, ov_name):
            orig_value = getattr(self, ov_name)
            if type(orig_value) == type(ov_value):
                log.debug("Overriding \"%s\" value \"%s\" with \"%s\"", ov_name,
                          orig_value, ov_value)
                setattr(self, ov_name, ov_value)
            else:
                log.error("The type of override value \"%s\" for \"%s\" mismatches " + \
                          "the type of original value \"%s\"", ov_value, ov_name,
                          orig_value)
                sys.exit(1)
        else:
            log.error("Override key \"%s\" not found in the cfg!", ov_name)
            sys.exit(1)

    def _process_exports(self):
        # Convert 'exports' to dict
        exports_dict = {}

        if self.exports != []:
            for item in self.exports:
                if type(item) is dict:
                    exports_dict.update(item)
                elif type(item) is str:
                    [key, value] = item.split(':', 1)
                    if type(key) is not str:
                        key = str(key)
                    if type(value) is not str:
                        value = str(value)
                    exports_dict.update({key.strip(): value.strip()})
                else:
                    log.error("Type error in \"exports\": %s", str(item))
                    sys.exit(1)

        self.exports = exports_dict

    def _purge(self):
        '''Purge the existing scratch areas in preparation for the new run.'''
        return

    def purge(self):
        '''Public facing API for _purge().
        '''
        for item in self.cfgs:
            item._purge()

    def _print_list(self):
        '''Print the list of available items that can be kicked off.
        '''
        return

    def print_list(self):
        '''Public facing API for _print_list().
        '''
        for item in self.cfgs:
            item._print_list()

    def _create_deploy_objects(self):
        '''Create deploy objects from items that were passed on for being run.
        The deploy objects for build and run are created from the objects that
        were created from the create_objects() method.
        '''
        return

    def create_deploy_objects(self):
        '''Public facing API for _create_deploy_objects().
        '''
        if self.is_master_cfg:
            self.deploy = []
            for item in self.cfgs:
                item._create_deploy_objects()
                self.deploy.extend(item.deploy)
        else:
            self._create_deploy_objects()

    def _gen_results(self, fmt="md"):
        '''
        The function is called after the flow has executed. It collates the
        status of all run targets and generates a dict. It parses the testplan
        and maps the generated result to the testplan entries to generate a
        final table (list). It uses the fmt arg to dump the final result as a
        markdown or html.
        '''
        return

    def gen_results(self):
        '''Public facing API for _gen_results().
        '''
        results = []
        for item in self.cfgs:
            result = item._gen_results()
            print(result)
            results.append(result)
        return results

    def _publish_results(self):
        '''Publish results to the opentitan web server.

        Results are uploaded to {results_server}/{rel_path}/latest/results.
        If the 'latest' directory exists, then it is renamed to its 'timestamp'
        directory. If the list of directories in this area is > 7, then the
        oldest entry is removed.

        {results_server}/{rel_path}/history.md contains links to the last 7
        results.
        '''
        if which('gsutil') is None or which('gcloud') is None:
            log.error(
                "Google cloud SDK not installed! Cannot access the results server"
            )
            return

        # Construct the paths
        results_root_dir = self.results_server + '/' + self.rel_path
        results_dir = results_root_dir + '/latest'
        results_page = results_dir + '/results.md'

        # Time format for moving the dir
        tf = "%Y.%m.%d_%H.%M.%S"

        # Extract the timestamp of the existing results_page
        cmd = "gsutil ls -L " + results_page + " | " + "grep \'Creation time:\'"
        cmd_output = subprocess.run(cmd,
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
        old_results_ts = cmd_output.stdout.decode("utf-8")
        old_results_ts = old_results_ts.replace("Creation time:", "")
        old_results_ts = old_results_ts.strip()

        # Move the 'latest' to its timestamp directory if lookup succeeded
        if cmd_output.returncode == 0:
            try:
                if old_results_ts != "":
                    ts = datetime.datetime.strptime(
                        old_results_ts, "%a, %d %b %Y %H:%M:%S %Z")
                    old_results_ts = ts.strftime(tf)
            except ValueError as e:
                log.error(
                    "%s: \'%s\' Timestamp conversion value error raised!", e)
                old_results_ts = ""

            # If the timestamp conversion failed - then create a dummy one with
            # yesterday's date.
            if old_results_ts == "":
                log.log(VERBOSE, "Creating dummy timestamp with yesterday's date")
                ts = datetime.datetime.now(
                    datetime.timezone.utc) - datetime.timedelta(days=1)
                old_results_ts = ts.strftime(tf)

            old_results_dir = results_root_dir + "/" + old_results_ts
            cmd = ["gsutil", "mv", results_dir, old_results_dir]
            cmd_output = subprocess.run(cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
            if cmd_output.returncode != 0:
                log.error("Failed to mv old results page \"%s\" to \"%s\"!",
                          results_dir, old_results_dir)

        # Do an ls in the results root dir to check what directories exist.
        results_dirs = []
        cmd = ["gsutil", "ls", results_root_dir]
        cmd_output = subprocess.run(args=cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
        if cmd_output.returncode == 0:
            # Some directories exist. Check if 'latest' is one of them
            results_dirs = cmd_output.stdout.decode("utf-8").strip()
            results_dirs = results_dirs.split("\n")
        else:
            log.log(VERBOSE, "Failed to run \"%s\"!", cmd)

        # Start pruning
        log.log(VERBOSE, "Pruning %s area to limit last 7 results",
                results_root_dir)
        rdirs = []
        for rdir in results_dirs:
            dirname = rdir.replace(results_root_dir, '')
            dirname = dirname.replace('/', '')
            if dirname in ['latest', 'history.md']:
                continue
            rdirs.append(dirname)
        rdirs.sort(reverse=True)

        rm_cmd = ""
        history_txt = " History\n\n"
        history_txt += "- [Latest](" + results_page + ")\n"
        if len(rdirs) > 0:
            for i in range(len(rdirs)):
                if i < 6:
                    rdir_url = results_root_dir + '/' + rdirs[i] + "/results.md"
                    history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
                else:
                    rm_cmd += results_root_dir + '/' + rdirs[i] + " "

        if rm_cmd != "":
            rm_cmd = "gsutil rm -r " + rm_cmd + "; "

        # Publish the updated history page.
        history_txt = history_txt.replace("gs://", "http://")
        history_file = self.scratch_path + "/history_" + self.timestamp + ".md"
        history_page = results_root_dir + "/history.md"
        f = open(history_file, 'w')
        f.write(history_txt)
        f.close()

        # Construct history cp cmd
        history_cp_cmd = "gsutil cp " + history_file + " " + history_page + \
                         "; rm -rf " + history_file + "; "

        # Copy over the latest regression result.
        log.info("Publishing results to %s",
                 results_page.replace("gs://", "http://"))
        cmd = history_cp_cmd + rm_cmd + \
              "gsutil cp " + self.results_file + " " + results_page
        try:
            cmd_output = subprocess.run(args=cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.DEVNULL)
        except Exception as e:
            log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))

    def publish_results(self):
        '''Public facing API for publishing results to the opentitan web server.
        '''
        for item in self.cfgs:
            item._publish_results()
[ "logging.error", "pprint.pformat", "logging.debug", "logging.log", "shutil.which", "datetime.datetime.strptime", "datetime.timedelta", "datetime.datetime.now" ]
[((415, 444), 'pprint.pformat', 'pprint.pformat', (['self.__dict__'], {}), '(self.__dict__)\n', (429, 444), False, 'import pprint\n'), ((485, 514), 'pprint.pformat', 'pprint.pformat', (['self.__dict__'], {}), '(self.__dict__)\n', (499, 514), False, 'import pprint\n'), ((16648, 16725), 'logging.log', 'log.log', (['VERBOSE', '"""Pruning %s area to limit last 7 results"""', 'results_root_dir'], {}), "(VERBOSE, 'Pruning %s area to limit last 7 results', results_root_dir)\n", (16655, 16725), True, 'import logging as log\n'), ((10484, 10545), 'logging.error', 'log.error', (['"""Override key "%s" not found in the cfg!"""', 'ov_name'], {}), '(\'Override key "%s" not found in the cfg!\', ov_name)\n', (10493, 10545), True, 'import logging as log\n'), ((13621, 13698), 'logging.error', 'log.error', (['"""Google cloud SDK not installed! Cannot access the results server"""'], {}), "('Google cloud SDK not installed! Cannot access the results server')\n", (13630, 13698), True, 'import logging as log\n'), ((16568, 16612), 'logging.log', 'log.log', (['VERBOSE', '"""Failed to run "%s"!"""', 'cmd'], {}), '(VERBOSE, \'Failed to run "%s"!\', cmd)\n', (16575, 16612), True, 'import logging as log\n'), ((2274, 2314), 'logging.error', 'log.error', (['"""Parse error!\n%s"""', 'self.cfgs'], {}), "('Parse error!\\n%s', self.cfgs)\n", (2283, 2314), True, 'import logging as log\n'), ((7769, 7829), 'logging.error', 'log.error', (['"""Cfg file "%s" has already been parsed"""', 'cfg_file'], {}), '(\'Cfg file "%s" has already been parsed\', cfg_file)\n', (7778, 7829), True, 'import logging as log\n'), ((10041, 10126), 'logging.debug', 'log.debug', (['"""Overriding "%s" value "%s" with "%s\\""""', 'ov_name', 'orig_value', 'ov_value'], {}), '(\'Overriding "%s" value "%s" with "%s"\', ov_name, orig_value, ov_value\n )\n', (10050, 10126), True, 'import logging as log\n'), ((10237, 10373), 'logging.error', 'log.error', (['(\'The type of override value "%s" for "%s" mismatches \' +\n \'the type of original value "%s"\')', 'ov_value', 'ov_name', 'orig_value'], {}), '(\'The type of override value "%s" for "%s" mismatches \' +\n \'the type of original value "%s"\', ov_value, ov_name, orig_value)\n', (10246, 10373), True, 'import logging as log\n'), ((13557, 13572), 'shutil.which', 'which', (['"""gsutil"""'], {}), "('gsutil')\n", (13562, 13572), False, 'from shutil import which\n'), ((13584, 13599), 'shutil.which', 'which', (['"""gcloud"""'], {}), "('gcloud')\n", (13589, 13599), False, 'from shutil import which\n'), ((15246, 15312), 'logging.log', 'log.log', (['VERBOSE', '"""Creating dummy timestamp with yesterday\'s date"""'], {}), '(VERBOSE, "Creating dummy timestamp with yesterday\'s date")\n', (15253, 15312), True, 'import logging as log\n'), ((15873, 15963), 'logging.error', 'log.error', (['"""Failed to mv old results page "%s" to "%s"!"""', 'results_dir', 'old_results_dir'], {}), '(\'Failed to mv old results page "%s" to "%s"!\', results_dir,\n old_results_dir)\n', (15882, 15963), True, 'import logging as log\n'), ((14754, 14824), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['old_results_ts', '"""%a, %d %b %Y %H:%M:%S %Z"""'], {}), "(old_results_ts, '%a, %d %b %Y %H:%M:%S %Z')\n", (14780, 14824), False, 'import datetime\n'), ((14955, 15020), 'logging.error', 'log.error', (['"""%s: \'%s\' Timestamp conversion value error raised!"""', 'e'], {}), '("%s: \'%s\' Timestamp conversion value error raised!", e)\n', (14964, 15020), True, 'import logging as log\n'), ((15358, 15402), 'datetime.datetime.now', 
'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (15379, 15402), False, 'import datetime\n'), ((15426, 15452), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15444, 15452), False, 'import datetime\n'), ((9297, 9415), 'logging.error', 'log.error', (['"""Override for key "%s" already exists!\nOld: %s\nNew: %s"""', 'ov_name', 'overrides_dict[ov_name]', 'ov_value'], {}), '("""Override for key "%s" already exists!\nOld: %s\nNew: %s""",\n ov_name, overrides_dict[ov_name], ov_value)\n', (9306, 9415), True, 'import logging as log\n'), ((6989, 7087), 'logging.error', 'log.error', (['"""Key "use_cfgs" encountered in a non-master cfg file list "%s\\""""', 'self.flow_cfg_file'], {}), '(\'Key "use_cfgs" encountered in a non-master cfg file list "%s"\',\n self.flow_cfg_file)\n', (6998, 7087), True, 'import logging as log\n')]
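A standalone check of the 'latest'-to-timestamped-directory rename logic above: gsutil's "Creation time:" string is parsed and reformatted with tf. The sample timestamp is illustrative.

import datetime

tf = "%Y.%m.%d_%H.%M.%S"
old_results_ts = "Tue, 04 Feb 2020 09:15:30 GMT"
ts = datetime.datetime.strptime(old_results_ts, "%a, %d %b %Y %H:%M:%S %Z")
print(ts.strftime(tf))  # -> 2020.02.04_09.15.30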
# -*- coding: utf-8 -*-

from openerp import fields, models, api
from openerp.tools.translate import html_translate


class to_affiliate_advertisement(models.Model):
    _description = "Store advertisement for affiliate partner to choose to install on their website."
    _name = "to.affiliate.advertisement"

    image = fields.Binary(string="Banner Image")
    url = fields.Char(string="Url to Nomi")
    name = fields.Char(string="Advertisement Name")
    alternative_txt = fields.Char(string="Alternative")
    active = fields.Boolean(string="Active")
    html = fields.Html('Html Code', sanitize=False, translate=html_translate)
    #company_id = fields.Many2one('res.company', string="Company", default=lambda self: self.env.user.company_id)
[ "openerp.fields.Html", "openerp.fields.Binary", "openerp.fields.Boolean", "openerp.fields.Char" ]
[((324, 360), 'openerp.fields.Binary', 'fields.Binary', ([], {'string': '"""Banner Image"""'}), "(string='Banner Image')\n", (337, 360), False, 'from openerp import fields, models, api\n'), ((371, 404), 'openerp.fields.Char', 'fields.Char', ([], {'string': '"""Url to Nomi"""'}), "(string='Url to Nomi')\n", (382, 404), False, 'from openerp import fields, models, api\n'), ((417, 457), 'openerp.fields.Char', 'fields.Char', ([], {'string': '"""Advertisement Name"""'}), "(string='Advertisement Name')\n", (428, 457), False, 'from openerp import fields, models, api\n'), ((480, 513), 'openerp.fields.Char', 'fields.Char', ([], {'string': '"""Alternative"""'}), "(string='Alternative')\n", (491, 513), False, 'from openerp import fields, models, api\n'), ((528, 559), 'openerp.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Active"""'}), "(string='Active')\n", (542, 559), False, 'from openerp import fields, models, api\n'), ((571, 637), 'openerp.fields.Html', 'fields.Html', (['"""Html Code"""'], {'sanitize': '(False)', 'translate': 'html_translate'}), "('Html Code', sanitize=False, translate=html_translate)\n", (582, 637), False, 'from openerp import fields, models, api\n')]
import logging

import torch

from rl_agents.agents.common.memory import Transition
from rl_agents.agents.common.models import model_factory
from rl_agents.agents.common.optimizers import loss_function_factory, optimizer_factory
from rl_agents.agents.common.utils import choose_device
from rl_agents.agents.deep_q_network.abstract import AbstractDQNAgent

logger = logging.getLogger(__name__)


class DQNAgent(AbstractDQNAgent):
    def __init__(self, env, config=None):
        super(DQNAgent, self).__init__(env, config)
        self.value_net = model_factory(self.config["model"])
        self.target_net = model_factory(self.config["model"])
        self.target_net.load_state_dict(self.value_net.state_dict())
        self.target_net.eval()
        self.device = choose_device(self.config["device"])
        self.value_net.to(self.device)
        self.target_net.to(self.device)
        self.loss_function = loss_function_factory(self.config["loss_function"])
        self.optimizer = optimizer_factory(self.config["optimizer"]["type"],
                                           self.value_net.parameters(),
                                           **self.config["optimizer"])
        self.steps = 0

    def step_optimizer(self, loss):
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.value_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    def compute_bellman_residual(self, batch, target_state_action_value=None):
        # Concatenate the batch elements
        if not isinstance(batch.state, torch.Tensor):
            # logger.info("Casting the batch to torch.tensor")
            state = torch.cat(tuple(torch.tensor([batch.state], dtype=torch.float))).to(self.device)
            action = torch.tensor(batch.action, dtype=torch.long).to(self.device)
            reward = torch.tensor(batch.reward, dtype=torch.float).to(self.device)
            next_state = torch.cat(tuple(torch.tensor([batch.next_state], dtype=torch.float))).to(self.device)
            terminal = torch.tensor(batch.terminal, dtype=torch.uint8).to(self.device)
            batch = Transition(state, action, reward, next_state, terminal, batch.info)

        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
        # columns of actions taken
        state_action_values = self.value_net(batch.state)
        state_action_values = state_action_values.gather(1, batch.action.unsqueeze(1)).squeeze(1)

        if target_state_action_value is None:
            with torch.no_grad():
                # Compute V(s_{t+1}) for all next states.
                next_state_values = torch.zeros(batch.reward.shape).to(self.device)
                # Double Q-learning: pick best actions from policy network
                _, best_actions = self.value_net(batch.next_state).max(1)
                # Double Q-learning: estimate action values from target network
                best_values = self.target_net(batch.next_state).gather(1, best_actions.unsqueeze(1)).squeeze(1)
                next_state_values[1 - batch.terminal] = best_values[1 - batch.terminal]
                # Compute the expected Q values
                target_state_action_value = batch.reward + self.config["gamma"] * next_state_values

        # Compute loss
        loss = self.loss_function(state_action_values, target_state_action_value)
        return loss, target_state_action_value, batch

    def get_batch_state_values(self, states):
        values, actions = self.value_net(torch.tensor(states, dtype=torch.float).to(self.device)).max(1)
        return values.data.cpu().numpy(), actions.data.cpu().numpy()

    def get_batch_state_action_values(self, states):
        return self.value_net(torch.tensor(states, dtype=torch.float).to(self.device)).data.cpu().numpy()

    def save(self, filename):
        state = {'state_dict': self.value_net.state_dict(),
                 'optimizer': self.optimizer.state_dict()}
        torch.save(state, filename)
        return filename

    def load(self, filename):
        checkpoint = torch.load(filename, map_location=self.device)
        self.value_net.load_state_dict(checkpoint['state_dict'])
        self.target_net.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        return filename

    def initialize_model(self):
        self.value_net.reset()

    def set_writer(self, writer):
        super().set_writer(writer)
        self.writer.add_graph(self.value_net,
                              input_to_model=torch.zeros((1, *self.env.observation_space.shape), dtype=torch.float, device=self.device))
        self.writer.add_scalar("agent/trainable_parameters", sum(p.numel() for p in self.value_net.parameters() if p.requires_grad), 0)
[ "rl_agents.agents.common.memory.Transition", "rl_agents.agents.common.optimizers.loss_function_factory", "torch.load", "torch.save", "torch.zeros", "torch.tensor", "torch.no_grad", "rl_agents.agents.common.models.model_factory", "rl_agents.agents.common.utils.choose_device", "logging.getLogger" ]
[((364, 391), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (381, 391), False, 'import logging\n'), ((547, 582), 'rl_agents.agents.common.models.model_factory', 'model_factory', (["self.config['model']"], {}), "(self.config['model'])\n", (560, 582), False, 'from rl_agents.agents.common.models import model_factory\n'), ((609, 644), 'rl_agents.agents.common.models.model_factory', 'model_factory', (["self.config['model']"], {}), "(self.config['model'])\n", (622, 644), False, 'from rl_agents.agents.common.models import model_factory\n'), ((767, 803), 'rl_agents.agents.common.utils.choose_device', 'choose_device', (["self.config['device']"], {}), "(self.config['device'])\n", (780, 803), False, 'from rl_agents.agents.common.utils import choose_device\n'), ((912, 963), 'rl_agents.agents.common.optimizers.loss_function_factory', 'loss_function_factory', (["self.config['loss_function']"], {}), "(self.config['loss_function'])\n", (933, 963), False, 'from rl_agents.agents.common.optimizers import loss_function_factory, optimizer_factory\n'), ((4019, 4046), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (4029, 4046), False, 'import torch\n'), ((4123, 4169), 'torch.load', 'torch.load', (['filename'], {'map_location': 'self.device'}), '(filename, map_location=self.device)\n', (4133, 4169), False, 'import torch\n'), ((2184, 2251), 'rl_agents.agents.common.memory.Transition', 'Transition', (['state', 'action', 'reward', 'next_state', 'terminal', 'batch.info'], {}), '(state, action, reward, next_state, terminal, batch.info)\n', (2194, 2251), False, 'from rl_agents.agents.common.memory import Transition\n'), ((2584, 2599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2597, 2599), False, 'import torch\n'), ((4614, 4708), 'torch.zeros', 'torch.zeros', (['(1, *self.env.observation_space.shape)'], {'dtype': 'torch.float', 'device': 'self.device'}), '((1, *self.env.observation_space.shape), dtype=torch.float,\n device=self.device)\n', (4625, 4708), False, 'import torch\n'), ((1822, 1866), 'torch.tensor', 'torch.tensor', (['batch.action'], {'dtype': 'torch.long'}), '(batch.action, dtype=torch.long)\n', (1834, 1866), False, 'import torch\n'), ((1904, 1949), 'torch.tensor', 'torch.tensor', (['batch.reward'], {'dtype': 'torch.float'}), '(batch.reward, dtype=torch.float)\n', (1916, 1949), False, 'import torch\n'), ((2100, 2147), 'torch.tensor', 'torch.tensor', (['batch.terminal'], {'dtype': 'torch.uint8'}), '(batch.terminal, dtype=torch.uint8)\n', (2112, 2147), False, 'import torch\n'), ((2695, 2726), 'torch.zeros', 'torch.zeros', (['batch.reward.shape'], {}), '(batch.reward.shape)\n', (2706, 2726), False, 'import torch\n'), ((1736, 1782), 'torch.tensor', 'torch.tensor', (['[batch.state]'], {'dtype': 'torch.float'}), '([batch.state], dtype=torch.float)\n', (1748, 1782), False, 'import torch\n'), ((2007, 2058), 'torch.tensor', 'torch.tensor', (['[batch.next_state]'], {'dtype': 'torch.float'}), '([batch.next_state], dtype=torch.float)\n', (2019, 2058), False, 'import torch\n'), ((3568, 3607), 'torch.tensor', 'torch.tensor', (['states'], {'dtype': 'torch.float'}), '(states, dtype=torch.float)\n', (3580, 3607), False, 'import torch\n'), ((3785, 3824), 'torch.tensor', 'torch.tensor', (['states'], {'dtype': 'torch.float'}), '(states, dtype=torch.float)\n', (3797, 3824), False, 'import torch\n')]
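A toy-tensor sketch of the double-Q target computed inside torch.no_grad() above: the argmax comes from the online net, the value from the target net. The Q-values and rewards below are made-up numbers for illustration.

import torch

q_online = torch.tensor([[1.0, 3.0], [2.0, 0.5]])   # stand-in for value_net(next_state)
q_target = torch.tensor([[0.9, 2.5], [1.8, 0.4]])   # stand-in for target_net(next_state)
reward = torch.tensor([0.0, 1.0])
gamma = 0.99
_, best_actions = q_online.max(1)                     # actions chosen by the online net
best_values = q_target.gather(1, best_actions.unsqueeze(1)).squeeze(1)
target = reward + gamma * best_values
print(target)  # tensor([2.4750, 2.7820])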
#!/usr/bin/env python3get_coo """ Lattice dynamics model """ import h5py import os from itertools import product import numpy as np from numpy.linalg import norm import scipy.linalg import scipy.io import scipy.sparse from scipy.sparse import lil_matrix as spmat from .basic_lattice_model import BasicLatticeModel from .interface_vasp import Poscar from .cluster import Cluster from .structure import SupercellStructure from .util.mathtool import MixedIndexForImproper, IntegerDigits, tensor_constraint, RMS, mychop from .util.tool import pad_right, matrix2text from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr from .coord_utils import ReadPBC2Cart from .util.string_utils import str2arr, str2bool import logging logger = logging.getLogger(__name__) debug_level = 10 class LDModel(BasicLatticeModel): """ Lattice dynamics atomic variables: vector displacements on each atom property: total potential energy (electronic + nucleic potential, but NOT kinetic of nucleic) force """ def __init__(self, prim, raw_clusters, **kwargs): """ :param prim: :param raw_clusters: :return: """ BasicLatticeModel.__init__(self, prim, raw_clusters, **kwargs) self.all_range={o: max(self.proper_range[o],self.imp_range[o]) for o in self.proper_range} self.dipole_force = False if (not kwargs.get('dipole_force', True)) or \ ('born_charge' not in self.prim[0].properties.keys()) or \ (self.prim.intensive_properties['epsilon_inf'] is None) else True self.symm_residual_force = kwargs.get('symm_residual_force', True) self.Cmat = None self.ldff = None self._dpcor = None def translational_invariance(self): """ Translational invariance Matrix :return: C matrix """ print("Applying translational invariance.") crange = self.all_range prim = self.prim orbits = self.orbits rot_mats = [s.rot for s in prim.spacegroup] maxnpt = max([orb.cluster.order for orb in orbits]) Cmat = self.Cmat1 BmatCollect = [] #self.prim._init_all_nb_list([crange[ord] for ord in range(2, 1+max(crange.keys()))]) for iorb in range(len(orbits)): orb = orbits[iorb] clus = orb.cluster if clus.order >= maxnpt: continue npt_ex = clus.order + 1 cut = crange[npt_ex] if debug_level > 5555: print("debug> order %d cutoff %f" %(npt_ex, cut)) if clus.diameter > cut: if debug_level > 9990: print(" %d diameter %.4f out of range %.4f" % (iorb, clus.diameter, cut)) continue # find all points within range of clus if clus.order <=0: # empty cluster sumPts = [[0,0,0, l] for l in range(prim.num_sites)] else: sumPts = prim.find_nb_cluster(np.array(clus.ijkls), crange[npt_ex]) if debug_level > 55: print("debug> translation of", repr(clus)) dimTensor = 3** npt_ex #Bmat = spmat((dimTensor, self.nfct_tot)) # annoying bug in scipy.sparse matrices !! Bmat not computed properly. Using normal/dense matrices for now. Bmat = spmat((dimTensor, self.nfct_tot)).todense() foundClus = False for sumpt in sumPts: clusSum = clus.append_site(sumpt) if debug_level > 5555: print(" ", iorb, " searching ", sumpt) #idSum= Cluster2MI[clusSum]//Sort; # find the orbit that each summed cluster corresponds to *) [found, ioF, icF, igF, pi] = self.identify_cluster(clusSum) # print("identified",[found, ioF, igF, pi]) if found: foundClus = True # annoying bug in scipy.sparse matrices !! 
Bmat not computed properly Bmat[:, self.orb_idx_full[ioF]:self.orb_idx_full[ioF+1]]+=\ fct_trans_c(npt_ex, 3, rot_mats[igF], pi).todense() # fct_trans_c(npt_ex, 3, rot_mats[igF], pi) if not foundClus: print(' ',iorb, " nothing found for ", clus) continue # annoying bug in scipy.sparse matrices !! Bmat not computed properly Bmat = spmat(Bmat) BmatCollect.append(Bmat) Bmat = Bmat.dot(Cmat.T) if npt_ex > 999999: print("bypassing ", clus) # BmatCollect.extend( Select[RowReduce[Chop[Bmat,10.^-10]],(Norm[#]> 10.^-6)&]]) else: if debug_level > 99999: print(" calc null bmat") print(Bmat) # if not (scipy.sparse.isspmatrix(Bmat) and Bmat.getnnz()<=0): # Cmat = nullspace_rref(Bmat.toarray()).dot(Cmat) Cmat = mychop(get_nullspace(Bmat).dot(Cmat), 1e-12) print(" %4d + sum(a) %d remaining" % (iorb, Cmat.shape[0])) self.Cmat = Cmat return BmatCollect def symmetrize(self): self.isotropy_derivative_constraint() self.translational_invariance() #self.CheckNumericTranslationalInvariance() # self.process_fct_order() def prepare_index_full(self): for orb in self.orbits: orb.ncorr_full = 3**orb.cluster.order super().prepare_index_full() self.nfct_tot = self.ncorr_full def prepare_index(self): # self.process_all_fct() self.nfct = self.Cmat.shape[0] allfct_ord = np.hstack([np.full(3**o.cluster.order, o.cluster.order, dtype=np.int) for o in self.orbits]) self.fct_ord = [allfct_ord[row[0]] for row in self.Cmat.tolil().rows] self.ord_range = {o: len(self.fct_ord) - self.fct_ord[::-1].index(o) - 1 if o in self.fct_ord else 0 for o in range(self.maxorder+1)} self.ord_range[-1] = -1 self.fct_ord = np.array(self.fct_ord) self.allfct_orbidx = np.hstack([np.full(3**o.cluster.order, i,dtype=int) for i, o in enumerate(self.orbits)]) self.fct_orbidx = [self.allfct_orbidx[row[0]] for row in self.Cmat.tolil().rows] np.savetxt('num_fct_ord.txt', [self.ord_range[o]-self.ord_range[o-1] for o in range(self.maxorder+1)], fmt='%d') def isotropy_derivative_constraint(self): """ Apply isotropy and derivative commutativity constraints for all clusters in this model :return: """ clusters, iso_list, pi_list = zip(*[[orb.cluster, [x[0] for x in orb.isotropy[1:]], [x[1] for x in orb.isotropy[1:]]] for orb in self.orbits]) Cmats = self.calc_isotropy_derivative_constraint(self.prim.spacegroup, clusters, iso_list, pi_list, self.nfct_tot, self.symm_residual_force) self.Cmat1 = scipy.sparse.vstack(Cmats) print("Isotropy/deriv. constraints done. After dim/before=", self.Cmat1.shape) return Cmats @staticmethod def calc_isotropy_derivative_constraint(ops, clusters, iso_list, pi_list, nfct_tot, symm_residual_force=True): """ Apply isotropy and derivative commutativity constraints for all clusters in this model symm_residual_force: Whether to symmetrize the point cluster, i.e. residual forces. Default to True. If fitting phonons with small displacement and supercells that do not preserve symmetry (i.e. non-cubic low symmetry supercell for cubic system) :return: """ print("Applying point group symmetry") Cmats = [] ltothis=0 for iorb in range(len(clusters)): clus = clusters[iorb] npt = clus.order dimThis = 3**npt ltothis+=dimThis if npt <= 0: null = scipy.sparse.identity(dimThis) else: idx_constr = MixedIndexForImproper(clus.vertices, 3)[0] igs = iso_list[iorb] pis = pi_list[iorb] if (not symm_residual_force) and (npt==1): print("WARNING: symmetrization of point cluster (i.e. 
residual force) turned OFF") igs = igs[:0] pis = pis[:0] # print('debug isotropy2 igs, pis ', igs, list(igs), pis, 'zipped', *zip(*orb.isotropy[1:])) null = tensor_constraint(3, npt, [ops[ig].rot for ig in igs], pis, other_constraits=idx_constr) nfree = null.shape[0] print(" %4d null= %d/%d" %(iorb, nfree, dimThis), repr(clus), end='') if nfree>0: if npt <=2 and debug_level > 99999: print([clus, null]) if ltothis-dimThis>0: null = scipy.sparse.bmat([[spmat((nfree, ltothis-dimThis)), null]]) if nfct_tot-ltothis>0: null = scipy.sparse.bmat([[null, spmat((nfree, nfct_tot-ltothis))]]) Cmats.append(null) print() else: print(' vanishing cluster!') return Cmats @staticmethod def write_mat(Cmat, outf): """ Export Cmat :param outf: :return: """ if Cmat is None: raise ValueError("Cmat not set for this model") print("writing matrix", Cmat.shape, "to", outf) scipy.io.mmwrite(outf, Cmat) @property def ncorr(self): return self.nfct + (0 if self.ldff is None else self.ldff.ncorr) def get_params(self): all_ord=list(set([o.cluster.order for o in self.orbits])) ldff = self.ldff if ldff is None: ld_diag = np.arange(0) ld_scalelist = [1] maxfitord = self.maxorder else: ld_diag = np.ones(ldff.ncorr) ld_scalelist = ldffscale_list maxfitord = self.maxorder+1 all_ord+=[maxfitord] self.ord_range[maxfitord] = self.ord_range[self.maxorder] + ldff.ncorr param = dict() for o in range(maxfitord+1): param[o]=self.ord_range[o]-self.ord_range[o-1] return param def get_submodels(self, name_ord, u_list, lr_pair_penalty=0, ldffscale_list=[1], knownsol=None): """ :param name_ord: list of [name, fct order] :param u_list: list of uscale :param lr_pair_penalty: penalty exp(-penalty*radius) for pair clusters :return: list of [name, matrix] defining the different fittings """ all_ord=list(set([o.cluster.order for o in self.orbits])) ldff = self.ldff if ldff is None: ld_diag = np.arange(0) ld_scalelist = [1] maxfitord = self.maxorder else: ld_diag = np.ones(ldff.ncorr) ld_scalelist = ldffscale_list maxfitord = self.maxorder+1 all_ord+=[maxfitord] self.ord_range[maxfitord] = self.ord_range[self.maxorder] + ldff.ncorr sol0 = np.zeros(self.ncorr) if knownsol: print(" Reading previous solution from %s"%(knownsol)) input_sol = self.load_solution(knownsol).reshape(-1) sol0[:min(sol0.size, input_sol.size)] = input_sol[:min(sol0.size, input_sol.size)] param = dict() for o in range(maxfitord+1): param[o]=self.ord_range[o]-self.ord_range[o-1] name_ord = self.process_name_ord(name_ord, all_ord) pair_r0 = np.min([orb.cluster.diameter for orb in self.orbits if orb.cluster.order==2 and orb.cluster.order_uniq==2]) pair_diameter=np.array([self.orbits[idx].cluster.diameter-pair_r0 if self.orbits[idx].cluster.order==2 and self.orbits[idx].cluster.order_uniq==2 else 0 for idx in self.fct_orbidx]) return [[nm+ ' uscale= %g'%(uscale) + str('' if ldff is None else " ldffscale=%g"%(ldffscale)), scipy.sparse.diags(np.hstack(((1/uscale)**(self.fct_ord-1)* np.exp(-pair_diameter*lr_pair_penalty), ldffscale*ld_diag)), 0).tocsr()[:, sum([list(range(self.ord_range[i-1]+1, self.ord_range[i]+1)) for i in o if i<=maxfitord], []) if o[0]>=0 else list(range(-o[0],-o[1]))], sol0] for (nm, o) in name_ord for uscale in u_list for ldffscale in ld_scalelist] def CheckNumericTranslationalInvariance(self, trans=np.array([1.0, 2.0, 3.0])): """ Apply uniform translation, calculate the force :return: """ print(" To be implemented: checking translation", trans) def get_full_fct(self, sol_sym): """ Return all FCT elements from symmetry reduced parameters :param sol_sym: 
symmetrized solution vector :return: expanded, full FCT's without symmetry """ return self.Cmat.T.dot(sol_sym[:self.nfct]) def get_correlation(self, sclist, wtFunc= lambda x:1, corrtype='f', delForce=1, shift=True, **kwargs): """ :param sclist: [ [sc0, [sub0, sub1, ...]], [sc1, [sub11, ...]]] :param wtFunc: :param corrTyp: 'e' for energy, 'f' for force :param delForce: which components to delete :param shift: whether to subtract the shift (average force) :param residual_force: Whether to subtract residual forces of equilibrium structure, if found :return: correlation matrix A """ import os.path ldff = self.ldff theC = self.Cmat.T ncorr = theC.shape[1] residual_force= str2bool(kwargs['setting'].get('residual_force', 'F')) if ldff is not None: ncorr += ldff.ncorr if corrtype == 'e': totNF = sum([len(sc[1]) for sc in sclist]) elif corrtype == 'f': # ignore the last atom because of translational invariance (subtract delForce=1) totNF = sum([3*(Poscar.from_file(rd+"/POSCAR").structure.num_sites - delForce) for sc in sclist for rd in sc[1]]) else: raise ValueError("ERROR: expecting to fit f(orce) or e(energy) but found %s"%(corrtype)) print(" Total number of linear equations", totNF) assert totNF>0, ValueError("ERROR got no input data") Alist = np.zeros((totNF, ncorr)) Flist = np.zeros((totNF, 3)) Astart=0 for sc in sclist: print(" reading supercell", sc[0]) SCinfo= SupercellStructure.from_file(self.prim, sc[0]) SCinfo.to_unperturbed() x0frac = SCinfo.frac_coords #SCinfo = SupercellStructure(self.prim, SCmat, None, x0frac) ncell = SCinfo.n_cell clusALL = self.translate_to_supercell(SCinfo) if corrtype=='f' and residual_force and os.path.exists(os.path.dirname(sc[0])+"/residual_force.txt"): print(" found 'residual_force.txt'") f0 = np.loadtxt(os.path.dirname(sc[0])+"/residual_force.txt") if shift: f0-= np.mean(f0, axis=0) else: f0 = 0 if self.dipole_force: fcmfile= os.path.dirname(os.path.abspath(sc[0]))+"/fcm_dp" if False and os.path.isfile(fcmfile): print(" reading dipole FC for supercell "+fcmfile) fcm_dp = np.loadtxt(fcmfile) else: print(" computing long-range forces") fcm_dp = self.get_hessian_dipole_corrected(SCinfo) np.savetxt(fcmfile, fcm_dp) else: fcm_dp = None if ldff is not None: radialC= self.translate_to_supercell(SCinfo, ldff.orb_idx) if debug_level > 10: print("supercell clusters generated") for rd in sc[1]: rundir = rd dx= ReadPBC2Cart(rundir + "/POSCAR", x0frac) # weight= wtFunc(dx, uScale) # weight = np.ones(dx.shape[0]) weight = 1 if debug_level > 2: print(" config",rundir, " weight=", weight, " max |dx|=", np.amax(norm(dx,axis=1))) dx_sort = dx print('dx_sort : ',type(dx_sort),dx_sort.shape) print('clusALL : ',type(clusALL),len(clusALL)) print('theC : ',type(theC),theC.shape) Amat= self.calc_correlation(dx_sort, clusALL).dot(theC) if ldff is not None: Amat = scipy.sparse.hstack((Amat, ldff.calc_correlation(dx_sort, radialC, ncell))).tocsr() if corrtype == "e": thisNF = 1 if os.path.isfile(rundir+"/energy.txt"): values = np.loadtxt(rundir+"/energy.txt", ndmin=1)/ncell else: print("WARNING: no energy.txt file found. 
Proceeding with 0...") values = np.zeros(1) valuesFit = values.copy() if fcm_dp is not None: en_dp = 0.5*np.dot(dx_sort.reshape(-1),fcm_dp.dot(dx_sort.reshape(-1))) np.savetxt(rundir+"/energy.txt_dp", en_dp) valuesFit -= en_dp/ncell Amat = Amat[-1:]/ncell elif corrtype == 'f': thisNF = 3*(len(dx)- delForce) if os.path.exists(rundir+"/force.txt"): values = np.loadtxt(rundir+"/force.txt") shift_size= np.linalg.norm(np.sum(values, axis=0)) if shift_size >1e-3: print("WARNING: large shift in force %.4f in %"(shift_size, rundir+"/force.txt")) if shift: values-= np.mean(values, axis=0) valuesFit = values.copy() - f0 else: print("WARNING: force.txt not found in %s ... setting to zero ... OK for renormalization"%(rundir)) values = np.zeros((len(dx), 3)) valuesFit = values.copy() assert values.shape == dx.shape, 'force [%d %d] coords [%d %d]' % (values.shape[0], values.shape[1], dx.shape[0], dx.shape[1]) if fcm_dp is not None: f_dp = -fcm_dp.dot(dx_sort.reshape(-1)).reshape((-1,3)) #np.savetxt(rundir+"/force.txt_dp", f_dp) valuesFit -= f_dp values = values.flatten()[:thisNF] valuesFit = valuesFit.flatten()[:thisNF] if debug_level >30: print("forces read in") if thisNF != len(values): raise ValueError("ERROR: expecting ", thisNF, " but found ", len(values), " force components") Amat = Amat[:thisNF] if debug_level >9999: print(" A size", Amat.shape, Amat.__class__) Alist[Astart:Astart+thisNF, :] = (Amat * weight).todense() Flist[Astart:Astart+thisNF, 0] = valuesFit * weight Flist[Astart:Astart+thisNF, 1] = np.full((thisNF), weight, dtype=np.double) Flist[Astart:Astart+thisNF, 2] = values-valuesFit Astart += thisNF return [spmat(Alist), Flist] def calc_correlation(self, dx, clusALL): """ :param dx: :param clusALL: all clusters in the supercell :return: """ maxnpt = self.maxorder return spmat(ld_get_correlation(dx.shape[0], len(self.orbits), maxnpt, self.nfct_tot, np.array(dx), np.array([pad_right(np.array(clus[0]), maxnpt) for clus in clusALL], dtype=np.int32), np.array([clus[1] for clus in clusALL], dtype=np.int32), np.array([clus[2] for clus in clusALL], dtype=np.int32), np.array([orb.cluster.order for orb in self.orbits], dtype=np.int32), np.array([orb.cluster.factorial for orb in self.orbits]), np.array([op.rot_inv for op in self.prim.spacegroup]))) def save_fct(self, sol, outf, scmat, combine_improper=True): """ :param sol: solution vector :param outf: output filename. 
Two files .lat and .pot, will be generated :param scmat: supercell integer 3x3 matrix :return: """ print(" saving lattice and potential to", outf) scinfo = SupercellStructure.from_scmat(self.prim, scmat) self.save_fct_lat(outf+'.lat', scinfo) assert sol.shape[0] >= self.nfct self.save_fct_pot(outf+'.pot', self.get_full_fct(sol), sol[self.nfct:], scinfo, combine_improper=combine_improper) def save_fct_lat(self, outf, scinfo): """ :param outf: :param scinfo: :return: """ # write lattice points fp = open(outf, 'w') natom = self.prim.num_sites ncell = scinfo.n_cell SCposFrac= scinfo.frac_coords outs =[matrix2text(self.prim.lattice._matrix), matrix2text(scinfo.sc_mat), str(natom)] outs += ["%f %f %f %d" % tuple(self.prim.frac_coords[i].tolist()+[self.prim.atomic_numbers[i]]) for i in range(natom)] outs += [str(SCposFrac.shape[0]), ''] fp.write('\n'.join(outs)) for iA in range(natom): for jLat in range(scinfo.n_cell): fp.write("%d %s %d %s\n" %(iA*ncell+ jLat+1, matrix2text([scinfo.sc_ref[jLat]]), iA+1, matrix2text([SCposFrac[iA*ncell+ jLat]]))) fp.close() def save_fct_pot(self, outf, sol_fct, sol_ff, scinfo, tol=1.E-12, combine_improper=False, output_ijkl=True): """ write potential file. :param outf: :param scinfo: :return: """ ldff = self.ldff fp = open(outf, 'w') natom = self.prim.num_sites dim = 3 ops = self.prim.spacegroup fc_norm = [] if self.dipole_force: fcm_dp = self.get_hessian_dipole_corrected(scinfo) else: fcm_dp = np.zeros((scinfo.num_sites*3, scinfo.num_sites*3)) flag_dp = np.zeros((scinfo.num_sites, scinfo.num_sites),dtype=np.int) for iO, orb in enumerate(self.orbits): clus0 = orb.cluster npt = clus0.order fac = clus0.factorial val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]] fc_norm.append([clus0.diameter, np.linalg.norm(val),clus0.order, clus0.order_uniq]) if ldff is not None: ppout = ldff.tostr(sol_ff, iO) else: ppout = "0\n0" if fc_norm[-1][1]>tol or (ppout != "0\n0"): for ic, clus in enumerate(orb.clusters): trans_cluster = np.array(BasicLatticeModel.translate_cluster_to_supercell(scinfo, clus)) valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val) if npt==2: valTrans+= fcm_dp[trans_cluster[0,0]*3:trans_cluster[0,0]*3+3,trans_cluster[0,1]*3:trans_cluster[0,1]*3+3].flatten() flag_dp[trans_cluster.T.tolist()]=1 if combine_improper: valTrans= clus.reduce_improper_fct_output(valTrans) # fewer terms to save! 
# fctTrans = valTrans.reshape([dim for _ in range(npt)]) # If[npt==2,AppendTo[pairc, orbitUniq[[iC,icOrb,1]]]; AppendTo[pairFCM, fctTrans]]; # if ic <= 3: # print(iO, ic, clus) # print(valTrans) # print(trans_cluster) clus_out = matrix2text(clus._ijkls if output_ijkl else clus.frac_coords) outs = [str(npt), clus_out, str(len(trans_cluster)), matrix2text(trans_cluster+1), LDModel.fct2str(npt, valTrans/fac, tol), ppout] fp.write("\n".join(outs) + "\n\n") np.savetxt("fct_norm_vs_diameter.txt", fc_norm, header='col1=diameter col2=norm col3=npt col4=npt_uniq') return for i1 in range(scinfo.num_sites): for i2 in range(i1, scinfo.num_sites): if flag_dp[i1,i2]: continue npt = 2 # WARNING: TODO: convert pair of coords to minimal distance within the supercell periodic boundary condition clus = Cluster.from_coords(scinfo.cart_coords[[i1,i2]], self.prim, frac_coords=False) fac = clus.factorial clus_out = matrix2text(clus._ijkls if output_ijkl else clus.frac_coords) ppout = "0\n0" valTrans = fcm_dp[i1*3:i1*3+3, i2*3:i2*3+3].flatten() outs = [str(npt), clus_out, str(1), matrix2text(np.array([[i1,i2]])+1), LDModel.fct2str(npt, valTrans/fac, tol), ppout] fp.write("\n".join(outs) + "\n\n") def save_fcshengbte(self, sol, ord, tol=1e-20): assert ord in (3,4), "Only order 3 or 4 FCTs are accepted by shengbte, got %d"%(ord) import io, re sol_fct = self.get_full_fct(sol) ops = self.prim.spacegroup print("WRITING FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord])) fc_name="FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord]) hf_name="fc%s.hdf5"%({3:"3RD",4:"4TH"}[ord]) fp=io.StringIO() icount=0 for iO, orb in enumerate(self.orbits): clus0 = orb.cluster npt = orb.order if npt != ord: continue val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]] if np.amax(np.abs(val)) <= tol: continue perms = clus0.permutations() for ic, clus in enumerate(orb.clusters): ijkls = clus._ijkls_np valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val).reshape((3,)*ord) #print('debug iorb, ic iper', iO, ic, len(perms), clus) for iper in perms: icount+=1 #print('debug', icount, clus0.ijkls, iO, ic) ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(ijkls[iper[1:],:3]- ijkls[iper[0:1],:3])) valPerm = np.transpose(valTrans, iper).reshape((-1)) fp.write("\n%d\n%s\n%s\n"%(icount, ijk_other, matrix2text(ijkls[iper,3]+1))) fp.write(re.sub(r".*\n", r"",LDModel.fct2str(npt, valPerm, -1),count=1)+'\n') with open(fc_name, 'w') as modified: modified.write("%d\n"%(icount) + fp.getvalue()) fp.close() # for original (unmodified) version of ShengBTE def save_fcshengbte_original(self, sol, ord, tol=1e-20, output_ijkl=True): from .util.tool_for_original_shengbte import LPTClusterEquivalentByTranslation, relativePosition, FCTrans, ListFlat assert ord in (3,4), "Only order 3 or 4 FCTs are accepted by shengbte, got %d"%(ord) import io, re import os.path from f_util import f_util # uscale = u_list[0] np.savetxt('sol',sol) # print('SOL : ',len(sol),'\n',sol) sol_fct = self.get_full_fct(sol) # expend independent FCT over the null space (isotropy group, translational invariance) # print('SOL_FCT : ',len(sol_fct),'\n',sol_fct) np.savetxt('sol_fct',sol_fct) ops = self.prim.spacegroup # scinfo = SupercellStructure.from_scmat(self.prim, scmat) # flag_dp = np.zeros((scinfo.num_sites, scinfo.num_sites),dtype=np.int) fc_name="FORCE_CONSTANTS_%s"%({3:"3RD",4:"4TH"}[ord]) fp=io.StringIO() fp2=io.StringIO() icount=0 icount2=0 ####################################################################################### ### Apply 
triplet selection - JP modeled after Yi's codes phononFCT.py and tools.py ### ####################################################################################### R = self.prim.lattice.matrix rmat = np.array(R).tolist() print('rmat : \n',rmat) apos = self.prim.frac_coords poscar = np.array(apos).tolist() print('poscar : \n',poscar) natom = len(apos) print('natom : ',natom) cutoff = 6 nlat = 2 # normally requires 5 parameters, but calling using f2py relieves the need for 5th natom f_util.select_triplet(poscar, rmat, cutoff, nlat) #----------------------------------------------------------- lines=[list(map(int, line.split())) for line in open('triplet-selection','r')] selclus=[ [ [[0,0,0],line1[3]],[[line1[4],line1[5],line1[6]],line1[7]],[[line1[8],line1[9],line1[10]],line1[11]] ] for line1 in lines ] counter=0 icount2=0 fctsym=[] for clus in selclus: # loop over all selected triplet clusters (could be in any cell) print('CLUS : \n',clus) counter=counter+1 npt=len(clus) # 2 for pair, 3 for triplets, etc. foundOrb=False for iO, orb in enumerate(self.orbits): # loop over all orbits clus0 = orb.cluster npt = clus0.order if npt != ord: continue #val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]]/(pow(uscale, npt-1)) for ic2, clus2 in enumerate(orb.clusters): # loop over all clusters in the orbits tmp = list(clus2.vertices) clustry = [[[tmp[0].ijkl[0],tmp[0].ijkl[1],tmp[0].ijkl[2]],tmp[0].ijkl[3]],[[tmp[1].ijkl[0],tmp[1].ijkl[1],tmp[1].ijkl[2]],tmp[1].ijkl[3]],[[tmp[2].ijkl[0],tmp[2].ijkl[1],tmp[2].ijkl[2]],tmp[2].ijkl[3]]] foundOrb = LPTClusterEquivalentByTranslation(clustry, clus, True) # check if this cluster is the translated version of selclus if foundOrb != False: # if match found print('Cluster Matched! \n',clustry) print('SOL_FCT INDICES : ',self.orb_idx_full[iO],self.orb_idx_full[iO+1]) print('foundOrb : ',foundOrb) val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]] # load full FCT for a cluster print('Distinct FCT : \n',val) fctsym.append(ListFlat(list(val))) icount2+=1 valTrans2 = np.array(FCTrans(npt, 3, ops[orb.clusters_ig[ic2]].rot, relativePosition(clustry, foundOrb))).dot(val) print('Transformed FCT : \n',valTrans2,'\n') # get lattice coordinates of the 2 other cells by zero-referencing to the 1st cell #ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(clus._ijkls_np[1:,:3] - clus._ijkls_np[0:1,:3])) difference = np.array([np.array(clus[1][0]) - np.array(clus[0][0]),np.array(clus[2][0]) - np.array(clus[0][0])]) #print(difference) ijk_other= matrix2text(self.prim.lattice.get_cartesian_coords(difference)) #print('ijk_other : \n',ijk_other) #fp.write("\n%d\n%s\n%s\n"%(icount2, ijk_other, matrix2text(clus._ijkls_np[:,3]+1))) fp.write("\n%d\n%s\n%s\n"%(icount2, ijk_other, matrix2text(np.array([clus[0][1],clus[1][1],clus[2][1]])+1))) fp.write(re.sub(r".*\n", r"",LDModel.fct2str(npt, valTrans2, -1),count=1)+'\n') break np.savetxt('fctsym',fctsym) with open(fc_name, 'w') as modified: modified.write("%d\n"%(icount2) + fp.getvalue()) fp.close() def load_solution(self, sol_f, potential_coords_ijkl=True): """ sol_f: file_name_of_solution [order_to_keep] File format is either solution vector or potential file order_to_keep is like 0,1,2 (no space) """ solinf = sol_f.split() if solinf[0][-4:].lower()!='.pot': print(" Loading symmetrized FCT from %s"% (solinf[0])) sol= np.loadtxt(solinf[0], ndmin=2) if len(solinf)>1: print("WARNING!!!! 
only order %s will be kept"%(solinf[1])) tmp = np.zeros_like(sol) for ord in eval("[%s]"%(solinf[1])): print("ord= %d corresponding idx="%(ord), (self.ord_range[ord-1]+1,self.ord_range[ord]+1)) tmp[:,self.ord_range[ord-1]+1:self.ord_range[ord]+1] = sol[:,self.ord_range[ord-1]+1:self.ord_range[ord]+1] sol=tmp return sol else: from .util.io_utils import read_nrecord_array print(" Loading symmetrized FCT from potential %s"% (solinf[0])) full_fct= np.zeros(self.nfct_tot) lines = open(solinf[0], 'r').readlines() line=0 while line<len(lines): line, xyz=read_nrecord_array(lines, line) if potential_coords_ijkl: clus= Cluster.from_ijkl(xyz.astype(int), self.prim) else: clus= Cluster.from_coords(xyz, self.prim) line, clus_instances=read_nrecord_array(lines, line) line, ijval=read_nrecord_array(lines, line) line, rad1=read_nrecord_array(lines, line) line, rad2=read_nrecord_array(lines, line) line += 1 # empty line [found, ioF, icF, igF, pi] = self.identify_cluster(clus) if (not found) or (icF != 0) or (igF != 0): continue ord = clus.order # print("found cluster order=%d id=%d line=%d"%(ord, ioF, line)) fct= np.zeros((3,)*ord) for x in ijval: #print(" debug x=", x, tuple(x[:ord].astype(int)-1), x[-1]) fct[tuple(x[:ord].astype(int)-1)] = x[-1] fct = fct*clus.factorial fct = fct_trans_c(ord, 3, self.prim.spacegroup[igF].rot, pi).T.dot(fct.reshape((-1))) full_fct[self.orb_idx_full[ioF]:self.orb_idx_full[ioF+1]] = fct.reshape((-1)) #print("debug full_fct=", full_fct) sol = scipy.sparse.linalg.lsqr(self.Cmat.T, full_fct,atol=1e-20,btol=1e-20) #print("debug sol=", sol) np.savetxt(solinf[0]+'_loaded_sol', sol[0]) if sol[3] > 1E-4: print("WARNING large error %f loading potential to symmetrized FCT"%(sol[3])) return np.array([sol[0]]) def get_pair_info(self, sol_fct, ord=2, tol=1.E-20): """ extract pair interactions for phonon calculations. 
:param ord: usually 2 :param sol_fct: solution vector :return: """ natom = self.prim.num_sites ops = self.prim.spacegroup pairijk = [] pairTyp = [] pairFCM = [] dim=3 for iO, orb in enumerate(self.orbits): npt = orb.cluster.order if npt != ord: continue val = sol_fct[self.orb_idx_full[iO]:self.orb_idx_full[iO+1]] if (abs(val)<=tol).all(): continue for ic, clus in enumerate(orb.clusters): valTrans = fct_trans_c(npt, 3, ops[orb.clusters_ig[ic]].rot, np.arange(npt, dtype=int)).dot(val) fctTrans = valTrans.reshape([dim]*npt) pairijk.append(clus._ijkls_np[0,:3] - clus._ijkls_np[1,:3]) pairTyp.append(clus._ijkls_np[:,3]) pairFCM.append(fctTrans) if len(pairijk)>0: return (np.array(pairijk), pairTyp, pairFCM) else: return (np.zeros((1,3),dtype=int), np.zeros((1,2),dtype=int), np.zeros((1,3,3))) @staticmethod def fct2str(npt, fct, tol=1.E-12): """ Note fct is a 1-D array, NOT tensor """ outs = ["%s %.15f"%(matrix2text([IntegerDigits(i, 3, npt)+1]), fct[i]) for i in range(3**npt) if abs(fct[i]) > tol] return "\n".join([str(len(outs))] + outs) @staticmethod def get_hessian_dipole(s): """ :param s: structure with epsilon_inf and born_charge """ from f_phonon import f_phonon if s.intensive_properties['epsilon_inf'] is None: return np.zeros((3*s.num_sites,3*s.num_sites)) return f_phonon.get_fcm_dipole(s.lattice.matrix.T, s.lattice.inv_matrix, 1E-18, s.cart_coords.T, np.array(s.site_properties['born_charge']).transpose([1,2,0]), s.intensive_properties['epsilon_inf'].T, [0,0,0]).real def get_hessian_dipole_corrected(self, s): """ s: supercell """ from f_phonon import f_phonon fcm_dp = self.get_hessian_dipole(s) if self._dpcor is None: return fcm_dp f_phonon.init(s.lattice._matrix, s.atomic_masses, s.frac_coords, *self.translate_pairinfo_to_supercell(s, *self._dpcor)) fcm_cor= f_phonon.get_dm([[0.,0.,0.]],3*s.num_sites)[:,:,0].real #print(fcm_cor.shape, fcm_dp.shape, fcm_cor[0,0], fcm_dp[0,0], s.atomic_masses.__class__, s.atomic_masses) #for iA in range(s.num_sites): # for ix in range(3): # for jA in range(s.num_sites): # for jx in range(3): # fcm_cor[iA*3+ix,jA*3+jx]*=np.sqrt(s.atomic_masses[iA]*s.atomic_masses[jA]) #np.savetxt('fcm_dp', fcm_dp) #np.savetxt('fcm_cor', fcm_cor) mass= np.sqrt(np.array(s.atomic_masses).repeat(3)) fcm_cor*= np.outer(mass, mass) #np.savetxt('fcm_corScaled', fcm_cor) #np.savetxt('fcm_all', fcm_dp+fcm_cor) return fcm_dp+ fcm_cor def translate_pairinfo_to_supercell(self, sc, ijk_prim, typ_prim, fcm_prim): return LDModel.pairinfo_to_supercell(self.prim, sc, ijk_prim, typ_prim, fcm_prim) @staticmethod def pairinfo_to_supercell(prim, sc, ijk_prim, typ_prim, fcm_prim): nfcm_prim = len(ijk_prim) nfcm = nfcm_prim*sc.n_cell ijk=np.zeros((nfcm, 3),dtype=int) typ=np.zeros((nfcm, 2),dtype=int) fcm=np.zeros((nfcm, 3,3)) for i in range(nfcm_prim): clus= Cluster.from_ijkl([np.append(ijk_prim[i],typ_prim[i][0]), [0,0,0,typ_prim[i][1]]], prim) tc= BasicLatticeModel.translate_cluster_to_supercell(sc, clus) newijk= np.round(np.dot(clus.frac_coords[0]-clus.frac_coords[1], sc.inv_sc_mat)-(sc.frac_coords[tc[0][0]]-sc.frac_coords[tc[0][1]])).astype(int) # print(i, "cluster", clus, tc, newijk) for j in range(sc.n_cell): fcm[i*sc.n_cell+j] = fcm_prim[i] typ[i*sc.n_cell+j] = tc[j] ijk[i*sc.n_cell+j] = newijk return ijk, typ, fcm def get_dpcor(self, bondlen, errtol=1e-7): from f_phonon import f_phonon offd=[[0,0,1],[1,2,2]] offdflatUp = [1,2,5] offdflatDn = [3,6,7] diagflat= [0,4,8] fcm_dp = self.get_hessian_dipole(self.prim) np.savetxt("prim_dp", fcm_dp) non_symm = [fcm_dp[3*i:3*i+3, 
3*i:3*i+3]-fcm_dp[3*i:3*i+3, 3*i:3*i+3].T for i in range(self.prim.num_sites)] pts=self.l_point_cls() npt=len(pts) bvec = np.array([non_symm[pt][offd[0],offd[1]] for pt in pts]).reshape((-1)) if np.linalg.norm(bvec)/np.sqrt(len(bvec)) <1E-13: print("++ no ASR violation in long-range force constant matrix") return None print('************** corrections to long-range force constant matrix *****************') # create an LD model using nearest neighbor only ldNN = init_ld_model(self.prim, {'model_type':'LD', 'max_order':2, 'cluster_diameter':str(bondlen), 'proper_diameter':str(bondlen),'cluster_filter':'lambda cls: True'}, {}, 2, 2, 0, False) #print(ldNN) C1mats = ldNN.isotropy_derivative_constraint()[1+2*npt:] if not C1mats: raise ValueError('ERROR: to get corrections properly, please increase [model]dpcor_bond to approximately the cutoff distance of first neighbor shell') C1 = spmat(scipy.sparse.vstack(C1mats))[:,1+12*npt:] nvar= C1.shape[0] Bmats= ldNN.translational_invariance()[1:] Bmats = [i[:,1+12*npt:] for i in Bmats] B1 = spmat(scipy.sparse.vstack(Bmats)) Acorrection = spmat(np.zeros((len(bvec), nvar))) for i, pt in enumerate(pts): Acorrection[3*i:3*i+3] = (Bmats[i][offdflatUp]-Bmats[i][offdflatDn]).dot(C1.T) Acorrection=spmat(Acorrection) # from cssolve.bregman_func import bregman_func # solution = bregman_func(Acorrection, bvec, method=1, mu=1E-5, lbd=3,maxIter=2000, tol=1E-6) # print(get_errors(bvec, Acorrection.dot(solution))) # print(solution) dpcor_sol_f='dpcor_sol.dat' if False and os.path.isfile(dpcor_sol_f): print('++ Loading dpcor from %s'%(dpcor_sol_f)) solution = np.loadtxt(dpcor_sol_f) else: solution = scipy.sparse.linalg.lsqr(Acorrection, bvec)[0] np.savetxt(dpcor_sol_f, solution) # solution = np.linalg.lstsq(Acorrection[:-3].todense(), bvec[:-3])[0] rmse = RMS(bvec - Acorrection.dot(solution)) if rmse > errtol: raise ValueError('dpcor correction FAILED rmse= %5g. 
Check symmetry or increase dpcor_bond'%(rmse)) # np.savetxt('Adpcor.out', Acorrection.todense()) # np.savetxt('bdpcor.out', bvec) #print('correction FCM=',solution) print('************** corrections done (rmse= %5g) *****************'%(rmse)) # send correction SR FCM to f_phonon full_sol_proper= np.array(C1.T.dot(solution)) #print(C1.T.dot(solution)) #print(np.zeros(1+3*len(pts)), "\nonsite", -B1.dot(full_sol_proper), "\npair", full_sol_proper) # print('debug onsite dpcor', B1.dot(full_sol_proper).reshape((-1,3,3))) full_sol = np.hstack((np.zeros(1+3*len(pts)), -B1.dot(full_sol_proper), full_sol_proper)) # print('debug trans inv', ldNN.translational_invariance()) # print('debug checking trans inv', scipy.sparse.vstack(ldNN.translational_invariance()).dot(full_sol)) #print('DEBUG CORR_pair_info',ldNN.get_pair_info(full_sol)) dpcor_pair = ldNN.get_pair_info(full_sol,2,1E-30) # f_phonon.init_dpcor(*dpcor_pair) self._dpcor = dpcor_pair return dpcor_pair def init_ld_model(prim, setting, setting_ldff, clus_step, symC_step, ldff_step, dpcor=True, pdfout=None): """ model initialization :param prim: :param setting: :param ldff_setting: :param clus_step: :param symC_step: :param ldff_step: :return: LD model and its associated LDFF model """ from scipy.io import mmread, mmwrite if clus_step <= 0: exit(0) assert setting['model_type'] == 'LD', ValueError("This script is intended for lattice dynamics only") maxorder = int(setting['max_order']) scale = prim.lattice._scale if str2bool(setting.get('fractional_distance','False')) else 1 irange = (np.hstack(([0.1, 0.1], str2arr(setting['cluster_diameter'])[:maxorder-1]))*scale).tolist() prange_str = setting.get('proper_diameter', '') # use cluster_diameter if proper_diameter not specified if not prange_str: prange_str = setting['cluster_diameter'] prange = (np.hstack(([0.1, 0.1], str2arr(prange_str)[:maxorder-1]))*scale).tolist() irange = dict(zip(range(maxorder+1), irange)) prange = dict(zip(range(maxorder+1), prange)) clus_sel = eval(setting['cluster_filter']) dipole_force = str2bool(setting.get('dipole_force', 'True')) symm_residual_force = str2bool(setting.get('symm_residual_force', 'True')) spec = {'maxorder':maxorder, 'prange':prange, 'filter':clus_sel, 'dipole_force':dipole_force, 'symm_residual_force':symm_residual_force} spec.update({'irange':irange}) if clus_step == 1: model = LDModel.from_file(prim, setting['cluster_in'], **spec) elif clus_step in [2, 3]: model = LDModel.generate_clusters(prim, **spec) # model.cleanup() if clus_step == 3: model.save_clusters(setting['cluster_out']) else: print("ERROR: Unknown clus_step: ", clus_step) exit(-1) print("+ Obtained %d proper clusters" %(len(model.clusters)), model.tally()) model.generate_improper() # model.cleanup() model.get_orbit_isotropy() model.prepare_index_full() # if we only need a NN model if not dpcor: return model if model.dipole_force and model._dpcor is None: model.get_dpcor(setting.getfloat('dpcor_bond', 2.8),setting.getfloat('dpcor_errtol', 1e-7)) print(model) #model.save_clusters('cluster_all') ######## independent parameters if symC_step <= 0: exit(0) elif symC_step == 1: model.Cmat = mmread(setting['symC_in']) # ld.process_fct_order(pdfout) elif symC_step in [2, 3]: model.symmetrize() if symC_step == 3: mmwrite(setting['symC_out'], model.Cmat) else: print("ERROR: Unknown symC_step: ", symC_step) exit(-1) model.prepare_index() print("+ LD symmetrization done. 
After dim/before=", model.Cmat.shape) ######## Force field on lattice if len(setting_ldff) <= 0: model.ldff = None return model entries = [k for k, v in setting_ldff.items()] if (ldff_step <= 0) or ('orbit_indices' not in entries): model.ldff = None elif ldff_step == 2: l234 = list(map(int, setting_ldff['num_basis'].split())) assert len(l234) >= 1 xpts = list(map(float, setting_ldff['interpolation_pts'].split())) assert len(xpts) >= 3 if 'polaron_force' in entries: ldfftype= PolaronFF model.ldff = PolaronFF(model, str2arr(setting_ldff['orbit_indices'], int).tolist(), xpts=np.arange(*xpts[:3]), lmax2=l234[0], bas2 = eval(setting_ldff['basis_2']), nradial=int(setting_ldff['nradial']), dimer_indices=str2arr(setting_ldff['dimer_indices'],int).tolist(), chgFunc=eval(setting_ldff['chgfunc']), dchgFunc=eval(setting_ldff['dchgfunc'])) else: ldfftype= LDFFmodel model.ldff = LDFFmodel(model, str2arr(setting_ldff['orbit_indices'], int).tolist(), xpts=np.arange(*xpts[:3]), lmax2=l234[0], cut12=str2arr(setting_ldff.get('cut12','-0.7 0.7'),float,(-1,2)), m12=str2arr(setting_ldff.get('m12','12 6'),int,(-1,2)), bas2 = eval(setting_ldff['basis_2'])) print("+ %s initialized %d parameters" %(ldfftype.__name__, model.ldff.ncorr)) else: print("ERROR: Unknown ldff_step: ", ldff_step) exit(-1) return model class LDFFmodel(): """ LD force field (only depends on interatomic distances) cut12: cutoffs for extrapolation. if r-r0<cut1 or >cut2, extrapolate to c0+c1/r**m m12: m1, m2 for cut1, cut2 respectively """ def __init__(self, ld, orb_idx, lmax2=-1, bas2=[], lmax3=-1, bas3=[], xpts=np.array([]), cut12=np.array([[-0.7,0.7]]), m12=np.array([[12,6]])): """ :param ld the LD model :param orb_idx indices of the selected orbits used in LDFF :param bas2 Either 1) a list of basis functions, each takes 1 parameter, dr=r-r0 2) a function b[l, dr] where l=0..lmax-1 :param lmax2 is ignored if bas2 is a list :param xpts a list of sampling points for dr, e.g. 
-1, -0.9, ..., 1 """ self.ld = ld self.orb_idx = orb_idx self.lmax2 = len(bas2) if isinstance(bas2, list) else lmax2 self.bas2 = bas2 self.lmax3 = len(bas3) if isinstance(bas3, list) else lmax3 self.bas3 = bas3 self.xpts = xpts self.cut12=cut12 self.m12=m12 n_xpts = len(xpts) ncorr_list = [] multi_list = [] ffidx_list = [] npt_list = [] ncorr = 0 for i in orb_idx: orb = ld.orbits[i] npt = orb.cluster.order assert npt == 2, TypeError("Pair interactions for LDFF only") assert orb.cluster.factorial <= 1, TypeError("LDFF cannot accept improper orbit %d"%(i)) # nvar = npt*(npt-1)/2 ffidx = self.symmetrize_idx(npt) multi = np.array([x.shape[0] for x in ffidx]) nc = len(ffidx) ncorr += nc ncorr_list.append(nc) multi_list.append(multi) ffidx_list.append(ffidx) npt_list.append(npt) self.ncorr_list = np.array(ncorr_list) self.ffidx_list = ffidx_list self.multi_list = np.array(multi_list) self.npt_list = npt_list self.ncorr = ncorr y2 = np.zeros((self.lmax2, n_xpts)) for l in range(self.lmax2): for ix, x in enumerate(xpts): y2[l, ix] = self.eval_bas(bas2, l, x) np.savetxt('ldff_bas.txt', np.vstack((xpts,y2)).T) self.y2 = y2 init_ldff_basis(2, self.lmax2, xpts, y2) @staticmethod def eval_bas(bas, l, x): return bas[l](x) if isinstance(bas, list) else bas(l, x) @staticmethod def eval_val(bas, ppval, x): return np.dot(ppval, [LDFFmodel.eval_bas(bas, l, x) for l in range(len(ppval))]) def symmetrize_idx(self, npt): if npt == 2: return np.arange(self.lmax2)[:,None,None] elif npt == 3: print("3-body LDFF symmetrization TO BE IMPLEMENTED") return [[list(i)] for i in product(range(self.lmax3), range(self.lmax3), range(self.lmax3))] def tostr(self, sol, io, tol=1.E-12): """ :param sol: the whole LDFF coefficients :param io: :return: """ if io not in self.orb_idx: return "0\n0" iff = self.orb_idx.index(io) npt = self.ld.orbits[io].cluster.order lm = self.lmax2 if npt == 2 else self.lmax3 ppval= sol[iff*lm:(iff+1)*lm] ppord= list(range(1,lm+1)) outs = [] # if (abs(ppval)> tol).any(): # ppval=Transpose[{ppord, ppval}]; # ppval=Select[ppval, (Abs[#[[2]]]>0)&]; npp = 0 for i in range(lm): if abs(ppval[i])>tol: npp += 1 outs.append("%d %.12f" % (ppord[i], ppval[i])) outs.append("1\n"+ " ".join(map(str,self.extrapolations(iff, ppval)))) return ("%d\n" % (npp)) + "\n".join(outs) def extrapolations(self, iff, ppval, dx=1E-4): exff = list(range(8)) xfrac = self.ld.orbits[self.orb_idx[iff]].cluster.frac_coords r0 = np.linalg.norm(self.ld.prim.lattice.get_cartesian_coords(xfrac[1]-xfrac[0])) for i in range(2): exff[i*3] = self.cut12[0,i] if len(self.cut12)<=1 else self.cut12[iff,i] xa = exff[i*3] xb = xa+dx ya = self.eval_val(self.bas2, ppval, xa) yb = self.eval_val(self.bas2, ppval, xb) m= self.m12[0,i] if len(self.m12)<=1 else self.m12[iff,i] r=r0+xa exff[i*3+2] = -(yb-ya)/dx*(r**(m+1))/m exff[i*3+1] = ya - exff[i*3+2]/(r**m) exff[6+i] = m return exff def calc_correlation(self, dx, clusALL, ncell): """ :param dx: :param clusALL: all clusters in the supercell :return: """ len_orb = np.array([len(self.ld.orbits[i].clusters) for i in self.orb_idx]) len_orb_sums = [len_orb[:i].sum() for i in range(len(self.orb_idx))] clus_id = [len_orb_sums[i] + j for i, ii in enumerate(self.orb_idx) for j in range(len(self.ld.orbits[ii].clusters)) for _ in range(ncell)] return spmat(ldff_get_corr( np.array([self.ld.orbits[i].cluster.order for i in self.orb_idx], dtype=np.int32), np.array([self.lmax2, self.lmax3, 0], dtype=np.int32), np.array(self.ncorr_list, dtype=np.int32), np.array([ii for i in self.multi_list for ii in i], 
dtype=np.int32), np.array([iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in iii], dtype=np.int32), np.array([pad_right(clus.coords,[4,3]) for i in self.orb_idx for clus in self.ld.orbits[i].clusters]), np.array(dx), np.array([self.orb_idx.index(clus[1]) for clus in clusALL], dtype=np.int32), np.array(clus_id, dtype=np.int32), np.array([pad_right(np.array(clus[0]), 4) for clus in clusALL], dtype=np.int32))) def plot_pairPES(self, sols): fname= 'ldff_PES.txt' header= 'col1=du' col=2 mat=[self.xpts] for isol, sol0 in enumerate(sols): offset=0 sol = sol0[-self.ncorr:] for i,npt in enumerate(self.npt_list): if npt==2: mat.append(np.dot(sol[offset:offset+self.lmax2], self.y2)) header+=" %d=sol_%d_clus_%d"%(col, isol+1, self.orb_idx[i]+1) col+=1 offset+= self.ncorr_list[i] np.savetxt(fname, np.array(mat).T, header=header) print(" LDFF: pair PES exported to %s"%(fname))
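# --- Editor's illustrative sketch below; not part of the original module. ---
# LDFFmodel.extrapolations() stitches a power-law tail c1 + c2/r**m onto the
# pair potential at each cutoff by matching both the value and the slope of the
# basis expansion. The helper reproduces that matching on a toy polynomial
# basis; the basis functions, coefficients, and cutoff numbers are assumptions
# chosen purely for illustration (it assumes `np` is imported at module level,
# as elsewhere in this file).
def _demo_tail_matching(r0=3.0, xa=0.7, m=6, dx=1e-4):
    bas2 = [lambda x, l=l: x ** (l + 1) for l in range(4)]  # toy basis b_l(x) = x**(l+1)
    ppval = np.array([1.0, -0.5, 0.2, 0.05])                # toy expansion coefficients
    ya = LDFFmodel.eval_val(bas2, ppval, xa)                 # value at the cutoff
    yb = LDFFmodel.eval_val(bas2, ppval, xa + dx)            # value slightly past it
    r = r0 + xa
    c2 = -(yb - ya) / dx * r ** (m + 1) / m  # slope matching, as in extrapolations()
    c1 = ya - c2 / r ** m                    # value matching
    assert abs((c1 + c2 / r ** m) - ya) < 1e-12  # tail meets the expansion at the cutoff
    return c1, c2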
[ "numpy.abs", "numpy.sum", "f_phonon.f_phonon.get_dm", "numpy.ones", "scipy.sparse.lil_matrix", "os.path.isfile", "numpy.arange", "numpy.linalg.norm", "numpy.mean", "numpy.exp", "numpy.full", "os.path.abspath", "numpy.zeros_like", "os.path.dirname", "numpy.savetxt", "os.path.exists", "numpy.transpose", "f_util.f_util.select_triplet", "_c_util.init_ldff_basis", "numpy.append", "numpy.loadtxt", "io.StringIO", "numpy.min", "_c_util.fct_trans_c", "numpy.dot", "numpy.vstack", "numpy.outer", "scipy.io.mmwrite", "numpy.zeros", "scipy.io.mmread", "numpy.array", "logging.getLogger", "_c_util.get_nullspace" ]
[((778, 805), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (795, 805), False, 'import logging\n'), ((6201, 6223), 'numpy.array', 'np.array', (['self.fct_ord'], {}), '(self.fct_ord)\n', (6209, 6223), True, 'import numpy as np\n'), ((11246, 11266), 'numpy.zeros', 'np.zeros', (['self.ncorr'], {}), '(self.ncorr)\n', (11254, 11266), True, 'import numpy as np\n'), ((11718, 11833), 'numpy.min', 'np.min', (['[orb.cluster.diameter for orb in self.orbits if orb.cluster.order == 2 and \n orb.cluster.order_uniq == 2]'], {}), '([orb.cluster.diameter for orb in self.orbits if orb.cluster.order ==\n 2 and orb.cluster.order_uniq == 2])\n', (11724, 11833), True, 'import numpy as np\n'), ((11848, 12032), 'numpy.array', 'np.array', (['[(self.orbits[idx].cluster.diameter - pair_r0 if self.orbits[idx].cluster.\n order == 2 and self.orbits[idx].cluster.order_uniq == 2 else 0) for idx in\n self.fct_orbidx]'], {}), '([(self.orbits[idx].cluster.diameter - pair_r0 if self.orbits[idx].\n cluster.order == 2 and self.orbits[idx].cluster.order_uniq == 2 else 0) for\n idx in self.fct_orbidx])\n', (11856, 12032), True, 'import numpy as np\n'), ((12582, 12607), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (12590, 12607), True, 'import numpy as np\n'), ((14431, 14455), 'numpy.zeros', 'np.zeros', (['(totNF, ncorr)'], {}), '((totNF, ncorr))\n', (14439, 14455), True, 'import numpy as np\n'), ((14472, 14492), 'numpy.zeros', 'np.zeros', (['(totNF, 3)'], {}), '((totNF, 3))\n', (14480, 14492), True, 'import numpy as np\n'), ((22642, 22702), 'numpy.zeros', 'np.zeros', (['(scinfo.num_sites, scinfo.num_sites)'], {'dtype': 'np.int'}), '((scinfo.num_sites, scinfo.num_sites), dtype=np.int)\n', (22650, 22702), True, 'import numpy as np\n'), ((24543, 24652), 'numpy.savetxt', 'np.savetxt', (['"""fct_norm_vs_diameter.txt"""', 'fc_norm'], {'header': '"""col1=diameter col2=norm col3=npt col4=npt_uniq"""'}), "('fct_norm_vs_diameter.txt', fc_norm, header=\n 'col1=diameter col2=norm col3=npt col4=npt_uniq')\n", (24553, 24652), True, 'import numpy as np\n'), ((25950, 25963), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (25961, 25963), False, 'import io, re\n'), ((27717, 27739), 'numpy.savetxt', 'np.savetxt', (['"""sol"""', 'sol'], {}), "('sol', sol)\n", (27727, 27739), True, 'import numpy as np\n'), ((27974, 28004), 'numpy.savetxt', 'np.savetxt', (['"""sol_fct"""', 'sol_fct'], {}), "('sol_fct', sol_fct)\n", (27984, 28004), True, 'import numpy as np\n'), ((28257, 28270), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (28268, 28270), False, 'import io, re\n'), ((28283, 28296), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (28294, 28296), False, 'import io, re\n'), ((29040, 29089), 'f_util.f_util.select_triplet', 'f_util.select_triplet', (['poscar', 'rmat', 'cutoff', 'nlat'], {}), '(poscar, rmat, cutoff, nlat)\n', (29061, 29089), False, 'from f_util import f_util\n'), ((32232, 32260), 'numpy.savetxt', 'np.savetxt', (['"""fctsym"""', 'fctsym'], {}), "('fctsym', fctsym)\n", (32242, 32260), True, 'import numpy as np\n'), ((38346, 38366), 'numpy.outer', 'np.outer', (['mass', 'mass'], {}), '(mass, mass)\n', (38354, 38366), True, 'import numpy as np\n'), ((38835, 38865), 'numpy.zeros', 'np.zeros', (['(nfcm, 3)'], {'dtype': 'int'}), '((nfcm, 3), dtype=int)\n', (38843, 38865), True, 'import numpy as np\n'), ((38877, 38907), 'numpy.zeros', 'np.zeros', (['(nfcm, 2)'], {'dtype': 'int'}), '((nfcm, 2), dtype=int)\n', (38885, 38907), True, 'import numpy as np\n'), ((38919, 38941), 
'numpy.zeros', 'np.zeros', (['(nfcm, 3, 3)'], {}), '((nfcm, 3, 3))\n', (38927, 38941), True, 'import numpy as np\n'), ((39833, 39862), 'numpy.savetxt', 'np.savetxt', (['"""prim_dp"""', 'fcm_dp'], {}), "('prim_dp', fcm_dp)\n", (39843, 39862), True, 'import numpy as np\n'), ((41350, 41368), 'scipy.sparse.lil_matrix', 'spmat', (['Acorrection'], {}), '(Acorrection)\n', (41355, 41368), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((48145, 48157), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (48153, 48157), True, 'import numpy as np\n'), ((48165, 48188), 'numpy.array', 'np.array', (['[[-0.7, 0.7]]'], {}), '([[-0.7, 0.7]])\n', (48173, 48188), True, 'import numpy as np\n'), ((48193, 48212), 'numpy.array', 'np.array', (['[[12, 6]]'], {}), '([[12, 6]])\n', (48201, 48212), True, 'import numpy as np\n'), ((49698, 49718), 'numpy.array', 'np.array', (['ncorr_list'], {}), '(ncorr_list)\n', (49706, 49718), True, 'import numpy as np\n'), ((49782, 49802), 'numpy.array', 'np.array', (['multi_list'], {}), '(multi_list)\n', (49790, 49802), True, 'import numpy as np\n'), ((49876, 49906), 'numpy.zeros', 'np.zeros', (['(self.lmax2, n_xpts)'], {}), '((self.lmax2, n_xpts))\n', (49884, 49906), True, 'import numpy as np\n'), ((50127, 50167), '_c_util.init_ldff_basis', 'init_ldff_basis', (['(2)', 'self.lmax2', 'xpts', 'y2'], {}), '(2, self.lmax2, xpts, y2)\n', (50142, 50167), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((4555, 4566), 'scipy.sparse.lil_matrix', 'spmat', (['Bmat'], {}), '(Bmat)\n', (4560, 4566), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((9910, 9922), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (9919, 9922), True, 'import numpy as np\n'), ((10028, 10047), 'numpy.ones', 'np.ones', (['ldff.ncorr'], {}), '(ldff.ncorr)\n', (10035, 10047), True, 'import numpy as np\n'), ((10894, 10906), 'numpy.arange', 'np.arange', (['(0)'], {}), '(0)\n', (10903, 10906), True, 'import numpy as np\n'), ((11012, 11031), 'numpy.ones', 'np.ones', (['ldff.ncorr'], {}), '(ldff.ncorr)\n', (11019, 11031), True, 'import numpy as np\n'), ((19609, 19621), 'scipy.sparse.lil_matrix', 'spmat', (['Alist'], {}), '(Alist)\n', (19614, 19621), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((22573, 22627), 'numpy.zeros', 'np.zeros', (['(scinfo.num_sites * 3, scinfo.num_sites * 3)'], {}), '((scinfo.num_sites * 3, scinfo.num_sites * 3))\n', (22581, 22627), True, 'import numpy as np\n'), ((32806, 32836), 'numpy.loadtxt', 'np.loadtxt', (['solinf[0]'], {'ndmin': '(2)'}), '(solinf[0], ndmin=2)\n', (32816, 32836), True, 'import numpy as np\n'), ((33497, 33520), 'numpy.zeros', 'np.zeros', (['self.nfct_tot'], {}), '(self.nfct_tot)\n', (33505, 33520), True, 'import numpy as np\n'), ((35084, 35129), 'numpy.savetxt', 'np.savetxt', (["(solinf[0] + '_loaded_sol')", 'sol[0]'], {}), "(solinf[0] + '_loaded_sol', sol[0])\n", (35094, 35129), True, 'import numpy as np\n'), ((35271, 35289), 'numpy.array', 'np.array', (['[sol[0]]'], {}), '([sol[0]])\n', (35279, 35289), True, 'import numpy as np\n'), ((37077, 37121), 'numpy.zeros', 'np.zeros', (['(3 * s.num_sites, 3 * s.num_sites)'], {}), '((3 * s.num_sites, 3 * s.num_sites))\n', (37085, 37121), True, 'import numpy as np\n'), ((41668, 41695), 'os.path.isfile', 'os.path.isfile', (['dpcor_sol_f'], {}), '(dpcor_sol_f)\n', (41682, 41695), False, 'import os\n'), ((41780, 41803), 'numpy.loadtxt', 'np.loadtxt', (['dpcor_sol_f'], {}), '(dpcor_sol_f)\n', (41790, 41803), True, 'import numpy as np\n'), 
((41900, 41933), 'numpy.savetxt', 'np.savetxt', (['dpcor_sol_f', 'solution'], {}), '(dpcor_sol_f, solution)\n', (41910, 41933), True, 'import numpy as np\n'), ((45718, 45744), 'scipy.io.mmread', 'mmread', (["setting['symC_in']"], {}), "(setting['symC_in'])\n", (45724, 45744), False, 'from scipy.io import mmread, mmwrite\n'), ((49441, 49478), 'numpy.array', 'np.array', (['[x.shape[0] for x in ffidx]'], {}), '([x.shape[0] for x in ffidx])\n', (49449, 49478), True, 'import numpy as np\n'), ((5818, 5878), 'numpy.full', 'np.full', (['(3 ** o.cluster.order)', 'o.cluster.order'], {'dtype': 'np.int'}), '(3 ** o.cluster.order, o.cluster.order, dtype=np.int)\n', (5825, 5878), True, 'import numpy as np\n'), ((6264, 6307), 'numpy.full', 'np.full', (['(3 ** o.cluster.order)', 'i'], {'dtype': 'int'}), '(3 ** o.cluster.order, i, dtype=int)\n', (6271, 6307), True, 'import numpy as np\n'), ((19450, 19490), 'numpy.full', 'np.full', (['thisNF', 'weight'], {'dtype': 'np.double'}), '(thisNF, weight, dtype=np.double)\n', (19457, 19490), True, 'import numpy as np\n'), ((19952, 19964), 'numpy.array', 'np.array', (['dx'], {}), '(dx)\n', (19960, 19964), True, 'import numpy as np\n'), ((20120, 20175), 'numpy.array', 'np.array', (['[clus[1] for clus in clusALL]'], {'dtype': 'np.int32'}), '([clus[1] for clus in clusALL], dtype=np.int32)\n', (20128, 20175), True, 'import numpy as np\n'), ((20211, 20266), 'numpy.array', 'np.array', (['[clus[2] for clus in clusALL]'], {'dtype': 'np.int32'}), '([clus[2] for clus in clusALL], dtype=np.int32)\n', (20219, 20266), True, 'import numpy as np\n'), ((20302, 20370), 'numpy.array', 'np.array', (['[orb.cluster.order for orb in self.orbits]'], {'dtype': 'np.int32'}), '([orb.cluster.order for orb in self.orbits], dtype=np.int32)\n', (20310, 20370), True, 'import numpy as np\n'), ((20406, 20462), 'numpy.array', 'np.array', (['[orb.cluster.factorial for orb in self.orbits]'], {}), '([orb.cluster.factorial for orb in self.orbits])\n', (20414, 20462), True, 'import numpy as np\n'), ((20498, 20551), 'numpy.array', 'np.array', (['[op.rot_inv for op in self.prim.spacegroup]'], {}), '([op.rot_inv for op in self.prim.spacegroup])\n', (20506, 20551), True, 'import numpy as np\n'), ((28674, 28685), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (28682, 28685), True, 'import numpy as np\n'), ((28781, 28795), 'numpy.array', 'np.array', (['apos'], {}), '(apos)\n', (28789, 28795), True, 'import numpy as np\n'), ((32966, 32984), 'numpy.zeros_like', 'np.zeros_like', (['sol'], {}), '(sol)\n', (32979, 32984), True, 'import numpy as np\n'), ((34466, 34486), 'numpy.zeros', 'np.zeros', (['((3,) * ord)'], {}), '((3,) * ord)\n', (34474, 34486), True, 'import numpy as np\n'), ((36380, 36397), 'numpy.array', 'np.array', (['pairijk'], {}), '(pairijk)\n', (36388, 36397), True, 'import numpy as np\n'), ((36451, 36478), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {'dtype': 'int'}), '((1, 3), dtype=int)\n', (36459, 36478), True, 'import numpy as np\n'), ((36478, 36505), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'int'}), '((1, 2), dtype=int)\n', (36486, 36505), True, 'import numpy as np\n'), ((36505, 36524), 'numpy.zeros', 'np.zeros', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (36513, 36524), True, 'import numpy as np\n'), ((37759, 37810), 'f_phonon.f_phonon.get_dm', 'f_phonon.get_dm', (['[[0.0, 0.0, 0.0]]', '(3 * s.num_sites)'], {}), '([[0.0, 0.0, 0.0]], 3 * s.num_sites)\n', (37774, 37810), False, 'from f_phonon import f_phonon\n'), ((40047, 40103), 'numpy.array', 'np.array', (['[non_symm[pt][offd[0], offd[1]] for 
pt in pts]'], {}), '([non_symm[pt][offd[0], offd[1]] for pt in pts])\n', (40055, 40103), True, 'import numpy as np\n'), ((40128, 40148), 'numpy.linalg.norm', 'np.linalg.norm', (['bvec'], {}), '(bvec)\n', (40142, 40148), True, 'import numpy as np\n'), ((50074, 50095), 'numpy.vstack', 'np.vstack', (['(xpts, y2)'], {}), '((xpts, y2))\n', (50083, 50095), True, 'import numpy as np\n'), ((50502, 50523), 'numpy.arange', 'np.arange', (['self.lmax2'], {}), '(self.lmax2)\n', (50511, 50523), True, 'import numpy as np\n'), ((52897, 52983), 'numpy.array', 'np.array', (['[self.ld.orbits[i].cluster.order for i in self.orb_idx]'], {'dtype': 'np.int32'}), '([self.ld.orbits[i].cluster.order for i in self.orb_idx], dtype=np.\n int32)\n', (52905, 52983), True, 'import numpy as np\n'), ((52992, 53045), 'numpy.array', 'np.array', (['[self.lmax2, self.lmax3, 0]'], {'dtype': 'np.int32'}), '([self.lmax2, self.lmax3, 0], dtype=np.int32)\n', (53000, 53045), True, 'import numpy as np\n'), ((53059, 53100), 'numpy.array', 'np.array', (['self.ncorr_list'], {'dtype': 'np.int32'}), '(self.ncorr_list, dtype=np.int32)\n', (53067, 53100), True, 'import numpy as np\n'), ((53114, 53181), 'numpy.array', 'np.array', (['[ii for i in self.multi_list for ii in i]'], {'dtype': 'np.int32'}), '([ii for i in self.multi_list for ii in i], dtype=np.int32)\n', (53122, 53181), True, 'import numpy as np\n'), ((53195, 53298), 'numpy.array', 'np.array', (['[iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in iii]'], {'dtype': 'np.int32'}), '([iiii for i in self.ffidx_list for ii in i for iii in ii for iiii in\n iii], dtype=np.int32)\n', (53203, 53298), True, 'import numpy as np\n'), ((53423, 53435), 'numpy.array', 'np.array', (['dx'], {}), '(dx)\n', (53431, 53435), True, 'import numpy as np\n'), ((53538, 53571), 'numpy.array', 'np.array', (['clus_id'], {'dtype': 'np.int32'}), '(clus_id, dtype=np.int32)\n', (53546, 53571), True, 'import numpy as np\n'), ((54247, 54260), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (54255, 54260), True, 'import numpy as np\n'), ((3140, 3160), 'numpy.array', 'np.array', (['clus.ijkls'], {}), '(clus.ijkls)\n', (3148, 3160), True, 'import numpy as np\n'), ((3487, 3520), 'scipy.sparse.lil_matrix', 'spmat', (['(dimTensor, self.nfct_tot)'], {}), '((dimTensor, self.nfct_tot))\n', (3492, 3520), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((15190, 15209), 'numpy.mean', 'np.mean', (['f0'], {'axis': '(0)'}), '(f0, axis=0)\n', (15197, 15209), True, 'import numpy as np\n'), ((15390, 15413), 'os.path.isfile', 'os.path.isfile', (['fcmfile'], {}), '(fcmfile)\n', (15404, 15413), False, 'import os\n'), ((15518, 15537), 'numpy.loadtxt', 'np.loadtxt', (['fcmfile'], {}), '(fcmfile)\n', (15528, 15537), True, 'import numpy as np\n'), ((15711, 15738), 'numpy.savetxt', 'np.savetxt', (['fcmfile', 'fcm_dp'], {}), '(fcmfile, fcm_dp)\n', (15721, 15738), True, 'import numpy as np\n'), ((16888, 16926), 'os.path.isfile', 'os.path.isfile', (["(rundir + '/energy.txt')"], {}), "(rundir + '/energy.txt')\n", (16902, 16926), False, 'import os\n'), ((22963, 22982), 'numpy.linalg.norm', 'np.linalg.norm', (['val'], {}), '(val)\n', (22977, 22982), True, 'import numpy as np\n'), ((26236, 26247), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (26242, 26247), True, 'import numpy as np\n'), ((38291, 38316), 'numpy.array', 'np.array', (['s.atomic_masses'], {}), '(s.atomic_masses)\n', (38299, 38316), True, 'import numpy as np\n'), ((39013, 39051), 'numpy.append', 'np.append', (['ijk_prim[i]', 'typ_prim[i][0]'], {}), 
'(ijk_prim[i], typ_prim[i][0])\n', (39022, 39051), True, 'import numpy as np\n'), ((45879, 45919), 'scipy.io.mmwrite', 'mmwrite', (["setting['symC_out']", 'model.Cmat'], {}), "(setting['symC_out'], model.Cmat)\n", (45886, 45919), False, 'from scipy.io import mmread, mmwrite\n'), ((14960, 14982), 'os.path.dirname', 'os.path.dirname', (['sc[0]'], {}), '(sc[0])\n', (14975, 14982), False, 'import os\n'), ((15093, 15115), 'os.path.dirname', 'os.path.dirname', (['sc[0]'], {}), '(sc[0])\n', (15108, 15115), False, 'import os\n'), ((15327, 15349), 'os.path.abspath', 'os.path.abspath', (['sc[0]'], {}), '(sc[0])\n', (15342, 15349), False, 'import os\n'), ((17155, 17166), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (17163, 17166), True, 'import numpy as np\n'), ((17376, 17420), 'numpy.savetxt', 'np.savetxt', (["(rundir + '/energy.txt_dp')", 'en_dp'], {}), "(rundir + '/energy.txt_dp', en_dp)\n", (17386, 17420), True, 'import numpy as np\n'), ((17623, 17660), 'os.path.exists', 'os.path.exists', (["(rundir + '/force.txt')"], {}), "(rundir + '/force.txt')\n", (17637, 17660), False, 'import os\n'), ((37261, 37303), 'numpy.array', 'np.array', (["s.site_properties['born_charge']"], {}), "(s.site_properties['born_charge'])\n", (37269, 37303), True, 'import numpy as np\n'), ((46774, 46794), 'numpy.arange', 'np.arange', (['*xpts[:3]'], {}), '(*xpts[:3])\n', (46783, 46794), True, 'import numpy as np\n'), ((47356, 47376), 'numpy.arange', 'np.arange', (['*xpts[:3]'], {}), '(*xpts[:3])\n', (47365, 47376), True, 'import numpy as np\n'), ((54016, 54064), 'numpy.dot', 'np.dot', (['sol[offset:offset + self.lmax2]', 'self.y2'], {}), '(sol[offset:offset + self.lmax2], self.y2)\n', (54022, 54064), True, 'import numpy as np\n'), ((4228, 4269), '_c_util.fct_trans_c', 'fct_trans_c', (['npt_ex', '(3)', 'rot_mats[igF]', 'pi'], {}), '(npt_ex, 3, rot_mats[igF], pi)\n', (4239, 4269), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((5121, 5140), '_c_util.get_nullspace', 'get_nullspace', (['Bmat'], {}), '(Bmat)\n', (5134, 5140), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((16347, 16363), 'numpy.linalg.norm', 'norm', (['dx'], {'axis': '(1)'}), '(dx, axis=1)\n', (16351, 16363), False, 'from numpy.linalg import norm\n'), ((16959, 17002), 'numpy.loadtxt', 'np.loadtxt', (["(rundir + '/energy.txt')"], {'ndmin': '(1)'}), "(rundir + '/energy.txt', ndmin=1)\n", (16969, 17002), True, 'import numpy as np\n'), ((17693, 17726), 'numpy.loadtxt', 'np.loadtxt', (["(rundir + '/force.txt')"], {}), "(rundir + '/force.txt')\n", (17703, 17726), True, 'import numpy as np\n'), ((20020, 20037), 'numpy.array', 'np.array', (['clus[0]'], {}), '(clus[0])\n', (20028, 20037), True, 'import numpy as np\n'), ((25363, 25383), 'numpy.array', 'np.array', (['[[i1, i2]]'], {}), '([[i1, i2]])\n', (25371, 25383), True, 'import numpy as np\n'), ((26902, 26930), 'numpy.transpose', 'np.transpose', (['valTrans', 'iper'], {}), '(valTrans, iper)\n', (26914, 26930), True, 'import numpy as np\n'), ((34723, 34777), '_c_util.fct_trans_c', 'fct_trans_c', (['ord', '(3)', 'self.prim.spacegroup[igF].rot', 'pi'], {}), '(ord, 3, self.prim.spacegroup[igF].rot, pi)\n', (34734, 34777), False, 'from _c_util import fct_trans_c, ld_get_correlation, get_nullspace, init_ldff_basis, ldff_get_corr\n'), ((36073, 36098), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (36082, 36098), True, 'import numpy as np\n'), ((39187, 
39251), 'numpy.dot', 'np.dot', (['(clus.frac_coords[0] - clus.frac_coords[1])', 'sc.inv_sc_mat'], {}), '(clus.frac_coords[0] - clus.frac_coords[1], sc.inv_sc_mat)\n', (39193, 39251), True, 'import numpy as np\n'), ((53605, 53622), 'numpy.array', 'np.array', (['clus[0]'], {}), '(clus[0])\n', (53613, 53622), True, 'import numpy as np\n'), ((9020, 9053), 'scipy.sparse.lil_matrix', 'spmat', (['(nfree, ltothis - dimThis)'], {}), '((nfree, ltothis - dimThis))\n', (9025, 9053), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((9153, 9187), 'scipy.sparse.lil_matrix', 'spmat', (['(nfree, nfct_tot - ltothis)'], {}), '((nfree, nfct_tot - ltothis))\n', (9158, 9187), True, 'from scipy.sparse import lil_matrix as spmat\n'), ((17776, 17798), 'numpy.sum', 'np.sum', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (17782, 17798), True, 'import numpy as np\n'), ((18026, 18049), 'numpy.mean', 'np.mean', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (18033, 18049), True, 'import numpy as np\n'), ((23449, 23474), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (23458, 23474), True, 'import numpy as np\n'), ((26492, 26517), 'numpy.arange', 'np.arange', (['npt'], {'dtype': 'int'}), '(npt, dtype=int)\n', (26501, 26517), True, 'import numpy as np\n'), ((31548, 31568), 'numpy.array', 'np.array', (['clus[1][0]'], {}), '(clus[1][0])\n', (31556, 31568), True, 'import numpy as np\n'), ((31571, 31591), 'numpy.array', 'np.array', (['clus[0][0]'], {}), '(clus[0][0])\n', (31579, 31591), True, 'import numpy as np\n'), ((31592, 31612), 'numpy.array', 'np.array', (['clus[2][0]'], {}), '(clus[2][0])\n', (31600, 31612), True, 'import numpy as np\n'), ((31615, 31635), 'numpy.array', 'np.array', (['clus[0][0]'], {}), '(clus[0][0])\n', (31623, 31635), True, 'import numpy as np\n'), ((12196, 12236), 'numpy.exp', 'np.exp', (['(-pair_diameter * lr_pair_penalty)'], {}), '(-pair_diameter * lr_pair_penalty)\n', (12202, 12236), True, 'import numpy as np\n'), ((32031, 32077), 'numpy.array', 'np.array', (['[clus[0][1], clus[1][1], clus[2][1]]'], {}), '([clus[0][1], clus[1][1], clus[2][1]])\n', (32039, 32077), True, 'import numpy as np\n')]
from typing import List, Optional, Union

from aiocqhttp import Event as CQEvent
from aiocqhttp.bus import EventBus

from . import NoneBot
from .log import logger
from .exceptions import CQHttpError
from .session import BaseSession
from .typing import NoticeHandler_T, RequestHandler_T


class EventHandler:
    """INTERNAL API"""
    __slots__ = ('events', 'func')

    def __init__(self, events: List[str],
                 func: Union[NoticeHandler_T, RequestHandler_T]):
        self.events = events
        self.func = func


class EventManager:
    """INTERNAL API"""
    bus = EventBus()

    @classmethod
    def add_event_handler(cls, handler: EventHandler) -> None:
        for event in handler.events:
            cls.bus.subscribe(event, handler.func)

    @classmethod
    def remove_event_handler(cls, handler: EventHandler) -> None:
        for event in handler.events:
            cls.bus.unsubscribe(event, handler.func)

    @classmethod
    def switch_event_handler_global(cls,
                                    handler: EventHandler,
                                    state: Optional[bool] = None) -> None:
        for event in handler.events:
            if handler.func in cls.bus._subscribers[event] and not state:
                cls.bus.unsubscribe(event, handler.func)
            elif handler.func not in cls.bus._subscribers[event] \
                    and state is not False:
                cls.bus.subscribe(event, handler.func)


class NoticeSession(BaseSession):
    __slots__ = ()

    def __init__(self, bot: NoneBot, event: CQEvent):
        super().__init__(bot, event)


class RequestSession(BaseSession):
    __slots__ = ()

    def __init__(self, bot: NoneBot, event: CQEvent):
        super().__init__(bot, event)

    async def approve(self, remark: str = '') -> None:
        """
        Approve the request.

        :param remark: remark of friend (only works in friend request)
        """
        try:
            await self.bot.call_action(action='.handle_quick_operation_async',
                                       self_id=self.event.self_id,
                                       context=self.event,
                                       operation={
                                           'approve': True,
                                           'remark': remark
                                       })
        except CQHttpError:
            pass

    async def reject(self, reason: str = '') -> None:
        """
        Reject the request.

        :param reason: reason to reject (only works in group request)
        """
        try:
            await self.bot.call_action(action='.handle_quick_operation_async',
                                       self_id=self.event.self_id,
                                       context=self.event,
                                       operation={
                                           'approve': False,
                                           'reason': reason
                                       })
        except CQHttpError:
            pass


async def handle_notice_or_request(bot: NoneBot, event: CQEvent) -> None:
    """INTERNAL API"""
    if event.type == 'notice':
        _log_notice(event)
        session = NoticeSession(bot, event)
    else:  # must be 'request'
        _log_request(event)
        session = RequestSession(bot, event)

    ev_name = event.name
    logger.debug(f'Emitting event: {ev_name}')
    try:
        await EventManager.bus.emit(ev_name, session)
    except Exception as e:
        logger.error(f'An exception occurred while handling event {ev_name}:')
        logger.exception(e)


def _log_notice(event: CQEvent) -> None:
    logger.info(f'Notice: {event}')


def _log_request(event: CQEvent) -> None:
    logger.info(f'Request: {event}')


__all__ = [
    'NoticeSession',
    'RequestSession',
]
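# --- Editor's illustrative sketch below; not part of the original module. ---
# Handlers are plain coroutines subscribed to aiocqhttp event names on the
# shared EventBus; handle_notice_or_request() later emits a session object for
# the matching name. The coroutine body below is made up for illustration, and
# it assumes BaseSession exposes a send() helper as in the rest of this
# framework.
def _demo_register_welcome_handler() -> EventHandler:
    async def welcome(session: NoticeSession) -> None:
        await session.send('Welcome aboard!')  # assumed helper on BaseSession

    handler = EventHandler(['notice.group_increase'], welcome)
    EventManager.add_event_handler(handler)
    return handler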
[ "aiocqhttp.bus.EventBus" ]
[((568, 578), 'aiocqhttp.bus.EventBus', 'EventBus', ([], {}), '()\n', (576, 578), False, 'from aiocqhttp.bus import EventBus\n')]
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import json
import multiprocessing
import os
import subprocess
import time

import redis

from utils import get_node_details, set_node_details

INSPECT_CONTAINER_COMMAND = "docker inspect {container}"
GET_CONTAINERS_COMMAND = "docker container ls -a --format='{{.Names}}'"
UPTIME_COMMAND = "uptime"
FREE_COMMAND = "free"
NVIDIA_SMI_COMMAND = "nvidia-smi --query-gpu=utilization.gpu --format=csv,noheader,nounits"


class NodeAgent:
    def __init__(self, cluster_name: str, node_name: str, master_hostname: str, redis_port: int):
        self._cluster_name = cluster_name
        self._node_name = node_name
        self._master_hostname = master_hostname
        self._redis_port = redis_port

    def start(self) -> None:
        container_tracking_agent = ContainerTrackingAgent(
            cluster_name=self._cluster_name,
            node_name=self._node_name,
            master_hostname=self._master_hostname,
            redis_port=self._redis_port)
        container_tracking_agent.start()


class ContainerTrackingAgent(multiprocessing.Process):
    def __init__(self, cluster_name: str, node_name: str, master_hostname: str, redis_port: int,
                 check_interval: int = 10):
        super().__init__()
        self._cluster_name = cluster_name
        self._node_name = node_name
        self._redis = redis.Redis(
            host=master_hostname,
            port=redis_port,
            charset="utf-8",
            decode_responses=True
        )
        self._check_interval = check_interval

    def run(self) -> None:
        while True:
            self._update_node_details()
            time.sleep(self._check_interval)

    def _update_node_details(self) -> None:
        # Get node details
        node_details = get_node_details(
            redis=self._redis,
            cluster_name=self._cluster_name,
            node_name=self._node_name
        )

        # Main update
        self._update_container_details(node_details=node_details)
        self._update_system_resources_details(node_details=node_details)

        # Other update
        node_details['state'] = 'Running'
        node_details['check_time'] = self._redis.time()[0]

        # Save node details
        set_node_details(
            redis=self._redis,
            cluster_name=self._cluster_name,
            node_name=self._node_name,
            node_details=node_details
        )

    def _update_container_details(self, node_details: dict) -> None:
        # Get containers
        completed_process = subprocess.run(
            GET_CONTAINERS_COMMAND,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8'
        )
        return_str = completed_process.stdout.strip('\n')
        containers = [] if return_str == '' else return_str.split('\n')

        # Iterate containers
        node_details['containers'] = {}
        container_details = node_details['containers']
        occupied_cpu_sum = 0
        occupied_memory_sum = 0
        occupied_gpu_sum = 0
        for container in containers:
            # Get inspect detail
            completed_process = subprocess.run(
                INSPECT_CONTAINER_COMMAND.format(container=container),
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8'
            )
            return_str = completed_process.stdout
            inspect_details = json.loads(return_str)[0]

            # Init container details
            container_details[container] = {}

            # Extract occupied resource
            occupied_resource = ContainerTrackingAgent._extract_occupied_resources(
                inspect_details=inspect_details)
            occupied_cpu_sum += occupied_resource[0]
            occupied_memory_sum += occupied_resource[1]
            occupied_gpu_sum += occupied_resource[2]

            # Extract container state
            container_state = ContainerTrackingAgent._extract_state(
                inspect_details=inspect_details)
            container_details[container]['state'] = container_state

        # Update resources
        node_details['resources']['target_free_cpu'] = node_details['resources']['cpu'] - occupied_cpu_sum
        node_details['resources']['target_free_memory'] = node_details['resources']['memory'] - occupied_memory_sum
        node_details['resources']['target_free_gpu'] = node_details['resources']['gpu'] - occupied_gpu_sum

    def _update_system_resources_details(self, node_details: dict):
        # Get actual free cpu from the load average reported by uptime
        completed_process = subprocess.run(
            UPTIME_COMMAND,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8'
        )
        uptime_str = completed_process.stdout
        split_uptime = uptime_str.split()
        node_details['resources']['actual_free_cpu'] = \
            node_details['resources']['cpu'] - float(split_uptime[-3].replace(',', ''))

        # Update actual free memory from free
        completed_process = subprocess.run(
            FREE_COMMAND,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8'
        )
        free_str = completed_process.stdout
        split_free = free_str.split()
        node_details['resources']['actual_free_memory'] = float(split_free[12]) / 1024

        # Update actual free gpu
        node_details['resources']['actual_free_gpu'] = node_details['resources']['target_free_gpu']
        # Get nvidia-smi result
        try:
            completed_process = subprocess.run(
                NVIDIA_SMI_COMMAND,
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8'
            )
            nvidia_smi_str = completed_process.stdout
            node_details['resources']['actual_gpu_usage'] = f"{float(nvidia_smi_str)}%"
        except Exception:
            pass

    @staticmethod
    def _extract_state(inspect_details: dict) -> dict:
        return inspect_details['State']

    @staticmethod
    def _extract_occupied_resources(inspect_details: dict) -> tuple:
        if inspect_details['State']['Running'] is True:
            occupied_cpu = float(inspect_details['Config']['Labels'].get('required_cpu', 0))
            occupied_memory = float(inspect_details['Config']['Labels'].get('required_memory', '0m').replace("m", ""))
            occupied_gpu = int(inspect_details['Config']['Labels'].get('required_gpu', 0))
            return occupied_cpu, occupied_memory, occupied_gpu
        else:
            return 0, 0, 0


if __name__ == "__main__":
    # FIXME: consider getting the config path from argparse instead
    with open(os.path.expanduser(f"~/.maro-local/agents/node_agent.config"), 'r') as fr:
        node_agent_config = json.load(fr)

    node_agent = NodeAgent(
        cluster_name=node_agent_config['cluster_name'],
        node_name=node_agent_config['node_name'],
        master_hostname=node_agent_config['master_hostname'],
        redis_port=node_agent_config['redis_port']
    )
    node_agent.start()
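# --- Editor's illustrative sketch below; not part of the original agent. ---
# _update_system_resources_details() indexes the output of `free` as a flat
# token list: with the usual procps layout, token 12 is the "available" column
# of the "Mem:" row, in KiB. The canned sample output below is an assumption
# used only to show where that index lands.
def _demo_parse_free_available_mib() -> float:
    sample = (
        "              total        used        free      shared  buff/cache   available\n"
        "Mem:       16295320     4284980     7547184      420480     4463156    11262252\n"
        "Swap:       2097148           0     2097148\n"
    )
    tokens = sample.split()
    return float(tokens[12]) / 1024  # tokens[12] == '11262252', the available KiB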
[ "redis.Redis", "subprocess.run", "json.load", "json.loads", "utils.get_node_details", "time.sleep", "os.path.expanduser", "utils.set_node_details" ]
[((1388, 1482), 'redis.Redis', 'redis.Redis', ([], {'host': 'master_hostname', 'port': 'redis_port', 'charset': '"""utf-8"""', 'decode_responses': '(True)'}), "(host=master_hostname, port=redis_port, charset='utf-8',\n decode_responses=True)\n", (1399, 1482), False, 'import redis\n'), ((1800, 1899), 'utils.get_node_details', 'get_node_details', ([], {'redis': 'self._redis', 'cluster_name': 'self._cluster_name', 'node_name': 'self._node_name'}), '(redis=self._redis, cluster_name=self._cluster_name,\n node_name=self._node_name)\n', (1816, 1899), False, 'from utils import get_node_details, set_node_details\n'), ((2266, 2392), 'utils.set_node_details', 'set_node_details', ([], {'redis': 'self._redis', 'cluster_name': 'self._cluster_name', 'node_name': 'self._node_name', 'node_details': 'node_details'}), '(redis=self._redis, cluster_name=self._cluster_name,\n node_name=self._node_name, node_details=node_details)\n', (2282, 2392), False, 'from utils import get_node_details, set_node_details\n'), ((2570, 2689), 'subprocess.run', 'subprocess.run', (['GET_CONTAINERS_COMMAND'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf8"""'}), "(GET_CONTAINERS_COMMAND, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='utf8')\n", (2584, 2689), False, 'import subprocess\n'), ((4648, 4760), 'subprocess.run', 'subprocess.run', (['UPTIME_COMMAND'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf8"""'}), "(UPTIME_COMMAND, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, encoding='utf8')\n", (4662, 4760), False, 'import subprocess\n'), ((5092, 5202), 'subprocess.run', 'subprocess.run', (['FREE_COMMAND'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf8"""'}), "(FREE_COMMAND, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, encoding='utf8')\n", (5106, 5202), False, 'import subprocess\n'), ((6853, 6866), 'json.load', 'json.load', (['fr'], {}), '(fr)\n', (6862, 6866), False, 'import json\n'), ((1672, 1704), 'time.sleep', 'time.sleep', (['self._check_interval'], {}), '(self._check_interval)\n', (1682, 1704), False, 'import time\n'), ((5616, 5731), 'subprocess.run', 'subprocess.run', (['NVIDIA_SMI_COMMAND'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'encoding': '"""utf8"""'}), "(NVIDIA_SMI_COMMAND, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='utf8')\n", (5630, 5731), False, 'import subprocess\n'), ((6750, 6811), 'os.path.expanduser', 'os.path.expanduser', (['f"""~/.maro-local/agents/node_agent.config"""'], {}), "(f'~/.maro-local/agents/node_agent.config')\n", (6768, 6811), False, 'import os\n'), ((3497, 3519), 'json.loads', 'json.loads', (['return_str'], {}), '(return_str)\n', (3507, 3519), False, 'import json\n')]
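The entry above polls `uptime`, `free`, `docker` and `nvidia-smi` through one recurring `subprocess.run` pattern: run the command in a shell, capture decoded stdout, split it into fields. A minimal, self-contained sketch of that pattern — the `free_available_mib` helper and the field index are assumptions based on the default Linux `free` layout, not part of the entry:

import subprocess

def free_available_mib() -> float:
    # Run the command in a shell and capture decoded stdout, like the agent does.
    completed = subprocess.run(
        "free", shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8")
    fields = completed.stdout.split()
    # Field 12 is the 'available' column in the default `free` layout (assumption).
    return float(fields[12]) / 1024

if __name__ == "__main__":
    print(f"available memory: {free_available_mib():.1f} MiB")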
# -*- coding: UTF-8 -*-
import flask
import os
import sys
import socket

PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
print(sys.path)

from src.log.utils import *


def get_host_ip():
    """
    Query the IP address of the local machine.
    :return: ip
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip


def server_path(path):
    if not os.path.exists(path):
        print('%s does not exist' % path)
        return  # alternatively: raise Exception('path not exists')
    print(path)
    os.system('pwd')
    ip = get_host_ip()
    app = flask.Flask(__name__, static_folder=path)
    print(ip)
    app.run(host=ip, port=5000)


if __name__ == "__main__":
    server_path(sys.argv[1])
[ "os.path.expanduser", "os.getcwd", "socket.socket", "flask.Flask", "os.system", "os.path.exists", "os.path.join" ]
[((758, 774), 'os.system', 'os.system', (['"""pwd"""'], {}), "('pwd')\n", (767, 774), False, 'import os\n'), ((808, 849), 'flask.Flask', 'flask.Flask', (['__name__'], {'static_folder': 'path'}), '(__name__, static_folder=path)\n', (819, 849), False, 'import flask\n'), ((237, 277), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'PACKAGE_PARENT'], {}), '(SCRIPT_DIR, PACKAGE_PARENT)\n', (249, 277), False, 'import os\n'), ((412, 460), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (425, 460), False, 'import socket\n'), ((614, 634), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (628, 634), False, 'import os\n'), ((159, 170), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (168, 170), False, 'import os\n'), ((172, 200), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (190, 200), False, 'import os\n')]
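The `get_host_ip` trick in the entry above deserves a note: connecting a UDP socket sends no packets, it only selects the outbound interface, whose address `getsockname()` then reports. A standalone sketch of just that idiom (the `8.8.8.8` target is arbitrary; any routable address works):

import socket

def get_host_ip() -> str:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # No datagram is actually sent; connect() only picks the outbound interface.
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()

print(get_host_ip())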
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

from contextlib import closing
import zlib

from google.appengine.ext import db

from mcfw.properties import azzert
from mcfw.serialization import CustomProperty, s_long_list, ds_long_list

try:
    import cStringIO as StringIO
except ImportError:
    import StringIO


class CompressedIntegerList(list):

    def __init__(self, value, model, prop_name):
        azzert(model and prop_name)
        self._model = model
        self._prop_name = prop_name
        self._paused_updates = False
        if isinstance(value, basestring):
            dsc = zlib.decompress(value)
            with closing(StringIO.StringIO(dsc)) as stream:
                super(CompressedIntegerList, self).__init__(ds_long_list(stream))
        else:
            super(CompressedIntegerList, self).__init__(value)

    def ljust(self, delta, value, limit):
        if limit < 0:
            raise ValueError('Illegal limit: %s' % limit)
        self._paused_updates = True
        try:
            self += delta * [value]
            while len(self) > limit:
                self.pop(0)
        finally:
            self._paused_updates = False
            setattr(self._model, self._prop_name, db.Blob(str(self)))

    def __str__(self):
        with closing(StringIO.StringIO()) as stream:
            s_long_list(stream, self)
            return zlib.compress(stream.getvalue())

#     def __getattr__(self, name):
#         attr = object.__getattr__(self, name)
#         if hasattr(attr, '__call__') and name in ('__add__', '__delitem__', '__delslice__', '__iadd__', '__imul__', '__mul__', '__reduce__', '__reduce_ex__', '__rmul__', '__setitem__', '__setslice__', 'append', 'extend', 'insert', 'pop', 'remove', 'sort'):
#             def newfunc(*args, **kwargs):
#                 result = attr(*args, **kwargs)
#                 if not self._paused_updates:
#                     setattr(self._model, self._prop_name, db.Blob(str(self)))
#                 return result
#             return newfunc
#         else:
#             return attr


for method in ('__add__', '__delitem__', '__delslice__', '__iadd__', '__imul__', '__mul__', '__reduce__',
               '__reduce_ex__', '__rmul__', '__setitem__', '__setslice__', 'append', 'extend', 'insert', 'pop',
               'remove', 'sort'):
    def wrap(method_name):
        def f(self, *args, **kwargs):
            super_f = getattr(super(CompressedIntegerList, self), method_name)
            r = super_f(*args, **kwargs)
            if not self._paused_updates:
                setattr(self._model, self._prop_name, db.Blob(str(self)))
            return r
        return f
    setattr(CompressedIntegerList, method, wrap(method))
[ "mcfw.serialization.s_long_list", "mcfw.properties.azzert", "zlib.decompress", "StringIO.StringIO", "mcfw.serialization.ds_long_list" ]
[((1003, 1030), 'mcfw.properties.azzert', 'azzert', (['(model and prop_name)'], {}), '(model and prop_name)\n', (1009, 1030), False, 'from mcfw.properties import azzert\n'), ((1192, 1214), 'zlib.decompress', 'zlib.decompress', (['value'], {}), '(value)\n', (1207, 1214), False, 'import zlib\n'), ((1920, 1945), 'mcfw.serialization.s_long_list', 's_long_list', (['stream', 'self'], {}), '(stream, self)\n', (1931, 1945), False, 'from mcfw.serialization import CustomProperty, s_long_list, ds_long_list\n'), ((1876, 1895), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (1893, 1895), False, 'import StringIO\n'), ((1240, 1262), 'StringIO.StringIO', 'StringIO.StringIO', (['dsc'], {}), '(dsc)\n', (1257, 1262), False, 'import StringIO\n'), ((1335, 1355), 'mcfw.serialization.ds_long_list', 'ds_long_list', (['stream'], {}), '(stream)\n', (1347, 1355), False, 'from mcfw.serialization import CustomProperty, s_long_list, ds_long_list\n')]
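The class above persists itself by serializing the list to a stream and running it through zlib on every mutation. The compression round trip it relies on, reduced to a sketch in plain Python 3 (without the App Engine and mcfw dependencies of the entry):

import zlib

values = bytes(1000)                  # a highly compressible payload
blob = zlib.compress(values)
assert zlib.decompress(blob) == values  # lossless round trip
print(len(values), "->", len(blob), "bytes")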
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import itertools
import os
import os.path

import sys
import logging


def cartesian_product(dicts):
    return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))


def summary(configuration):
    kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
    return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])


def to_cmd(c):
    command = 'python3 ./bin/jack-train.py ' \
              'with config=conf/lp/transe.yaml ' \
              'learning_rate={} repr_dim={} num_negative={} batch_size={} ' \
              'save_dir=/tmp/distmult_{}_{}_{}_{}' \
              ''.format(c['lr'], c['dim'], c['nn'], c['bs'],
                        c['lr'], c['dim'], c['nn'], c['bs'])
    return command


def to_logfile(c, path):
    outfile = "%s/legion_lp_distmult.%s.log" % (path, summary(c).replace("/", "_"))
    return outfile


def main(_):
    hyperparameters_space_1 = dict(
        lr=[0.001, 0.005, 0.01],
        dim=[100, 150, 200, 250, 300, 350],
        nn=[1, 2, 4, 8, 16],
        bs=[32, 64, 128, 256]
    )
    configurations = list(cartesian_product(hyperparameters_space_1))

    path = '/home/ucacmin/Scratch/jack/scripts/experiments/logs/legion_lp_transe/'

    # Check that we are on the Legion cluster first
    if os.path.exists('/home/ucacmin/'):
        # If the folder that will contain logs does not exist, create it
        if not os.path.exists(path):
            os.makedirs(path)

    command_lines = set()
    for cfg in configurations:
        logfile = to_logfile(cfg, path)

        completed = False
        if os.path.isfile(logfile):
            with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
                completed = 'hits@10' in content

        if not completed:
            command_line = '{} > {} 2>&1'.format(to_cmd(cfg), logfile)
            command_lines |= {command_line}

    # Sort command lines and remove duplicates
    sorted_command_lines = sorted(command_lines)
    nb_jobs = len(sorted_command_lines)

    header = """#!/bin/bash -l

#$ -cwd
#$ -S /bin/bash
#$ -o /dev/null
#$ -e /dev/null
#$ -t 1-{}
#$ -l mem=8G
#$ -l h_rt=12:00:00

cd /home/ucacmin/workspace/jack

""".format(nb_jobs)

    print(header)

    for job_id, command_line in enumerate(sorted_command_lines, 1):
        print('test $SGE_TASK_ID -eq {} && {}'.format(job_id, command_line))


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main(sys.argv[1:])
[ "os.path.isfile", "os.path.exists", "os.makedirs", "logging.basicConfig" ]
[((1322, 1354), 'os.path.exists', 'os.path.exists', (['"""/home/ucacmin/"""'], {}), "('/home/ucacmin/')\n", (1336, 1354), False, 'import os\n'), ((2482, 2521), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2501, 2521), False, 'import logging\n'), ((1632, 1655), 'os.path.isfile', 'os.path.isfile', (['logfile'], {}), '(logfile)\n', (1646, 1655), False, 'import os\n'), ((1444, 1464), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1458, 1464), False, 'import os\n'), ((1478, 1495), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1489, 1495), False, 'import os\n')]
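The grid search in the entry above is driven by `cartesian_product`, which turns a dict of hyperparameter lists into a stream of configuration dicts. The function is reproduced below on a toy space so the expansion is visible:

import itertools

def cartesian_product(dicts):
    # One output dict per combination of values, keyed by the original dict's keys.
    return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))

space = {"lr": [0.001, 0.01], "dim": [100, 200]}
for cfg in cartesian_product(space):
    print(cfg)
# {'lr': 0.001, 'dim': 100}
# {'lr': 0.001, 'dim': 200}
# {'lr': 0.01, 'dim': 100}
# {'lr': 0.01, 'dim': 200}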
from unittest.mock import MagicMock, patch

from pydantic_kms_secrets.kms import decrypt, encrypt

ENCRYPTED_BYTES = (
    b"\x01\x02\x02\x00x@\xcau^\xa5\x9c\xdd\x1e\xed\xe0\x00w]A\xba-\x83\x89\xb2"
    b"N\x98<?\x10vkL\xd3\x87\x03:\xa6\x01X\x12\xc7Y\x1d\x17\xe2a\xef\xa0Pz\xef\x81"
    b"\xc2\x04\x00\x00\x00c0a\x06\t*\x86H\x86\xf7\r\x01\x07\x06\xa0T0R\x02"
    b"\x01\x000M\x06\t*\x86H\x86\xf7\r\x01\x07\x010\x1e\x06\t`\x86H\x01e"
    b"\x03\x04\x01.0\x11\x04\x0c\x13\x07d$Du\x10\xf1\xdb\x91\x932\x02\x01\x10\x80"
    b" \x0c\x81:G3b(\x01\xeb\xe1\xa6\x1c\x03a\xa7R\x84\xbcVT\xe4AG\x9fn\x1b|"
    b"\x143\x96\x11\xd0"
)
ENCRYPTED_STRING = (
    "AQICAHhAynVepZzdHu3gAHddQbotg4myTpg8PxB2a0zThwM6pgFYEsdZHRfiYe+gUHrvgc"
    "IEAAAAYzBhBgkqhkiG9w0BBwagVDBSAgEAME0GCSqGSIb3DQEHATAeBglghkgBZQMEAS4w"
    "EQQMEwdkJER1EPHbkZMyAgEQgCAMgTpHM2IoAevhphwDYadShLxWVORBR59uG3wUM5YR0A=="
)


@patch("pydantic_kms_secrets.kms.boto3")
def test_encrypt(boto3_mock):
    client_mock = MagicMock()
    client_mock.encrypt.return_value = {
        "CiphertextBlob": ENCRYPTED_BYTES,
    }
    boto3_mock.client.return_value = client_mock
    assert encrypt("key", "value") == ENCRYPTED_STRING
    client_mock.encrypt.assert_called_once_with(
        KeyId="key",
        Plaintext=b"value",
    )


@patch("pydantic_kms_secrets.kms.boto3")
def test_decrypt(boto3_mock):
    client_mock = MagicMock()
    client_mock.decrypt.return_value = {
        "Plaintext": b"stuff",
    }
    boto3_mock.client.return_value = client_mock
    assert decrypt("key", ENCRYPTED_STRING) == "stuff"
    client_mock.decrypt.assert_called_once_with(
        KeyId="key",
        CiphertextBlob=ENCRYPTED_BYTES,
    )
[ "unittest.mock.patch", "pydantic_kms_secrets.kms.encrypt", "pydantic_kms_secrets.kms.decrypt", "unittest.mock.MagicMock" ]
[((882, 921), 'unittest.mock.patch', 'patch', (['"""pydantic_kms_secrets.kms.boto3"""'], {}), "('pydantic_kms_secrets.kms.boto3')\n", (887, 921), False, 'from unittest.mock import MagicMock, patch\n'), ((1277, 1316), 'unittest.mock.patch', 'patch', (['"""pydantic_kms_secrets.kms.boto3"""'], {}), "('pydantic_kms_secrets.kms.boto3')\n", (1282, 1316), False, 'from unittest.mock import MagicMock, patch\n'), ((970, 981), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (979, 981), False, 'from unittest.mock import MagicMock, patch\n'), ((1365, 1376), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1374, 1376), False, 'from unittest.mock import MagicMock, patch\n'), ((1133, 1156), 'pydantic_kms_secrets.kms.encrypt', 'encrypt', (['"""key"""', '"""value"""'], {}), "('key', 'value')\n", (1140, 1156), False, 'from pydantic_kms_secrets.kms import decrypt, encrypt\n'), ((1516, 1548), 'pydantic_kms_secrets.kms.decrypt', 'decrypt', (['"""key"""', 'ENCRYPTED_STRING'], {}), "('key', ENCRYPTED_STRING)\n", (1523, 1548), False, 'from pydantic_kms_secrets.kms import decrypt, encrypt\n')]
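The tests above follow a standard stub-and-verify pattern: patch the module-level `boto3` reference, hand back a `MagicMock` client, then assert on both the return value and the recorded call. The same pattern stripped to a dependency-free sketch (`fetch_greeting` is a made-up function for illustration, not part of the entry):

from unittest.mock import MagicMock

def fetch_greeting(client):
    return client.get_greeting()["text"]

def test_fetch_greeting():
    client = MagicMock()
    client.get_greeting.return_value = {"text": "hello"}   # stub the collaborator
    assert fetch_greeting(client) == "hello"               # verify the result
    client.get_greeting.assert_called_once_with()          # verify the interaction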
# Copyright (c) 2019-2020, <NAME>
# License: MIT License
from typing import (
    TYPE_CHECKING, Union, Iterable, cast, Tuple, Sequence, Optional,
)
from copy import deepcopy
from ezdxf.lldxf.attributes import (
    DXFAttr, DXFAttributes, DefSubclass, group_code_mapping,
)
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER
from ezdxf.lldxf.types import DXFTag
from ezdxf.lldxf.tags import Tags
from ezdxf.entities.dxfentity import base_class, SubclassProcessor, DXFEntity
from ezdxf.entities.layer import acdb_symbol_table_record
from ezdxf.lldxf.validator import is_valid_table_name
from ezdxf.tools.complex_ltype import lin_compiler
from .factory import register_entity

if TYPE_CHECKING:
    from ezdxf.eztypes import TagWriter, DXFNamespace, Drawing

__all__ = ['Linetype', 'compile_line_pattern', 'CONTINUOUS_PATTERN']

acdb_linetype = DefSubclass('AcDbLinetypeTableRecord', {
    'name': DXFAttr(2, validator=is_valid_table_name),
    'description': DXFAttr(3, default=''),
    'flags': DXFAttr(70, default=0),
    # 'length': DXFAttr(40),
    # 'items': DXFAttr(73),
})
acdb_linetype_group_codes = group_code_mapping(acdb_linetype)
CONTINUOUS_PATTERN = tuple()


class LinetypePattern:
    def __init__(self, tags: Tags):
        """ For now just store tags """
        self.tags = tags

    def __len__(self):
        return len(self.tags)

    def export_dxf(self, tagwriter: 'TagWriter'):
        if tagwriter.dxfversion <= DXF12:
            self.export_r12_dxf(tagwriter)
        else:
            tagwriter.write_tags(self.tags)

    def export_r12_dxf(self, tagwriter: 'TagWriter'):
        tags49 = Tags(tag for tag in self.tags if tag.code == 49)
        tagwriter.write_tag2(72, 65)
        tagwriter.write_tag2(73, len(tags49))
        tagwriter.write_tag(self.tags.get_first_tag(40))
        if len(tags49):
            tagwriter.write_tags(tags49)

    def is_complex_type(self):
        return self.tags.has_tag(340)

    def get_style_handle(self):
        return self.tags.get_first_value(340, '0')

    def set_style_handle(self, handle):
        return self.tags.update(DXFTag(340, handle))

    def compile(self) -> Tuple[float, ...]:
        """ Returns the simplified dash-gap-dash... line pattern,
        a dash-length of 0 represents a point.
        """
        # complex line types with text and shapes are not supported
        if self.is_complex_type():
            return CONTINUOUS_PATTERN
        pattern_length = 0.0
        elements = []
        for tag in self.tags:
            if tag.code == 40:
                pattern_length = tag.value
            elif tag.code == 49:
                elements.append(tag.value)
        if len(elements) < 2:
            return CONTINUOUS_PATTERN
        return compile_line_pattern(pattern_length, elements)


def _merge_dashes(elements: Sequence[float]) -> Iterable[float]:
    """ Merge multiple consecutive lines, gaps or points into a single element. """
    def sign(v):
        if v < 0:
            return -1
        elif v > 0:
            return +1
        return 0

    buffer = elements[0]
    prev_sign = sign(buffer)
    for e in elements[1:]:
        if sign(e) == prev_sign:
            buffer += e
        else:
            yield buffer
            buffer = e
            prev_sign = sign(e)
    yield buffer


def compile_line_pattern(
        total_length: Optional[float],
        elements: Sequence[float]) -> Tuple[float, ...]:
    """ Returns the simplified dash-gap-dash... line pattern,
    a dash-length of 0 represents a point.
    """
    elements = list(_merge_dashes(elements))
    if total_length is None:
        pass
    elif len(elements) < 2 or total_length <= 0.0:
        return CONTINUOUS_PATTERN

    sum_elements = sum(abs(e) for e in elements)
    if total_length and total_length > sum_elements:
        # append a gap
        elements.append(sum_elements - total_length)

    if elements[0] < 0:
        # start with a gap
        e = elements.pop(0)
        if elements[-1] < 0:
            # extend last gap
            elements[-1] += e
        else:
            # add last gap
            elements.append(e)
    # returns dash-gap-point
    # possible: dash-point or point-dash - ignore this yet
    # never: dash-dash or gap-gap or point-point
    return tuple(abs(e) for e in elements)


@register_entity
class Linetype(DXFEntity):
    """ DXF LTYPE entity """
    DXFTYPE = 'LTYPE'
    DXFATTRIBS = DXFAttributes(base_class, acdb_symbol_table_record, acdb_linetype)

    def __init__(self):
        """ Default constructor """
        super().__init__()
        self.pattern_tags = LinetypePattern(Tags())

    def _copy_data(self, entity: 'Linetype') -> None:
        """ Copy pattern_tags. """
        entity.pattern_tags = deepcopy(self.pattern_tags)

    def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':
        dxf = super().load_dxf_attribs(processor)
        if processor:
            tags = processor.fast_load_dxfattribs(
                dxf, acdb_linetype_group_codes, 2, log=False)
            self.pattern_tags = LinetypePattern(tags)
        return dxf

    def preprocess_export(self, tagwriter: 'TagWriter'):
        if len(self.pattern_tags) == 0:
            return False
        # Do not export complex linetypes for DXF12
        if tagwriter.dxfversion == DXF12:
            return not self.pattern_tags.is_complex_type()
        return True

    def export_entity(self, tagwriter: 'TagWriter') -> None:
        super().export_entity(tagwriter)
        # AcDbEntity export is done by parent class
        if tagwriter.dxfversion > DXF12:
            tagwriter.write_tag2(SUBCLASS_MARKER, acdb_symbol_table_record.name)
            tagwriter.write_tag2(SUBCLASS_MARKER, acdb_linetype.name)
        self.dxf.export_dxf_attribs(tagwriter, ['name', 'flags', 'description'])
        if self.pattern_tags:
            self.pattern_tags.export_dxf(tagwriter)

    def setup_pattern(self, pattern: Union[Iterable[float], str],
                      length: float = 0) -> None:
        # The new() function gets no doc reference, therefore complex linetype
        # setup has to be done later. See also: LineTypeTable.new_entry()
        complex_line_type = True if isinstance(pattern, str) else False
        if complex_line_type:
            # a .lin like line type definition string
            tags = self._setup_complex_pattern(pattern, length)
        else:
            # pattern: [2.0, 1.25, -0.25, 0.25, -0.25] - 1. element is total
            # pattern length pattern elements: >0 line, <0 gap, =0 point
            tags = Tags([
                DXFTag(72, 65),  # letter 'A'
                DXFTag(73, len(pattern) - 1),
                DXFTag(40, float(pattern[0])),
            ])
            for element in pattern[1:]:
                tags.append(DXFTag(49, float(element)))
                tags.append(DXFTag(74, 0))
        self.pattern_tags = LinetypePattern(tags)

    def _setup_complex_pattern(self, pattern: str, length: float) -> Tags:
        tokens = lin_compiler(pattern)
        tags = Tags([
            DXFTag(72, 65),  # letter 'A'
        ])

        tags2 = [DXFTag(73, 0), DXFTag(40, length)]  # temp length of 0
        count = 0
        for token in tokens:
            if isinstance(token, DXFTag):
                if tags2[-1].code == 49:  # useless 74 only after 49 :))
                    tags2.append(DXFTag(74, 0))
                tags2.append(token)
                count += 1
            else:  # TEXT or SHAPE
                tags2.extend(cast(
                    'ComplexLineTypePart', token).complex_ltype_tags(self.doc))
        tags2.append(DXFTag(74, 0))  # useless 74 at the end :))
        tags2[0] = DXFTag(73, count)
        tags.extend(tags2)
        return tags
[ "copy.deepcopy", "typing.cast", "ezdxf.lldxf.attributes.DXFAttributes", "ezdxf.lldxf.tags.Tags", "ezdxf.lldxf.attributes.DXFAttr", "ezdxf.tools.complex_ltype.lin_compiler", "ezdxf.lldxf.types.DXFTag", "ezdxf.lldxf.attributes.group_code_mapping" ]
[((1110, 1143), 'ezdxf.lldxf.attributes.group_code_mapping', 'group_code_mapping', (['acdb_linetype'], {}), '(acdb_linetype)\n', (1128, 1143), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, group_code_mapping\n'), ((4404, 4470), 'ezdxf.lldxf.attributes.DXFAttributes', 'DXFAttributes', (['base_class', 'acdb_symbol_table_record', 'acdb_linetype'], {}), '(base_class, acdb_symbol_table_record, acdb_linetype)\n', (4417, 4470), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, group_code_mapping\n'), ((899, 940), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(2)'], {'validator': 'is_valid_table_name'}), '(2, validator=is_valid_table_name)\n', (906, 940), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, group_code_mapping\n'), ((961, 983), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(3)'], {'default': '""""""'}), "(3, default='')\n", (968, 983), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, group_code_mapping\n'), ((998, 1020), 'ezdxf.lldxf.attributes.DXFAttr', 'DXFAttr', (['(70)'], {'default': '(0)'}), '(70, default=0)\n', (1005, 1020), False, 'from ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass, group_code_mapping\n'), ((1619, 1667), 'ezdxf.lldxf.tags.Tags', 'Tags', (['(tag for tag in self.tags if tag.code == 49)'], {}), '(tag for tag in self.tags if tag.code == 49)\n', (1623, 1667), False, 'from ezdxf.lldxf.tags import Tags\n'), ((4762, 4789), 'copy.deepcopy', 'deepcopy', (['self.pattern_tags'], {}), '(self.pattern_tags)\n', (4770, 4789), False, 'from copy import deepcopy\n'), ((7072, 7093), 'ezdxf.tools.complex_ltype.lin_compiler', 'lin_compiler', (['pattern'], {}), '(pattern)\n', (7084, 7093), False, 'from ezdxf.tools.complex_ltype import lin_compiler\n'), ((7749, 7766), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(73)', 'count'], {}), '(73, count)\n', (7755, 7766), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((2100, 2119), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(340)', 'handle'], {}), '(340, handle)\n', (2106, 2119), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((4634, 4640), 'ezdxf.lldxf.tags.Tags', 'Tags', ([], {}), '()\n', (4638, 4640), False, 'from ezdxf.lldxf.tags import Tags\n'), ((7187, 7200), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(73)', '(0)'], {}), '(73, 0)\n', (7193, 7200), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((7202, 7220), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(40)', 'length'], {}), '(40, length)\n', (7208, 7220), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((7686, 7699), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(74)', '(0)'], {}), '(74, 0)\n', (7692, 7699), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((7128, 7142), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(72)', '(65)'], {}), '(72, 65)\n', (7134, 7142), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((6652, 6666), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(72)', '(65)'], {}), '(72, 65)\n', (6658, 6666), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((6914, 6927), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(74)', '(0)'], {}), '(74, 0)\n', (6920, 6927), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((7437, 7450), 'ezdxf.lldxf.types.DXFTag', 'DXFTag', (['(74)', '(0)'], {}), '(74, 0)\n', (7443, 7450), False, 'from ezdxf.lldxf.types import DXFTag\n'), ((7579, 7613), 'typing.cast', 'cast', (['"""ComplexLineTypePart"""', 'token'], {}), "('ComplexLineTypePart', token)\n", (7583, 7613), False, 'from typing import TYPE_CHECKING, Union, 
Iterable, cast, Tuple, Sequence, Optional\n')]
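The core of `compile_line_pattern` above is the merging of consecutive same-sign elements (dashes with dashes, gaps with gaps). A simplified standalone variant of that step — it ignores the zero-length point case the original handles via `sign()`:

def merge_dashes(elements):
    merged = [elements[0]]
    for e in elements[1:]:
        if (e < 0) == (merged[-1] < 0):   # same sign: extend the current run
            merged[-1] += e
        else:                             # sign flip: start a new run
            merged.append(e)
    return merged

print(merge_dashes([0.5, 0.25, -0.1, -0.1, 0.5]))   # [0.75, -0.2, 0.5]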
import glob
import os

import rawpy
import numpy as np


# Returns a tuple
def getInputImagesList():
    # Get all short exposure images
    res = glob.glob('./dataset/sony/short/0*.ARW')
    res.sort()
    return (res, [int(os.path.basename(res)[0:5]) for res in res])


def getGroundtruthImagesList():
    # Get all long exposure images
    res = glob.glob('./dataset/sony/long/0*.ARW')
    res.sort()
    return (res, [int(os.path.basename(res)[0:5]) for res in res])


def getTestInputImagesList():
    res = glob.glob('./dataset/sony/short/1*.ARW')
    res.sort()
    return (res, [int(os.path.basename(res)[0:5]) for res in res])


def getTestGroundtruthImagesList():
    # Get all long exposure images
    res = glob.glob('./dataset/sony/long/1*.ARW')
    res.sort()
    return (res, [int(os.path.basename(res)[0:5]) for res in res])


# Use rawpy to get pictures
def pack_raw(raw):
    # Pack Bayer image to 4 channels
    im = raw.raw_image_visible.astype(np.float32)

    # Subtract the black level
    # 16383 == 2^14 (data is 14 bits)
    # 512 is hardware specific to the camera
    im = np.maximum(im - 512, 0) / (16383 - 512)

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.concatenate((im[0:H:2, 0:W:2, :],
                          im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :],
                          im[1:H:2, 0:W:2, :]), axis=2)
    return out


def pack_raw_test(raw):
    # Pack Bayer image to 4 channels
    im = raw.raw_image_visible.astype(np.float32)
    im = np.maximum(im - 512, 0) / (16383 - 512)  # subtract the black level

    im = np.expand_dims(im, axis=2)
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.concatenate((im[0:H:2, 0:W:2, :],
                          im[0:H:2, 1:W:2, :],
                          im[1:H:2, 1:W:2, :],
                          im[1:H:2, 0:W:2, :]), axis=2)
    return out
[ "numpy.maximum", "os.path.basename", "numpy.expand_dims", "glob.glob", "numpy.concatenate" ]
[((146, 186), 'glob.glob', 'glob.glob', (['"""./dataset/sony/short/0*.ARW"""'], {}), "('./dataset/sony/short/0*.ARW')\n", (155, 186), False, 'import glob\n'), ((348, 387), 'glob.glob', 'glob.glob', (['"""./dataset/sony/long/0*.ARW"""'], {}), "('./dataset/sony/long/0*.ARW')\n", (357, 387), False, 'import glob\n'), ((511, 551), 'glob.glob', 'glob.glob', (['"""./dataset/sony/short/1*.ARW"""'], {}), "('./dataset/sony/short/1*.ARW')\n", (520, 551), False, 'import glob\n'), ((726, 765), 'glob.glob', 'glob.glob', (['"""./dataset/sony/long/1*.ARW"""'], {}), "('./dataset/sony/long/1*.ARW')\n", (735, 765), False, 'import glob\n'), ((1158, 1184), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (1172, 1184), True, 'import numpy as np\n'), ((1263, 1375), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (1277, 1375), True, 'import numpy as np\n'), ((1609, 1635), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(2)'}), '(im, axis=2)\n', (1623, 1635), True, 'import numpy as np\n'), ((1714, 1826), 'numpy.concatenate', 'np.concatenate', (['(im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2, :], im[1:H:2, 0\n :W:2, :])'], {'axis': '(2)'}), '((im[0:H:2, 0:W:2, :], im[0:H:2, 1:W:2, :], im[1:H:2, 1:W:2,\n :], im[1:H:2, 0:W:2, :]), axis=2)\n', (1728, 1826), True, 'import numpy as np\n'), ((1108, 1131), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (1118, 1131), True, 'import numpy as np\n'), ((1535, 1558), 'numpy.maximum', 'np.maximum', (['(im - 512)', '(0)'], {}), '(im - 512, 0)\n', (1545, 1558), True, 'import numpy as np\n'), ((224, 245), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (240, 245), False, 'import os\n'), ((425, 446), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (441, 446), False, 'import os\n'), ((589, 610), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (605, 610), False, 'import os\n'), ((803, 824), 'os.path.basename', 'os.path.basename', (['res'], {}), '(res)\n', (819, 824), False, 'import os\n')]
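The 2x2 Bayer-to-4-channel packing used by `pack_raw` above can be exercised without a camera file, since only the strided slicing matters. A sketch on a synthetic array (the per-channel site labels depend on the sensor's Bayer layout; real cameras vary):

import numpy as np

im = np.arange(16, dtype=np.float32).reshape(4, 4, 1)   # stand-in for a raw mosaic
H, W = im.shape[:2]
packed = np.concatenate((im[0:H:2, 0:W:2, :],   # one Bayer site per output channel
                         im[0:H:2, 1:W:2, :],
                         im[1:H:2, 1:W:2, :],
                         im[1:H:2, 0:W:2, :]), axis=2)
print(packed.shape)   # (2, 2, 4): half resolution, four channels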
import regex

import lib.logger as logging
from lib.functions import wait_until
from lib.game import ui
from lib.game.battle_bot import ManualBattleBot
from lib.game.missions.missions import Missions

logger = logging.get_logger(__name__)


class WorldBossInvasion(Missions):
    """Class for working with World Boss Invasion missions."""

    class MissionFilter:
        """Class for working with mission types of World Boss Invasion."""

        DEFAULT_ERROR = 3  # Number of errors in the form of inserted, deleted or substituted characters in regex

        def __init__(self, pattern, opposite_pattern, mission_filter, opposite_filter):
            """Class initialization.

            :param str pattern: regular expression pattern for mission's condition.
            :param str opposite_pattern: regular expression pattern for opposite mission's condition.
            :param ui.UIElement mission_filter: UI for main mission filter.
            :param ui.UIElement opposite_filter: UI for opposite mission filter.
            """
            self.pattern = f"({pattern}){{e<={self.DEFAULT_ERROR}}}"
            self._regexp = regex.compile(self.pattern)
            self.opposite_pattern = f"({opposite_pattern}){{e<={self.DEFAULT_ERROR}}}"
            self._opposite_regexp = regex.compile(self.opposite_pattern)
            self.filter = mission_filter
            self.opposite_filter = opposite_filter

        def get_filter(self, text):
            if self._regexp.match(text):
                return self.filter
            if self._opposite_regexp.match(text):
                return self.opposite_filter

    class SuperHeroes(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Super Heroes",
                             opposite_pattern="Clear the stage with less than N Super Heroes",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_HERO,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_VILLAIN)

    class SuperVillain(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Super Villain",
                             opposite_pattern="Clear the stage with less than N Super Villain",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_VILLAIN,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_HERO)

    class BlastCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Blast type Characters",
                             opposite_pattern="Clear the stage with less than N Blast type Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_BLAST,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_ALL)

    class CombatCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Combat type Characters",
                             opposite_pattern="Clear the stage with less than N Combat type Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_COMBAT,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_ALL)

    class SpeedCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Speed type Characters",
                             opposite_pattern="Clear the stage with less than N Speed type Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_SPEED,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_ALL)

    class UniversalCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Universal type Characters",
                             opposite_pattern="Clear the stage with less than N Universal type Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_UNIVERSAL,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_ALL)

    class MaleCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Male Characters",
                             opposite_pattern="Clear the stage with less than N Male Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_MALE,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_FEMALE)

    class FemaleCharacters(MissionFilter):

        def __init__(self):
            super().__init__(pattern="Clear the stage with more than N Female Characters",
                             opposite_pattern="Clear the stage with less than N Female Characters",
                             mission_filter=ui.INVASION_CHARACTER_FILTER_FEMALE,
                             opposite_filter=ui.INVASION_CHARACTER_FILTER_MALE)

    def __init__(self, game):
        """Class initialization.

        :param lib.game.game.Game game: instance of the game.
        """
        super().__init__(game, mode_name='WORLD BOSS INVASION')
        self._chests = None
        self._max_chests = None
        self._boss_mission = None
        self.mission_filters = [self.SuperHeroes(), self.SuperVillain(),
                                self.MaleCharacters(), self.FemaleCharacters(),
                                self.CombatCharacters(), self.SpeedCharacters(),
                                self.BlastCharacters(), self.UniversalCharacters()]

    @property
    def battle_over_conditions(self):
        def damage():
            if self.emulator.is_ui_element_on_screen(ui.INVASION_END_BATTLE_DAMAGE):
                logger.info("Won battle, chest was acquired.")
                self._chests += 1
                return True
            return False

        def failed():
            return self.emulator.is_ui_element_on_screen(ui.INVASION_FAILED)

        return [damage, failed]

    def do_missions(self, times=None, ignore_coop_mission=False):
        """Does missions."""
        self.start_missions(times=times, ignore_coop_mission=ignore_coop_mission)
        self.end_missions()

    def start_missions(self, times=None, ignore_coop_mission=False):
        """Starts World Boss Invasion."""
        if self.open_world_boss_invasion():
            if self.chests > 0:
                if not self.acquire_chests():
                    return
            if times:
                self._max_chests = times
            if self.chests < self.max_chests and self._find_boss_for_fight():
                while self.chests < self.max_chests:
                    logger.debug(f"{times} stages left to complete "
                                 f"({self.chests} out of {self.max_chests}).")
                    if not self.press_start_button(ignore_coop_mission=ignore_coop_mission):
                        return
                    self._wait_for_players_and_start_fight()
        logger.info("No more stages.")

    def end_missions(self):
        """Ends missions."""
        if not self.game.is_main_menu():
            if self.emulator.is_image_on_screen(ui.HOME):
                self.emulator.click_button(ui.HOME)
                self.close_after_mission_notifications()
                self.game.close_ads()
            else:
                logger.error("Can't return to main menu, HOME button is missing.")

    def open_world_boss_invasion(self):
        """Opens World Boss Invasion missions.

        :return: is WBI missions open or not.
        :rtype: bool
        """
        self.game.go_to_coop()
        if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_LABEL):
            self.emulator.click_button(ui.INVASION_LABEL)
            if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_MENU_LABEL):
                return wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_MANAGE_CHESTS)
        return False

    def _get_chests_count(self):
        """Stores current chests and max chests amount."""
        chests_text = self.emulator.get_screen_text(ui_element=ui.INVASION_STAGES)
        current_chest, max_chest = self.game.get_current_and_max_values_from_text(chests_text)
        self._chests = 5 if current_chest > 5 else current_chest
        self._max_chests = 5 if max_chest > 5 else max_chest
        logger.info(f"{self._chests} chests out of {self._max_chests} (from '{chests_text}' text).")

    @property
    def chests(self):
        """Get current amount of chests.

        :rtype: int
        """
        if self._chests is None:
            self._get_chests_count()
        return self._chests

    @chests.setter
    def chests(self, value):
        """Update available chests value.

        :param int value: value to set.
        """
        self._chests = value

    @property
    def max_chests(self):
        """Get max amount of chests.

        :rtype: int
        """
        if self._max_chests is None:
            self._get_chests_count()
        return self._max_chests

    def acquire_chests(self):
        """Acquires all available chests."""
        logger.debug("Starting to acquire all available chests.")
        self.emulator.click_button(ui.INVASION_MANAGE_CHESTS)
        if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_CHESTS_MENU_LABEL):
            for chest_index in range(1, self.max_chests + 1):
                self._acquire_chest(chest_index)
        logger.debug("Going back to mission's lobby.")
        self.emulator.click_button(ui.MENU_BACK)
        if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_MANAGE_CHESTS):
            self._get_chests_count()
            return True
        logger.error("Can't get back to mission's lobby.")

    def _acquire_chest(self, chest_index):
        """Acquires chest by chest index.

        :param int chest_index: chest index (from 1 to max chests + 1)

        :return: was chest acquired or not.
        :rtype: bool
        """
        logger.debug(f"Trying to acquire chest #{chest_index}")
        chest_ui = ui.get_by_name(f'INVASION_CHEST_AVAILABLE_{chest_index}')
        if wait_until(self.emulator.is_ui_element_on_screen, timeout=1, ui_element=chest_ui):
            logger.debug(f"Chest {chest_index} is available. Trying to open.")
            self.emulator.click_button(chest_ui)
            if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_SKIP_CHEST):
                while self.emulator.is_ui_element_on_screen(ui_element=ui.INVASION_SKIP_CHEST):
                    logger.debug("Skipping chests items.")
                    self.emulator.click_button(ui.INVASION_SKIP_CHEST, min_duration=0.5, max_duration=0.8)
                while not self.emulator.is_ui_element_on_screen(ui_element=ui.INVASION_CHESTS_MENU_LABEL):
                    self.emulator.click_button(ui.INVASION_SKIP_CHEST, min_duration=0.5, max_duration=0.8)
                logger.debug("Chest acquired, going back to chest's menu.")
                return True
        logger.debug(f"Chest #{chest_index} isn't available.")
        return False

    def _find_boss_for_fight(self):
        """Finds an available boss fight and enters it.

        :return: was fight found and entered or not.
        :rtype: bool
        """
        weekly_boss_name = self.emulator.get_screen_text(ui_element=ui.INVASION_NAME)
        logger.debug(f"Weekly boss name: {weekly_boss_name}")
        for bosses in ['INVASION_TWILIGHT_BATTLE_', 'INVASION_BLACK_ORDER_BATTLE_']:
            for boss_index in range(1, 8):
                boss_ui = ui.get_by_name(f'{bosses}{boss_index}')
                boss_time = self.emulator.get_screen_text(ui_element=boss_ui)
                if boss_time:
                    logger.debug(f"Found boss with UI: {boss_ui} with time {boss_time}, entering.")
                    self.emulator.click_button(boss_ui)
                    if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_BOSS_FIGHT_ENTER):
                        self._boss_mission = self.emulator.get_screen_text(ui.INVASION_BOSS_MISSION)
                        logger.debug(f"Current boss mission: {self._boss_mission}")
                        self.emulator.click_button(ui.INVASION_BOSS_FIGHT_ENTER)
                        return True
                    logger.error(f"Something went wrong with found boss {boss_ui}")
                    if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_BOSS_FIGHT_CLOSE):
                        logger.warning(f"Closing {boss_ui}")
                        self.emulator.click_button(ui.INVASION_BOSS_FIGHT_CLOSE)
        logger.error("Failed to find a boss.")
        return False

    def _check_notifications_before_fight(self):
        """Checks fight notifications about any obstacles to start a fight.

        :return: can we start a fight or not.
        :rtype: bool
        """
        waiting_for_other_players = self.emulator.is_ui_element_on_screen(
            ui_element=ui.WAITING_FOR_OTHER_PLAYERS)
        if not waiting_for_other_players:
            if self.emulator.is_ui_element_on_screen(ui_element=ui.NOT_ENOUGH_ENERGY):
                self.emulator.click_button(ui.NOT_ENOUGH_ENERGY)
                self._chests = self._max_chests
            if self.emulator.is_ui_element_on_screen(ui_element=ui.INVASION_NOT_ENOUGH_CHARACTERS):
                self.emulator.click_button(ui.INVASION_NOT_ENOUGH_CHARACTERS)
                self._chests = self._max_chests
            return False
        return True

    def press_start_button(self, start_button_ui=ui.INVASION_BOSS_FIGHT_START, ignore_coop_mission=False):
        """Presses start button of the mission.

        :return: was button clicked successfully or not.
        :rtype: bool
        """
        logger.debug(f"Pressing START button with UI Element: {start_button_ui}.")
        if wait_until(self.emulator.is_ui_element_on_screen, ui_element=start_button_ui):
            self._deploy_characters(ignore_coop_mission=ignore_coop_mission)
            self.emulator.click_button(start_button_ui)
            if wait_until(self._check_notifications_before_fight, timeout=10):
                return True
            if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_NO_CHEST_SLOTS):
                logger.warning("No slots for chests. Exiting.")
                self.emulator.click_button(ui.INVASION_NO_CHEST_SLOTS)
                return False
        if wait_until(self.emulator.is_ui_element_on_screen, timeout=2, ui_element=ui.DISCONNECT_NEW_OPPONENT):
            logger.debug("Found disconnect notification. Trying to start again.")
            self.emulator.click_button(ui.DISCONNECT_NEW_OPPONENT)
            return True
        logger.error(f"Unable to press {start_button_ui} button.")
        return False

    def _deploy_characters(self, ignore_coop_mission=False):
        """Deploys 3 characters to battle."""
        no_main = self.emulator.is_image_on_screen(ui_element=ui.INVASION_NO_CHARACTER_MAIN)
        no_left = self.emulator.is_image_on_screen(ui_element=ui.INVASION_NO_CHARACTER_LEFT)
        no_right = self.emulator.is_image_on_screen(ui_element=ui.INVASION_NO_CHARACTER_RIGHT)
        if not ignore_coop_mission and (no_main or no_left or no_right):
            self._select_character_filter_by_mission()
        if no_main:
            self.emulator.click_button(ui.INVASION_CHARACTER_1)
        if no_left:
            self.emulator.click_button(ui.INVASION_CHARACTER_2)
        if no_right:
            self.emulator.click_button(ui.INVASION_CHARACTER_3)

    def _select_character_filter_by_mission(self):
        """Selects character filter by current mission."""
        for mission_filter in self.mission_filters:
            characters_filter = mission_filter.get_filter(text=self._boss_mission)
            if characters_filter:
                logger.debug(f"Found filter {characters_filter} by {mission_filter.__class__.__name__}")
                self.emulator.click_button(ui.INVASION_CHARACTER_FILTER, min_duration=1, max_duration=1)
                self.emulator.click_button(characters_filter, min_duration=1, max_duration=1)

    def _wait_for_players_and_start_fight(self):
        """Waits for players before start of the fight."""
        if wait_until(self.emulator.is_ui_element_on_screen, ui_element=ui.WAITING_FOR_OTHER_PLAYERS):
            logger.debug("Waiting for other players before battle.")
            if wait_until(self.emulator.is_ui_element_on_screen, timeout=60, condition=False, period=0.5,
                          ui_element=ui.WAITING_FOR_OTHER_PLAYERS):
                if wait_until(self.emulator.is_ui_element_on_screen, timeout=2,
                              ui_element=ui.DISCONNECT_NEW_OPPONENT):
                    logger.debug("Found disconnect notification. Trying to start again.")
                    self.emulator.click_button(ui.DISCONNECT_NEW_OPPONENT)
                    return self._wait_for_players_and_start_fight()
                logger.debug("Battle is loading. Starting manual bot.")
                return self._manual_bot_start()
            logger.warning("Waited for other players too long, trying to reset.")
            self.emulator.click_button(ui.WAITING_FOR_OTHER_PLAYERS)

    def _manual_bot_start(self):
        """Starts manual bot for the fight."""
        ManualBattleBot(self.game, self.battle_over_conditions, self.disconnect_conditions).fight()
        if wait_until(self.emulator.is_image_on_screen, timeout=2, ui_element=ui.INVASION_HOME_BUTTON):
            if self._chests < self._max_chests:
                self.press_repeat_button(repeat_button_ui=ui.INVASION_REPEAT_BUTTON,
                                         start_button_ui=ui.INVASION_BOSS_FIGHT_START)
            else:
                self.press_home_button(home_button=ui.INVASION_HOME_BUTTON)
            return
        # In case we got back from fight by disconnect or something else
        logger.debug("Any chest after boss fight wasn't acquired.")
        if wait_until(self.emulator.is_ui_element_on_screen, timeout=20, ui_element=ui.INVASION_BOSS_FIGHT_START):
            if self.press_start_button():
                self._wait_for_players_and_start_fight()
[ "lib.logger.get_logger", "regex.compile", "lib.functions.wait_until", "lib.game.battle_bot.ManualBattleBot", "lib.game.ui.get_by_name" ]
[((210, 238), 'lib.logger.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (228, 238), True, 'import lib.logger as logging\n'), ((7669, 7748), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_LABEL'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.INVASION_LABEL)\n', (7679, 7748), False, 'from lib.functions import wait_until\n'), ((9353, 9449), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_CHESTS_MENU_LABEL'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_CHESTS_MENU_LABEL)\n', (9363, 9449), False, 'from lib.functions import wait_until\n'), ((9672, 9764), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_MANAGE_CHESTS'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_MANAGE_CHESTS)\n', (9682, 9764), False, 'from lib.functions import wait_until\n'), ((10200, 10257), 'lib.game.ui.get_by_name', 'ui.get_by_name', (['f"""INVASION_CHEST_AVAILABLE_{chest_index}"""'], {}), "(f'INVASION_CHEST_AVAILABLE_{chest_index}')\n", (10214, 10257), False, 'from lib.game import ui\n'), ((10269, 10355), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'timeout': '(1)', 'ui_element': 'chest_ui'}), '(self.emulator.is_ui_element_on_screen, timeout=1, ui_element=\n chest_ui)\n', (10279, 10355), False, 'from lib.functions import wait_until\n'), ((14135, 14212), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'start_button_ui'}), '(self.emulator.is_ui_element_on_screen, ui_element=start_button_ui)\n', (14145, 14212), False, 'from lib.functions import wait_until\n'), ((14734, 14838), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'timeout': '(2)', 'ui_element': 'ui.DISCONNECT_NEW_OPPONENT'}), '(self.emulator.is_ui_element_on_screen, timeout=2, ui_element=ui.\n DISCONNECT_NEW_OPPONENT)\n', (14744, 14838), False, 'from lib.functions import wait_until\n'), ((16592, 16687), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.WAITING_FOR_OTHER_PLAYERS'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n WAITING_FOR_OTHER_PLAYERS)\n', (16602, 16687), False, 'from lib.functions import wait_until\n'), ((17771, 17867), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_image_on_screen'], {'timeout': '(2)', 'ui_element': 'ui.INVASION_HOME_BUTTON'}), '(self.emulator.is_image_on_screen, timeout=2, ui_element=ui.\n INVASION_HOME_BUTTON)\n', (17781, 17867), False, 'from lib.functions import wait_until\n'), ((18349, 18456), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'timeout': '(20)', 'ui_element': 'ui.INVASION_BOSS_FIGHT_START'}), '(self.emulator.is_ui_element_on_screen, timeout=20, ui_element=ui\n .INVASION_BOSS_FIGHT_START)\n', (18359, 18456), False, 'from lib.functions import wait_until\n'), ((1137, 1164), 'regex.compile', 'regex.compile', (['self.pattern'], {}), '(self.pattern)\n', (1150, 1164), False, 'import regex\n'), ((1288, 1324), 'regex.compile', 'regex.compile', (['self.opposite_pattern'], {}), '(self.opposite_pattern)\n', (1301, 1324), False, 'import regex\n'), ((7823, 7912), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_MENU_LABEL'}), 
'(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_MENU_LABEL)\n', (7833, 7912), False, 'from lib.functions import wait_until\n'), ((10495, 10584), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_SKIP_CHEST'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_SKIP_CHEST)\n', (10505, 10584), False, 'from lib.functions import wait_until\n'), ((14362, 14424), 'lib.functions.wait_until', 'wait_until', (['self._check_notifications_before_fight'], {'timeout': '(10)'}), '(self._check_notifications_before_fight, timeout=10)\n', (14372, 14424), False, 'from lib.functions import wait_until\n'), ((14469, 14562), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_NO_CHEST_SLOTS'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_NO_CHEST_SLOTS)\n', (14479, 14562), False, 'from lib.functions import wait_until\n'), ((16768, 16904), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'timeout': '(60)', 'condition': '(False)', 'period': '(0.5)', 'ui_element': 'ui.WAITING_FOR_OTHER_PLAYERS'}), '(self.emulator.is_ui_element_on_screen, timeout=60, condition=\n False, period=0.5, ui_element=ui.WAITING_FOR_OTHER_PLAYERS)\n', (16778, 16904), False, 'from lib.functions import wait_until\n'), ((7932, 8024), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_MANAGE_CHESTS'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_MANAGE_CHESTS)\n', (7942, 8024), False, 'from lib.functions import wait_until\n'), ((11817, 11856), 'lib.game.ui.get_by_name', 'ui.get_by_name', (['f"""{bosses}{boss_index}"""'], {}), "(f'{bosses}{boss_index}')\n", (11831, 11856), False, 'from lib.game import ui\n'), ((16946, 17050), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'timeout': '(2)', 'ui_element': 'ui.DISCONNECT_NEW_OPPONENT'}), '(self.emulator.is_ui_element_on_screen, timeout=2, ui_element=ui.\n DISCONNECT_NEW_OPPONENT)\n', (16956, 17050), False, 'from lib.functions import wait_until\n'), ((17668, 17756), 'lib.game.battle_bot.ManualBattleBot', 'ManualBattleBot', (['self.game', 'self.battle_over_conditions', 'self.disconnect_conditions'], {}), '(self.game, self.battle_over_conditions, self.\n disconnect_conditions)\n', (17683, 17756), False, 'from lib.game.battle_bot import ManualBattleBot\n'), ((12144, 12239), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_BOSS_FIGHT_ENTER'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_BOSS_FIGHT_ENTER)\n', (12154, 12239), False, 'from lib.functions import wait_until\n'), ((12645, 12740), 'lib.functions.wait_until', 'wait_until', (['self.emulator.is_ui_element_on_screen'], {'ui_element': 'ui.INVASION_BOSS_FIGHT_CLOSE'}), '(self.emulator.is_ui_element_on_screen, ui_element=ui.\n INVASION_BOSS_FIGHT_CLOSE)\n', (12655, 12740), False, 'from lib.functions import wait_until\n')]
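The mission matching in the entry above leans on the third-party `regex` module's fuzzy syntax: `(pattern){e<=3}` accepts up to three inserted, deleted or substituted characters, which is how "N" in the template matches the actual number on screen. A minimal demonstration of that syntax (`pip install regex`; the sample strings are invented):

import regex

fuzzy = regex.compile(r"(Clear the stage with more than N Male Characters){e<=3}")
print(bool(fuzzy.match("Clear the stage with more than 3 Male Characters")))  # True: one substitution
print(bool(fuzzy.match("A completely different mission text")))               # False: too many edits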
#!/usr/bin/env python3
import sys
import re
import argparse
from collections import Counter


def main():
    parser = argparse.ArgumentParser(
        "print_phonemes.py",
        description="Prints the unique set of phonemes from a pronunciation dictionary",
    )
    parser.add_argument("dictionary", help="Path to pronunciation dictionary")
    parser.add_argument(
        "--min-count",
        type=int,
        help="Minimum use count before printing phoneme in list",
    )
    parser.add_argument(
        "--counts", action="store_true", help="Print counts with list"
    )
    args = parser.parse_args()

    if args.dictionary == "-":
        dict_file = sys.stdin
    else:
        dict_file = open(args.dictionary, "r")

    phonemes = Counter()

    with dict_file:
        for line in dict_file:
            line = line.strip()
            if len(line) == 0:
                continue

            # Use explicit whitespace (avoid 0xA0)
            parts = re.split(r"[ \t]+", line)
            for phoneme in parts[1:]:
                phonemes[phoneme] += 1

    for phoneme in sorted(phonemes):
        count = phonemes[phoneme]
        if args.min_count is None or (count >= args.min_count):
            if args.counts:
                print(phoneme, count)
            else:
                print(phoneme)


# -----------------------------------------------------------------------------

if __name__ == "__main__":
    main()
[ "collections.Counter", "re.split", "argparse.ArgumentParser" ]
[((119, 249), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""print_phonemes.py"""'], {'description': '"""Prints the unique set of phonemes from a pronunciation dictionary"""'}), "('print_phonemes.py', description=\n 'Prints the unique set of phonemes from a pronunciation dictionary')\n", (142, 249), False, 'import argparse\n'), ((753, 762), 'collections.Counter', 'Counter', ([], {}), '()\n', (760, 762), False, 'from collections import Counter\n'), ((974, 999), 're.split', 're.split', (['"""[ \\\\t]+"""', 'line'], {}), "('[ \\\\t]+', line)\n", (982, 999), False, 'import re\n')]
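The counting core of the script above is just `re.split` plus `collections.Counter`. Reduced to a few lines on inline data (the two sample dictionary entries are invented):

import re
from collections import Counter

phonemes = Counter()
for line in ["hello HH AH L OW", "world W ER L D"]:
    parts = re.split(r"[ \t]+", line.strip())
    for phoneme in parts[1:]:        # parts[0] is the word itself
        phonemes[phoneme] += 1
print(phonemes.most_common(2))       # [('L', 2), ('HH', 1)]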
import datetime
import importlib
import sys

# Force CPython to use the pure-Python implementation of the datetime module
# for testing. https://stackoverflow.com/a/62506055/232571
sys.modules["_datetime"] = None
importlib.reload(datetime)
[ "importlib.reload" ]
[((213, 239), 'importlib.reload', 'importlib.reload', (['datetime'], {}), '(datetime)\n', (229, 239), False, 'import importlib\n')]
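A quick way to confirm the snippet above took effect: with the `_datetime` accelerator blocked, the reloaded module is backed by the pure-Python `datetime.py`, whose classes carry inspectable source. A sketch of the check (it relies on CPython's `try: from _datetime import *` fallback inside datetime.py):

import datetime
import importlib
import inspect
import sys

sys.modules["_datetime"] = None   # make `import _datetime` fail
importlib.reload(datetime)
# A Python-defined class has a locatable source file; the C type would raise TypeError.
print(inspect.getsourcefile(datetime.timedelta))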
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_stream_signal.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: <NAME>
#         https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, <NAME>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import os
import time
import threading

# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
                    filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")

binance_websocket_api_manager = BinanceWebSocketApiManager(enable_stream_signal_buffer=True)


def print_stream_signals(binance_websocket_api_manager):
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        stream_signal = binance_websocket_api_manager.pop_stream_signal_from_stream_signal_buffer()
        if stream_signal is False:
            time.sleep(0.01)
        else:
            print(stream_signal)


# start a worker thread that moves the received stream signals from the stream_signal_buffer to a print function
worker_thread = threading.Thread(target=print_stream_signals, args=(binance_websocket_api_manager,))
worker_thread.start()

print("\r\n========================================== Starting aggTrade ==========================================\r\n")
# start
markets = ['xrpbearbusd', 'zeceth', 'cndbtc', 'dashbtc', 'atompax', 'perlbtc', 'ardreth', 'zecbnb', 'bchabctusd',
           'usdsbusdt', 'winbnb', 'xzcxrp', 'bchusdc', 'wavesbnb', 'kavausdt', 'btsusdt', 'chzbnb', 'tusdbnb',
           'xtzbusd', 'bcptusdc', 'dogebnb', 'eosbearusdt', 'ambbnb', 'wrxbnb', 'poabtc', 'wanbtc', 'ardrbtc',
           'icnbtc', 'bchabcbusd', 'ltcbnb', 'pivxeth', 'skybtc', 'tntbtc', 'poebtc', 'steembtc', 'icxusdt',
           'tfuelbtc', 'chzbtc', 'vibeth', 'winusdc', 'gtobtc', 'linkusdc', 'batbusd', 'rdnbtc', 'dataeth',
           'bttpax', 'zrxbnb', 'vibbtc', 'neobnb', 'cosbtc', 'powreth', 'rlcusdt', 'hbarbnb', 'wabieth', 'bqxeth',
           'aionbtc', 'aeeth', 'mthbtc', 'wrxbtc', 'pptbtc', 'nknbtc', 'zecusdt', 'stormeth', 'qtumusdt']
aggtrade_stream_id = binance_websocket_api_manager.create_stream(["aggTrade"], markets)
time.sleep(7)
# stop
binance_websocket_api_manager.stop_stream(aggtrade_stream_id)
time.sleep(2)
print("\r\n=========================================== Stopped aggTrade ==========================================\r\n")

print("\r\n====================================== Starting trade and kline_1m ====================================\r\n")
trade_stream_id = binance_websocket_api_manager.create_stream(["trade"], markets)
kline_1m_stream_id = binance_websocket_api_manager.create_stream("kline_1m", markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(trade_stream_id)
binance_websocket_api_manager.stop_stream(kline_1m_stream_id)
time.sleep(2)
print("\r\n====================================== Stopped trade and kline_1m =====================================\r\n")

print("\r\n======================================== Starting ticker ==============================================\r\n")
ticker_bnbbtc_stream_id = binance_websocket_api_manager.create_stream(["ticker"], markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(ticker_bnbbtc_stream_id)
time.sleep(2)
print("\r\n======================================== Stopped ticker ===============================================\r\n")

print("\r\n========================================== Starting miniticker ========================================\r\n")
miniticker_stream_id = binance_websocket_api_manager.create_stream(["miniTicker"], markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(miniticker_stream_id)
time.sleep(2)
print("\r\n========================================= Stopped miniticker =========================================\r\n")

print("\r\n========================================== Starting kline_5m ==========================================\r\n")
kline_5m_stream_id = binance_websocket_api_manager.create_stream(["kline_5m"], markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(kline_5m_stream_id)
time.sleep(2)
print("\r\n========================================= Stopped kline_5m ===========================================\r\n")

print("\r\n=========================================== Starting depth5 ===========================================\r\n")
depth5_stream_id = binance_websocket_api_manager.create_stream(["depth5"], markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(depth5_stream_id)
time.sleep(2)
print("\r\n========================================== Stopped depth5 ============================================\r\n")

print("\r\n========================================== Starting depth =============================================\r\n")
depth_stream_id = binance_websocket_api_manager.create_stream(["depth"], markets)
time.sleep(7)
binance_websocket_api_manager.stop_stream(depth_stream_id)
time.sleep(2)
print("\r\n============================================ Stopped depth ===========================================\r\n")

print("\r\n========================================== Starting !miniticker ========================================\r\n")
miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])
time.sleep(7)
binance_websocket_api_manager.stop_stream(miniticker_stream_id)
time.sleep(2)
print("\r\n========================================= Stopped !miniticker =========================================\r\n")

print("\r\n========================================== Starting ticker all ========================================\r\n")
ticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!ticker"])
time.sleep(7)
binance_websocket_api_manager.stop_stream(ticker_all_stream_id)
time.sleep(2)
print("\r\n=========================================== Stopped ticker all ========================================\r\n")

print("\r\n=================================== Starting multi multi socket =======================================\r\n")
channels = {'trade', 'kline_1', 'kline_5', 'kline_15', 'kline_30', 'kline_1h', 'kline_12h', 'kline_1w',
            'miniTicker', 'depth20'}
print(channels)
print(markets, "\r\n")
time.sleep(3)
multi_multi_stream_id = binance_websocket_api_manager.create_stream(channels, markets)
time.sleep(3)
binance_websocket_api_manager.stop_stream(multi_multi_stream_id)
time.sleep(2)
print("\r\n================================== Stopped multi multi socket ========================================\r\n")

print("\r\n============================= Starting multi multi socket subscribe ===================================\r\n")
channels = {'trade', 'kline_1', 'kline_5', 'kline_15', 'kline_30', 'kline_1h', 'kline_12h', 'kline_1w',
            'miniTicker', 'depth20', '!miniTicker', '!ticker'}
multi_multi_stream_id = binance_websocket_api_manager.create_stream(channels, markets)
time.sleep(5)
binance_websocket_api_manager.stop_stream(multi_multi_stream_id)
time.sleep(2)
print("\r\n============================== Stopped multi multi socket subscribe ===================================\r\n")

print("\r\n=============================== Stopping BinanceWebSocketManager ======================================\r\n")
binance_websocket_api_manager.stop_manager_with_all_streams()
print("finished!")
[ "unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager", "threading.Thread", "os.path.basename", "time.sleep" ]
[((2026, 2086), 'unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager.BinanceWebSocketApiManager', 'BinanceWebSocketApiManager', ([], {'enable_stream_signal_buffer': '(True)'}), '(enable_stream_signal_buffer=True)\n', (2052, 2086), False, 'from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager\n'), ((2586, 2675), 'threading.Thread', 'threading.Thread', ([], {'target': 'print_stream_signals', 'args': '(binance_websocket_api_manager,)'}), '(target=print_stream_signals, args=(\n binance_websocket_api_manager,))\n', (2602, 2675), False, 'import threading\n'), ((3687, 3700), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (3697, 3700), False, 'import time\n'), ((3770, 3783), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3780, 3783), False, 'import time\n'), ((4195, 4208), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (4205, 4208), False, 'import time\n'), ((4330, 4343), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4340, 4343), False, 'import time\n'), ((4678, 4691), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (4688, 4691), False, 'import time\n'), ((4759, 4772), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4769, 4772), False, 'import time\n'), ((5108, 5121), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (5118, 5121), False, 'import time\n'), ((5186, 5199), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5196, 5199), False, 'import time\n'), ((5531, 5544), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (5541, 5544), False, 'import time\n'), ((5607, 5620), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5617, 5620), False, 'import time\n'), ((5948, 5961), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (5958, 5961), False, 'import time\n'), ((6022, 6035), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6032, 6035), False, 'import time\n'), ((6361, 6374), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (6371, 6374), False, 'import time\n'), ((6434, 6447), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6444, 6447), False, 'import time\n'), ((6785, 6798), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (6795, 6798), False, 'import time\n'), ((6863, 6876), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6873, 6876), False, 'import time\n'), ((7210, 7223), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (7220, 7223), False, 'import time\n'), ((7288, 7301), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7298, 7301), False, 'import time\n'), ((7725, 7738), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7735, 7738), False, 'import time\n'), ((7826, 7839), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (7836, 7839), False, 'import time\n'), ((7905, 7918), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7915, 7918), False, 'import time\n'), ((8416, 8429), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8426, 8429), False, 'import time\n'), ((8495, 8508), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (8505, 8508), False, 'import time\n'), ((1830, 1856), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1846, 1856), False, 'import os\n'), ((2393, 2409), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2403, 2409), False, 'import time\n')]
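The example above drains only the stream signal buffer; received payloads accumulate in the manager's separate stream buffer. A minimal companion sketch, assuming the manager's pop_stream_data_from_stream_buffer() method behaves like the signal variant used above (returning False when empty):

import time
import threading

def print_stream_data(manager):
    # Drain received payloads; False means the buffer is currently empty.
    while True:
        if manager.is_manager_stopping():
            return
        data = manager.pop_stream_data_from_stream_buffer()
        if data is False:
            time.sleep(0.01)
        else:
            print(data)

data_thread = threading.Thread(target=print_stream_data,
                               args=(binance_websocket_api_manager,))
data_thread.start()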
# -*- coding: utf-8 -*-
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from csv import DictReader
from io import TextIOWrapper

from nose.tools import eq_
from pkg_resources import resource_stream


def full_test():
    reader = DictReader(TextIOWrapper(resource_stream('pycaptioner', 'test/data/points.csv')))
    for line in reader:
        print(line)
[ "pkg_resources.resource_stream" ]
[((250, 304), 'pkg_resources.resource_stream', 'resource_stream', (['"""pycaptioner"""', '"""test/data/points.csv"""'], {}), "('pycaptioner', 'test/data/points.csv')\n", (265, 304), False, 'from pkg_resources import resource_stream\n')]
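pkg_resources is deprecated in current setuptools releases; a hedged sketch of the same fixture load via the standard-library importlib.resources (Python 3.9+), assuming the package data layout is unchanged:

from csv import DictReader
from importlib.resources import files

def full_test():
    # files() returns a Traversable; open the bundled CSV as text directly.
    resource = files('pycaptioner').joinpath('test/data/points.csv')
    with resource.open('r', encoding='utf-8') as fp:
        for line in DictReader(fp):
            print(line)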
from redis import Redis

# Initialise the Redis client instance
cache = Redis(host='192.168.184.128', port=6379, password='<PASSWORD>')

# 1. String operations
print('String operations.....')
cache.set(name='username', value='zhiliao')
print(cache.get('username'))
cache.delete('username')

cache.set(name='read_count', value=1)
cache.incr(name='read_count', amount=5)  # increment by 5
cache.decr(name='read_count', amount=3)  # decrement by 3
print(cache.get('read_count'))

# 2. List operations
print('List operations....')
cache.lpush('languages', 'python')
cache.lpush('languages', 'javascript')
print(cache.lrange('languages', 0, -1))  # read the whole list

# 3. Set operations
print('Set operations....')
cache.sadd('teams', 'python')
cache.sadd('teams', 'javascript')
print(cache.smembers('teams'))

# 4. Hash operations
print('Hash operations....')
cache.hset(name='website', key='baidu', value='https://www.baidu.com')
cache.hset(name='website', key='google', value='https://www.google.com')
print(cache.hgetall('website'))

# 5. Transaction (pipeline) operations
print('Transaction (pipeline) operations....')
pip = cache.pipeline()  # create a pipeline instance
pip.set(name='height', value=170)
pip.set(name='weight', value=120)
pip.execute()

# 6. Publish/subscribe
print('Publish/subscribe....')
ps = cache.pubsub()
ps.subscribe('email')
while True:
    for item in ps.listen():
        if item.get('type') == 'message':
            print(item.get('data'))
[ "redis.Redis" ]
[((54, 117), 'redis.Redis', 'Redis', ([], {'host': '"""192.168.184.128"""', 'port': '(6379)', 'password': '"""<PASSWORD>"""'}), "(host='192.168.184.128', port=6379, password='<PASSWORD>')\n", (59, 117), False, 'from redis import Redis\n')]
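The subscriber loop above blocks waiting for messages; the publishing side is a single call. A minimal sketch, reusing the record's placeholder credentials:

from redis import Redis

publisher = Redis(host='192.168.184.128', port=6379, password='<PASSWORD>')
# Every client subscribed to the 'email' channel receives this payload.
publisher.publish('email', 'new mail arrived')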
# Copyright 2014 Intel Corporation, All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging from django.utils.translation import ugettext_lazy as _ from horizon import forms LOG = logging.getLogger(__name__) class AddOpenstackEndpointForm(forms.SelfHandlingForm): failure_url = 'horizon:vsm:openstackconnect:index' os_tenant_name = forms.CharField( label = _("Tenant Name"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_username = forms.CharField( label = _("UserName"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_password = forms.CharField( label = _("Password"), widget=forms.PasswordInput(render_value=False), max_length=255, min_length=1, error_messages = { 'required': _('This field is required.') } ) os_auth_url = forms.CharField( label = _("Auth Url"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_region_name = forms.CharField( label = _("Region Name"), max_length = 255, min_length = 0, required = False ) ssh_user = forms.CharField( label = _("SSH User Name"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) def handle(self, request, data): pass # TODO deliver a cluster id in data # data['cluster_id'] = 1 # try: # LOG.info("CEPH_LOG in ADD ip, %s" % str(data)) # os_tenant_name = data['os_tenant_name'] # os_username = data['os_username'] # os_password = data['<PASSWORD>'] # os_auth_url = data['os_auth_url'] # ip = os_auth_url.split(":")[1][2:] # appnodes = vsm_api.appnode_list(request) # for appnode in appnodes: # old_os_auth_url = appnode.os_auth_url # old_ip = old_os_auth_url.split(":")[1][2:] # if ip == old_ip: # messages.error(request, "duplicate ip address") # return False # body = { # 'appnodes': { # 'os_tenant_name': os_tenant_name, # 'os_username': os_username, # 'os_password': <PASSWORD>, # 'os_auth_url': os_auth_url # } # } # LOG.info("CEPH_LOG in handle body %s" % str(body)) # ret = vsm_api.add_appnodes(request, body['appnodes']) # # messages.success(request, # _('Successfully add openstack: %s') # % data['os_auth_url']) # return ret # except: # redirect = reverse("horizon:vsm:openstackconnect:index") # exceptions.handle(request, # _('Unable to create appnode.'), # redirect=redirect) class UpdateOpenstackEndpointForm(forms.SelfHandlingForm): id = forms.CharField(label=_("ID"), widget=forms.HiddenInput) os_tenant_name = forms.CharField( label = _("Tenant Name"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_username = forms.CharField( label = _("UserName"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_password = forms.CharField( label = _("Password"), widget=forms.PasswordInput(render_value=False), max_length=255, min_length=1, error_messages = { 'required': _('This field is required.') } ) os_auth_url = forms.CharField( label = _("Auth Url"), 
max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) os_region_name = forms.CharField( label = _("Region Name"), max_length = 255, min_length = 0, required = False ) ssh_user = forms.CharField( label = _("SSH User Name"), max_length = 255, min_length = 1, error_messages = { 'required': _('This field is required.') } ) def handle(self, request, data): pass # failed, succeeded = [], [] # id = data.pop('id') # # ip = data.pop('ip') # os_tenant_name = data.pop('os_tenant_name') # os_username = data.pop('os_username') # os_password = data.pop('os_password') # os_auth_url = data.pop('os_auth_url') # vsm_api.update_appnode(request, id, # os_tenant_name=os_tenant_name, # os_username=os_username, # os_password=<PASSWORD>, # os_auth_url=os_auth_url, # ssh_status="", # log_info="") # # messages.success(request, _('OpenStack auth has been updated successfully.')) # return True # # if failed: # failed = map(force_unicode, failed) # messages.error(request, # _('Unable to update %(attributes)s for the user.') # % {"attributes": ", ".join(failed)}) # return True
[ "django.utils.translation.ugettext_lazy", "logging.getLogger", "horizon.forms.PasswordInput" ]
[((706, 733), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (723, 733), False, 'import logging\n'), ((902, 918), 'django.utils.translation.ugettext_lazy', '_', (['"""Tenant Name"""'], {}), "('Tenant Name')\n", (903, 918), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1117, 1130), 'django.utils.translation.ugettext_lazy', '_', (['"""UserName"""'], {}), "('UserName')\n", (1118, 1130), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1329, 1342), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (1330, 1342), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1359, 1398), 'horizon.forms.PasswordInput', 'forms.PasswordInput', ([], {'render_value': '(False)'}), '(render_value=False)\n', (1378, 1398), False, 'from horizon import forms\n'), ((1593, 1606), 'django.utils.translation.ugettext_lazy', '_', (['"""Auth Url"""'], {}), "('Auth Url')\n", (1594, 1606), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1808, 1824), 'django.utils.translation.ugettext_lazy', '_', (['"""Region Name"""'], {}), "('Region Name')\n", (1809, 1824), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1955, 1973), 'django.utils.translation.ugettext_lazy', '_', (['"""SSH User Name"""'], {}), "('SSH User Name')\n", (1956, 1973), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3882, 3889), 'django.utils.translation.ugettext_lazy', '_', (['"""ID"""'], {}), "('ID')\n", (3883, 3889), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3972, 3988), 'django.utils.translation.ugettext_lazy', '_', (['"""Tenant Name"""'], {}), "('Tenant Name')\n", (3973, 3988), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4187, 4200), 'django.utils.translation.ugettext_lazy', '_', (['"""UserName"""'], {}), "('UserName')\n", (4188, 4200), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4399, 4412), 'django.utils.translation.ugettext_lazy', '_', (['"""Password"""'], {}), "('Password')\n", (4400, 4412), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4429, 4468), 'horizon.forms.PasswordInput', 'forms.PasswordInput', ([], {'render_value': '(False)'}), '(render_value=False)\n', (4448, 4468), False, 'from horizon import forms\n'), ((4663, 4676), 'django.utils.translation.ugettext_lazy', '_', (['"""Auth Url"""'], {}), "('Auth Url')\n", (4664, 4676), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4878, 4894), 'django.utils.translation.ugettext_lazy', '_', (['"""Region Name"""'], {}), "('Region Name')\n", (4879, 4894), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5025, 5043), 'django.utils.translation.ugettext_lazy', '_', (['"""SSH User Name"""'], {}), "('SSH User Name')\n", (5026, 5043), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1021, 1049), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (1022, 1049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1233, 1261), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (1234, 1261), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1497, 1525), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (1498, 
1525), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1709, 1737), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (1710, 1737), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2076, 2104), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (2077, 2104), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4091, 4119), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (4092, 4119), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4303, 4331), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (4304, 4331), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4567, 4595), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (4568, 4595), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4779, 4807), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (4780, 4807), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5146, 5174), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (5147, 5174), True, 'from django.utils.translation import ugettext_lazy as _\n')]
# coding=utf-8 import xml.etree.ElementTree as ET import sys import os import glob import shutil import cv2 from multiprocessing import Pool from multiprocessing import Manager from multiprocessing import Process import numpy as np import pickle def restore_file(path): df = open(path, 'rb') file = pickle.load(df) df.close() return file def save_file(file, path, protocol=None): df = open(path, 'wb') if protocol is None: pickle.dump(file, df) else: pickle.dump(file, df, protocol=protocol) df.close() print('Successfully save ', path) def get_direction(xml_path): tree = ET.parse(xml_path) rect={} line="" root = tree.getroot() #for name in root.iter('path'): # rect['path'] = os.path.basename(name.text) def get_info(ob, name): for front in ob.iter(name): return int(front.text) for ob in root.iter('attributes'): rect['front'] = get_info(ob, 'front') rect['back'] = get_info(ob, 'back') rect['side'] = get_info(ob, 'side') rect['front_side'] = get_info(ob, 'front_side') rect['back_side'] = get_info(ob, 'back_side') rect['noise'] = get_info(ob, 'noise') try: sums = sum(rect.values()) except: sums = 0 return rect, sums def mkdirs(root_dir): if os.path.exists(root_dir) is False: os.mkdir(root_dir) direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error'] for i in direction_list: if os.path.exists(root_dir+i) is False: os.mkdir(root_dir+i) def get_copy_list(): save_dir = 'cuhk03_train_fixed2/' mkdirs(save_dir) xml_list = glob.glob('cuhk03_annotations/*.xml') copy_list = [] print('len(xml_list):', len(xml_list)) key_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise'] num_dict = {} for i in key_list: num_dict[i] = 0 for index, path in enumerate(xml_list): if index % 5000 == 0: print(index, len(xml_list)) rect, sums = get_direction(path) if sums == 0: #shutil.copyfile(path, save_dir+'null/'+os.path.basename(path)) copy_list.append([path, save_dir+'null/'+os.path.basename(path)]) path1 = path.replace('.xml', '.jpg') #shutil.copyfile(path1, save_dir+'null/'+os.path.basename(path1)) copy_list.append([path1, save_dir+'null/'+os.path.basename(path1)]) continue if sums > 1: #shutil.copyfile(path, save_dir+'error/'+os.path.basename(path)) copy_list.append([path, save_dir+'error/'+os.path.basename(path)]) path1 = path.replace('.xml', '.jpg') #shutil.copyfile(path1, save_dir+'error/'+os.path.basename(path1)) copy_list.append([path1, save_dir+'error/'+os.path.basename(path1)]) continue for key in rect.keys(): if rect[key] == 1: num_dict[key] += 1 path1 = path.replace('.xml', '.jpg') #shutil.copyfile(path1, save_dir+key+'/'+os.path.basename(path1)) copy_list.append([path1, save_dir+key+'/'+os.path.basename(path1)]) break print('-------------') for i in key_list: print(i, num_dict[i], round(num_dict[i]/len(xml_list), 3)) print('-------------') print(round((num_dict['front']+num_dict['front_side'])/len(xml_list), 3)) print(round((num_dict['back']+num_dict['back_side'])/len(xml_list), 3)) print(round((num_dict['side'])/len(xml_list), 3)) return copy_list def copy_img(path_list): for path in path_list: shutil.copyfile(path[0], path[1]) def split_direction(): copy_list = get_copy_list() print('len(copy_list):', len(copy_list)) #exit(0) num_jobs = 8 index_list = len(copy_list)*np.arange(0,1,1/num_jobs) index_list = [int(i) for i in index_list] index_list.append(len(copy_list)) print(index_list) processes = list() for i in range(num_jobs): p = Process(target=copy_img, args=(copy_list[index_list[i]:index_list[i+1]],)) print('Process %d will start.' 
% i) p.start() processes.append(p) for p in processes: p.join() def get_bbox(xml_path): tree = ET.parse(xml_path) rect={} line="" root = tree.getroot() #for name in root.iter('path'): # rect['path'] = os.path.basename(name.text) def get_info(ob, name): for front in ob.iter(name): return int(front.text) for ob in root.iter('bndbox'): #for obb in root.iter('bndbox'): xmin = get_info(ob, 'xmin') ymin = get_info(ob, 'ymin') xmax = get_info(ob, 'xmax') ymax = get_info(ob, 'ymax') break print(xmin, xmax, ymin, ymax) return xmin, xmax, ymin, ymax if __name__ == '__main__': ''' name = 'wait_to_crop_train/0010_c6s4_002427_07.jpg' xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_train/0010_c6s4_002427_07.xml') img = cv2.imread(name) #cv2.rectangle(img, (xmin,ymin),(xmax,ymax), (255,0,0),1) img2 = img[ymin:ymax, xmin:xmax] cv2.imshow('image', img2) cv2.waitKey(0) exit(0) ''' image_list = glob.glob('wait_to_crop_test/*.jpg') for name in image_list: basename = os.path.basename(name) img = cv2.imread(name) if os.path.exists('wait_to_crop_test/'+basename[:-4]+'.xml'): xmin, xmax, ymin, ymax = get_bbox('wait_to_crop_test/'+basename[:-4]+'.xml') img = cv2.imread(name) img2 = img[ymin:ymax, xmin:xmax] cv2.imwrite('crop_test/'+basename, img2) exit(0) #split_direction() image_map_direction = {} direction_map_image = {} img_list = [] save_dir = 'cuhk03_train_fixed2/' direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise'] map_int = {'front':0, 'front_side': 0, 'side':1, 'noise':1, 'back': 2, 'back_side':2,} map_int2 = {'front':0, 'front_side': 1, 'side':2, 'noise':5, 'back': 3, 'back_side':4,} direction_int_list = [] direction_int_list2 = [] for i in direction_list: image_list = os.listdir(save_dir+i) direction_map_image[i] = image_list for name in image_list: image_map_direction[name] = (map_int[i], i) direction_int_list.append(map_int[i]) direction_int_list2.append(map_int2[i]) if name[-8:] == '.jpg.jpg': image_map_direction[name[:-4]] = (map_int[i], i) print(name, name[:-4]) print(len(direction_int_list), round(direction_int_list.count(0)/len(direction_int_list), 2), round(direction_int_list.count(1)/len(direction_int_list), 2), round(direction_int_list.count(2)/len(direction_int_list), 2)) print(set(direction_int_list)) print(len(direction_int_list2), round(direction_int_list2.count(0)/len(direction_int_list2), 2), round(direction_int_list2.count(1)/len(direction_int_list2), 2), round(direction_int_list2.count(2)/len(direction_int_list2), 2), round(direction_int_list2.count(3)/len(direction_int_list2), 2), round(direction_int_list2.count(4)/len(direction_int_list2), 2), round(direction_int_list2.count(5)/len(direction_int_list2), 2)) print(set(direction_int_list2)) save_file(image_map_direction, 'cuhk03_image_map_direction.pkl') save_file(direction_map_image, 'cuhk03_direction_map_image.pkl') save_file(image_map_direction, 'cuhk03_image_map_direction_py2.pkl', 2) save_file(direction_map_image, 'cuhk03_direction_map_image_py2.pkl', 2) print(len(image_map_direction)) exit(0) print(image_map_direction) exit(0) image_map_direction = {} direction_map_image = {} save_dir = 'market1501_full_fixed2/' direction_list = ['front', 'back', 'side', 'front_side', 'back_side', 'noise', 'null', 'error'] for i in direction_list: image_list = os.listdir(save_dir+i) exit(0) exit(0) #save_dir = 'DukeMTMC-reID_detail/' save_dir = 'DukeMTMC-reID_detail/' direction_list = ['front', 'back', 'side', 'front_side', 'back_side'] for i in direction_list: listglob1 = glob.glob(save_dir+i+'/*.jpg') for path in 
listglob1: img = cv2.imread(path) img = cv2.resize(img, ((50,120))) cv2.imwrite(path, img) #line = rect['path'] + "\t"+ rect['xmin']+ "\t"+rect['ymin']+"\t"+rect['xmax']+"\t"+rect['ymax']
[ "xml.etree.ElementTree.parse", "pickle.dump", "os.mkdir", "os.path.basename", "cv2.imwrite", "os.path.exists", "cv2.imread", "pickle.load", "numpy.arange", "glob.glob", "shutil.copyfile", "multiprocessing.Process", "os.listdir", "cv2.resize" ]
[((309, 324), 'pickle.load', 'pickle.load', (['df'], {}), '(df)\n', (320, 324), False, 'import pickle\n'), ((634, 652), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (642, 652), True, 'import xml.etree.ElementTree as ET\n'), ((1723, 1760), 'glob.glob', 'glob.glob', (['"""cuhk03_annotations/*.xml"""'], {}), "('cuhk03_annotations/*.xml')\n", (1732, 1760), False, 'import glob\n'), ((4349, 4367), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (4357, 4367), True, 'import xml.etree.ElementTree as ET\n'), ((5306, 5342), 'glob.glob', 'glob.glob', (['"""wait_to_crop_test/*.jpg"""'], {}), "('wait_to_crop_test/*.jpg')\n", (5315, 5342), False, 'import glob\n'), ((458, 479), 'pickle.dump', 'pickle.dump', (['file', 'df'], {}), '(file, df)\n', (469, 479), False, 'import pickle\n'), ((498, 538), 'pickle.dump', 'pickle.dump', (['file', 'df'], {'protocol': 'protocol'}), '(file, df, protocol=protocol)\n', (509, 538), False, 'import pickle\n'), ((1354, 1378), 'os.path.exists', 'os.path.exists', (['root_dir'], {}), '(root_dir)\n', (1368, 1378), False, 'import os\n'), ((1397, 1415), 'os.mkdir', 'os.mkdir', (['root_dir'], {}), '(root_dir)\n', (1405, 1415), False, 'import os\n'), ((3709, 3742), 'shutil.copyfile', 'shutil.copyfile', (['path[0]', 'path[1]'], {}), '(path[0], path[1])\n', (3724, 3742), False, 'import shutil\n'), ((3908, 3937), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / num_jobs)'], {}), '(0, 1, 1 / num_jobs)\n', (3917, 3937), True, 'import numpy as np\n'), ((4106, 4182), 'multiprocessing.Process', 'Process', ([], {'target': 'copy_img', 'args': '(copy_list[index_list[i]:index_list[i + 1]],)'}), '(target=copy_img, args=(copy_list[index_list[i]:index_list[i + 1]],))\n', (4113, 4182), False, 'from multiprocessing import Process\n'), ((5390, 5412), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (5406, 5412), False, 'import os\n'), ((5427, 5443), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (5437, 5443), False, 'import cv2\n'), ((5455, 5516), 'os.path.exists', 'os.path.exists', (["('wait_to_crop_test/' + basename[:-4] + '.xml')"], {}), "('wait_to_crop_test/' + basename[:-4] + '.xml')\n", (5469, 5516), False, 'import os\n'), ((6422, 6446), 'os.listdir', 'os.listdir', (['(save_dir + i)'], {}), '(save_dir + i)\n', (6432, 6446), False, 'import os\n'), ((8269, 8293), 'os.listdir', 'os.listdir', (['(save_dir + i)'], {}), '(save_dir + i)\n', (8279, 8293), False, 'import os\n'), ((8526, 8560), 'glob.glob', 'glob.glob', (["(save_dir + i + '/*.jpg')"], {}), "(save_dir + i + '/*.jpg')\n", (8535, 8560), False, 'import glob\n'), ((1556, 1584), 'os.path.exists', 'os.path.exists', (['(root_dir + i)'], {}), '(root_dir + i)\n', (1570, 1584), False, 'import os\n'), ((1605, 1627), 'os.mkdir', 'os.mkdir', (['(root_dir + i)'], {}), '(root_dir + i)\n', (1613, 1627), False, 'import os\n'), ((5621, 5637), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (5631, 5637), False, 'import cv2\n'), ((5695, 5737), 'cv2.imwrite', 'cv2.imwrite', (["('crop_test/' + basename)", 'img2'], {}), "('crop_test/' + basename, img2)\n", (5706, 5737), False, 'import cv2\n'), ((8606, 8622), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (8616, 8622), False, 'import cv2\n'), ((8641, 8667), 'cv2.resize', 'cv2.resize', (['img', '(50, 120)'], {}), '(img, (50, 120))\n', (8651, 8667), False, 'import cv2\n'), ((8681, 8703), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img'], {}), '(path, img)\n', (8692, 8703), False, 'import cv2\n'), ((2273, 
2295), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2289, 2295), False, 'import os\n'), ((2479, 2502), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (2495, 2502), False, 'import os\n'), ((2678, 2700), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2694, 2700), False, 'import os\n'), ((2886, 2909), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (2902, 2909), False, 'import os\n'), ((3224, 3247), 'os.path.basename', 'os.path.basename', (['path1'], {}), '(path1)\n', (3240, 3247), False, 'import os\n')]
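split_direction above hand-rolls the chunking and process fan-out that multiprocessing.Pool (imported in the script but unused) provides directly; a sketch of the same copy job with Pool.map, assuming the worker is a picklable top-level function:

import shutil
from multiprocessing import Pool

def copy_one(pair):
    src, dst = pair
    shutil.copyfile(src, dst)

def split_direction_with_pool(copy_list, num_jobs=8):
    # Pool handles chunk sizes and process lifetime; no manual index math needed.
    with Pool(processes=num_jobs) as pool:
        pool.map(copy_one, copy_list)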
# A short, simple script for taking the total deaths and putting them
# in a more human-readable format.
import pandas as pd

countries = ["South Korea", "United States"]
diseases = ["1918h1n1", "coronavirus", "measles", "smallpox"]
files = [cnt + "_" + dis + ".csv" for cnt in countries for dis in diseases]

filesToCountries = dict()
for cnt in countries:
    for dis in diseases:
        filesToCountries[cnt + "_" + dis + ".csv"] = cnt

total_deaths = []
for datafile in files:
    df = pd.read_csv("output-data/" + datafile)
    total_deaths.append(list(df.iloc[-1, :])[25::25])

out_dict = {}
for i in range(len(total_deaths)):
    name = files[i][:-4]
    deaths = total_deaths[i]
    out_dict[name] = deaths
# print(out_dict)

out_df = pd.DataFrame.from_dict(out_dict)
out_df.index = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250,
                275, 300, 325, 350, 375, 400, 425, 450, 475, 500]
print(out_df)
out_df.to_csv("output-data/total-deaths-time-slices-all-simulations.csv")
[ "pandas.read_csv", "pandas.DataFrame.from_dict" ]
[((755, 787), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['out_dict'], {}), '(out_dict)\n', (777, 787), True, 'import pandas as pd\n'), ((502, 540), 'pandas.read_csv', 'pd.read_csv', (["('output-data/' + datafile)"], {}), "('output-data/' + datafile)\n", (513, 540), True, 'import pandas as pd\n')]
# coding: utf-8

import pprint
import re

import six


class RefreshPreheatingBody:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'task_type': 'str',
        'status': 'str',
        'processing': 'int',
        'succeed': 'int',
        'failed': 'int',
        'total': 'int',
        'create_time': 'int',
        'urls': 'list[str]'
    }

    attribute_map = {
        'id': 'id',
        'task_type': 'task_type',
        'status': 'status',
        'processing': 'processing',
        'succeed': 'succeed',
        'failed': 'failed',
        'total': 'total',
        'create_time': 'create_time',
        'urls': 'urls'
    }

    def __init__(self, id=None, task_type=None, status=None, processing=None,
                 succeed=None, failed=None, total=None, create_time=None, urls=None):
        """RefreshPreheatingBody - a model defined in huaweicloud sdk"""

        self._id = None
        self._task_type = None
        self._status = None
        self._processing = None
        self._succeed = None
        self._failed = None
        self._total = None
        self._create_time = None
        self._urls = None
        self.discriminator = None

        if id is not None:
            self.id = id
        if task_type is not None:
            self.task_type = task_type
        if status is not None:
            self.status = status
        if processing is not None:
            self.processing = processing
        if succeed is not None:
            self.succeed = succeed
        if failed is not None:
            self.failed = failed
        if total is not None:
            self.total = total
        if create_time is not None:
            self.create_time = create_time
        if urls is not None:
            self.urls = urls

    @property
    def id(self):
        """Gets the id of this RefreshPreheatingBody.

        Task ID.

        :return: The id of this RefreshPreheatingBody.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this RefreshPreheatingBody.

        Task ID.

        :param id: The id of this RefreshPreheatingBody.
        :type: str
        """
        self._id = id

    @property
    def task_type(self):
        """Gets the task_type of this RefreshPreheatingBody.

        Task type; the value can be refresh or preheating.

        :return: The task_type of this RefreshPreheatingBody.
        :rtype: str
        """
        return self._task_type

    @task_type.setter
    def task_type(self, task_type):
        """Sets the task_type of this RefreshPreheatingBody.

        Task type; the value can be refresh or preheating.

        :param task_type: The task_type of this RefreshPreheatingBody.
        :type: str
        """
        self._task_type = task_type

    @property
    def status(self):
        """Gets the status of this RefreshPreheatingBody.

        Refresh result. task_done means the refresh succeeded;
        task_inprocess means the refresh is still in progress.

        :return: The status of this RefreshPreheatingBody.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this RefreshPreheatingBody.

        Refresh result. task_done means the refresh succeeded;
        task_inprocess means the refresh is still in progress.

        :param status: The status of this RefreshPreheatingBody.
        :type: str
        """
        self._status = status

    @property
    def processing(self):
        """Gets the processing of this RefreshPreheatingBody.

        Number of URLs being processed.

        :return: The processing of this RefreshPreheatingBody.
        :rtype: int
        """
        return self._processing

    @processing.setter
    def processing(self, processing):
        """Sets the processing of this RefreshPreheatingBody.

        Number of URLs being processed.

        :param processing: The processing of this RefreshPreheatingBody.
        :type: int
        """
        self._processing = processing

    @property
    def succeed(self):
        """Gets the succeed of this RefreshPreheatingBody.

        Number of URLs processed successfully.

        :return: The succeed of this RefreshPreheatingBody.
        :rtype: int
        """
        return self._succeed

    @succeed.setter
    def succeed(self, succeed):
        """Sets the succeed of this RefreshPreheatingBody.

        Number of URLs processed successfully.

        :param succeed: The succeed of this RefreshPreheatingBody.
        :type: int
        """
        self._succeed = succeed

    @property
    def failed(self):
        """Gets the failed of this RefreshPreheatingBody.

        Number of URLs that failed to be processed.

        :return: The failed of this RefreshPreheatingBody.
        :rtype: int
        """
        return self._failed

    @failed.setter
    def failed(self, failed):
        """Sets the failed of this RefreshPreheatingBody.

        Number of URLs that failed to be processed.

        :param failed: The failed of this RefreshPreheatingBody.
        :type: int
        """
        self._failed = failed

    @property
    def total(self):
        """Gets the total of this RefreshPreheatingBody.

        Total number of tasks.

        :return: The total of this RefreshPreheatingBody.
        :rtype: int
        """
        return self._total

    @total.setter
    def total(self, total):
        """Sets the total of this RefreshPreheatingBody.

        Total number of tasks.

        :param total: The total of this RefreshPreheatingBody.
        :type: int
        """
        self._total = total

    @property
    def create_time(self):
        """Gets the create_time of this RefreshPreheatingBody.

        Task creation time, in milliseconds since UTC 1970-01-01.

        :return: The create_time of this RefreshPreheatingBody.
        :rtype: int
        """
        return self._create_time

    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this RefreshPreheatingBody.

        Task creation time, in milliseconds since UTC 1970-01-01.

        :param create_time: The create_time of this RefreshPreheatingBody.
        :type: int
        """
        self._create_time = create_time

    @property
    def urls(self):
        """Gets the urls of this RefreshPreheatingBody.

        List of URLs whose cache is refreshed.

        :return: The urls of this RefreshPreheatingBody.
        :rtype: list[str]
        """
        return self._urls

    @urls.setter
    def urls(self, urls):
        """Sets the urls of this RefreshPreheatingBody.

        List of URLs whose cache is refreshed.

        :param urls: The urls of this RefreshPreheatingBody.
        :type: list[str]
        """
        self._urls = urls

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RefreshPreheatingBody):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
[ "six.iteritems" ]
[((6835, 6868), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (6848, 6868), False, 'import six\n')]
#!/usr/bin/python
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:
        break

cv.DestroyAllWindows()
[ "cv2.cv.NamedWindow", "cv2.cv.WaitKey", "cv2.cv.QueryFrame", "cv2.cv.CaptureFromCAM", "cv2.cv.ShowImage", "cv2.cv.DestroyAllWindows" ]
[((52, 79), 'cv2.cv.NamedWindow', 'cv.NamedWindow', (['"""camera"""', '(1)'], {}), "('camera', 1)\n", (66, 79), True, 'import cv2.cv as cv\n'), ((91, 111), 'cv2.cv.CaptureFromCAM', 'cv.CaptureFromCAM', (['(0)'], {}), '(0)\n', (108, 111), True, 'import cv2.cv as cv\n'), ((233, 255), 'cv2.cv.DestroyAllWindows', 'cv.DestroyAllWindows', ([], {}), '()\n', (253, 255), True, 'import cv2.cv as cv\n'), ((135, 157), 'cv2.cv.QueryFrame', 'cv.QueryFrame', (['capture'], {}), '(capture)\n', (148, 157), True, 'import cv2.cv as cv\n'), ((162, 189), 'cv2.cv.ShowImage', 'cv.ShowImage', (['"""camera"""', 'img'], {}), "('camera', img)\n", (174, 189), True, 'import cv2.cv as cv\n'), ((197, 211), 'cv2.cv.WaitKey', 'cv.WaitKey', (['(10)'], {}), '(10)\n', (207, 211), True, 'import cv2.cv as cv\n')]
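cv2.cv is the legacy OpenCV 1.x wrapper and was removed in OpenCV 3; a sketch of the equivalent capture loop against the modern cv2 API:

#!/usr/bin/python
import cv2

capture = cv2.VideoCapture(0)
while True:
    ok, img = capture.read()  # ok is False when no frame is available
    if not ok:
        break
    cv2.imshow("camera", img)
    if cv2.waitKey(10) == 27:  # stop on Esc
        break
capture.release()
cv2.destroyAllWindows()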
#pylint: disable=E1101, E0401, E1102, W0621, W0221
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import time
from random import SystemRandom
import models
import utils

parser = argparse.ArgumentParser()
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--latent-dim', type=int, default=32)
parser.add_argument('--rec-hidden', type=int, default=32)
parser.add_argument('--gen-hidden', type=int, default=50)
parser.add_argument('--embed-time', type=int, default=128)
parser.add_argument('--save', type=int, default=1)
parser.add_argument('--enc', type=str, default='mtan_rnn')
parser.add_argument('--dec', type=str, default='mtan_rnn')
parser.add_argument('--fname', type=str, default=None)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--split', type=int, default=0)
parser.add_argument('--n', type=int, default=8000)
parser.add_argument('--batch-size', type=int, default=50)
parser.add_argument('--classif', action='store_true',
                    help="Include binary classification loss")
parser.add_argument('--freq', type=float, default=10.)
parser.add_argument('--k-iwae', type=int, default=10)
parser.add_argument('--norm', action='store_true')
parser.add_argument('--kl', action='store_true')
parser.add_argument('--learn-emb', action='store_true')
parser.add_argument('--dataset', type=str, default='activity')
parser.add_argument('--alpha', type=int, default=100.)
parser.add_argument('--enc-num-heads', type=int, default=1)
parser.add_argument('--dec-num-heads', type=int, default=1)
parser.add_argument('--num-ref-points', type=int, default=128)
parser.add_argument('--classify-pertp', action='store_true')
args = parser.parse_args()


if __name__ == '__main__':
    experiment_id = int(SystemRandom().random() * 100000)
    print(args, experiment_id)
    seed = args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    random.seed(seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if args.dataset == 'activity':
        data_obj = utils.get_activity_data(args, 'cpu')
    train_loader = data_obj["train_dataloader"]
    test_loader = data_obj["test_dataloader"]
    val_loader = data_obj["val_dataloader"]
    dim = data_obj["input_dim"]

    if args.enc == 'enc_rnn3':
        rec = models.enc_rnn3(
            dim, torch.linspace(0, 1., 50), args.latent_dim, args.rec_hidden,
            128, learn_emb=args.learn_emb).to(device)
    elif args.enc == 'mtan_rnn':
        rec = models.enc_mtan_rnn(
            dim, torch.linspace(0, 1., args.num_ref_points), args.latent_dim,
            args.rec_hidden, embed_time=128, learn_emb=args.learn_emb,
            num_heads=args.enc_num_heads).to(device)

    if args.dec == 'rnn3':
        dec = models.dec_rnn3(
            dim, torch.linspace(0, 1., 50), args.latent_dim, args.gen_hidden,
            128, learn_emb=args.learn_emb).to(device)
    elif args.dec == 'mtan_rnn':
        dec = models.dec_mtan_rnn(
            dim, torch.linspace(0, 1., args.num_ref_points), args.latent_dim,
            args.gen_hidden, embed_time=128, learn_emb=args.learn_emb,
            num_heads=args.dec_num_heads).to(device)

    classifier = nn.Sequential(
        nn.Linear(args.latent_dim, 300),
        nn.ReLU(),
        nn.Linear(300, 300),
        nn.ReLU(),
        nn.Linear(300, 11))
    classifier = classifier.to(device)

    params = (list(rec.parameters()) + list(dec.parameters())
              + list(classifier.parameters()))
    print('parameters:', utils.count_parameters(rec),
          utils.count_parameters(dec), utils.count_parameters(classifier))
    optimizer = optim.Adam(params, lr=args.lr)
    criterion = nn.CrossEntropyLoss()

    best_val_loss = float('inf')
    total_time = 0.
    for itr in range(1, args.niters + 1):
        train_recon_loss, train_ce_loss = 0, 0
        mse = 0
        train_n = 0
        train_acc = 0
        # avg_reconst, avg_kl, mse = 0, 0, 0
        if args.kl:
            # Warm up the KL weight, then anneal it toward 1.
            wait_until_kl_inc = 10
            if itr < wait_until_kl_inc:
                kl_coef = 0.
            else:
                kl_coef = 1 - 0.99 ** (itr - wait_until_kl_inc)
        else:
            kl_coef = 1
        start_time = time.time()

        for train_batch, label in train_loader:
            train_batch, label = train_batch.to(device), label.to(device)
            batch_len = train_batch.shape[0]
            observed_data, observed_mask, observed_tp = (
                train_batch[:, :, :dim],
                train_batch[:, :, dim:2 * dim],
                train_batch[:, :, -1])
            out = rec(torch.cat((observed_data, observed_mask), 2), observed_tp)
            qz0_mean = out[:, :, :args.latent_dim]
            qz0_logvar = out[:, :, args.latent_dim:]
            # Reparameterization: draw k_iwae latent samples per sequence.
            epsilon = torch.randn(args.k_iwae, qz0_mean.shape[0],
                                  qz0_mean.shape[1], qz0_mean.shape[2]).to(device)
            z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean
            z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
            pred_y = classifier(z0)
            # compute loss
            if args.classify_pertp:
                N = label.size(-1)
                out = pred_y.view(-1, N)
                label = label.view(-1, N)
                _, label = label.max(-1)
                ce_loss = criterion(out, label.long())
            else:
                # Sequence-level classification: score the classifier output.
                out = pred_y
                ce_loss = criterion(out, label)
            loss = ce_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_ce_loss += ce_loss.item() * batch_len
            train_acc += torch.mean((out.argmax(1) == label).float()).item() * batch_len
            train_n += batch_len
        total_time += time.time() - start_time

        val_loss, val_acc, val_auc = utils.evaluate_classifier(
            rec, val_loader, dec=dec, args=args, classifier=classifier,
            reconst=True, num_sample=1, dim=dim)
        if val_loss <= best_val_loss:
            best_val_loss = min(best_val_loss, val_loss)
            rec_state_dict = rec.state_dict()
            dec_state_dict = dec.state_dict()
            classifier_state_dict = classifier.state_dict()
            optimizer_state_dict = optimizer.state_dict()
        test_loss, test_acc, test_auc = utils.evaluate_classifier(
            rec, test_loader, dec=dec, args=args, classifier=classifier,
            reconst=True, num_sample=1, dim=dim)
        print('Iter: {}, recon_loss: {:.4f}, ce_loss: {:.4f}, acc: {:.4f}, '
              'mse: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}, '
              'test_acc: {:.4f}, test_auc: {:.4f}'
              .format(itr, train_recon_loss / train_n, train_ce_loss / train_n,
                      train_acc / train_n, mse / train_n, val_loss, val_acc,
                      test_acc, test_auc))
        if itr % 100 == 0 and args.save:
            torch.save({
                'args': args,
                'epoch': itr,
                'rec_state_dict': rec_state_dict,
                'dec_state_dict': dec_state_dict,
                'optimizer_state_dict': optimizer_state_dict,
                'classifier_state_dict': classifier_state_dict,
                'loss': -loss,
            }, args.dataset + '_' + args.enc + '_' + args.dec + '_' +
               str(experiment_id) + '.h5')
    print(best_val_loss)
    print(total_time)
[ "utils.get_activity_data", "numpy.random.seed", "argparse.ArgumentParser", "torch.cat", "torch.randn", "torch.exp", "random.seed", "torch.nn.Linear", "torch.manual_seed", "torch.cuda.manual_seed", "torch.optim.Adam", "torch.cuda.is_available", "utils.count_parameters", "torch.nn.ReLU", "random.SystemRandom", "utils.evaluate_classifier", "torch.nn.CrossEntropyLoss", "time.time", "torch.linspace" ]
[((245, 270), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (268, 270), False, 'import argparse\n'), ((1963, 1986), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1980, 1986), False, 'import torch\n'), ((1991, 2011), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2005, 2011), True, 'import numpy as np\n'), ((2016, 2044), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2038, 2044), False, 'import torch\n'), ((2049, 2066), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2060, 2066), False, 'import random\n'), ((3768, 3798), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'args.lr'}), '(params, lr=args.lr)\n', (3778, 3798), True, 'import torch.optim as optim\n'), ((3815, 3836), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3834, 3836), True, 'import torch.nn as nn\n'), ((2209, 2245), 'utils.get_activity_data', 'utils.get_activity_data', (['args', '"""cpu"""'], {}), "(args, 'cpu')\n", (2232, 2245), False, 'import utils\n'), ((3355, 3386), 'torch.nn.Linear', 'nn.Linear', (['args.latent_dim', '(300)'], {}), '(args.latent_dim, 300)\n', (3364, 3386), True, 'import torch.nn as nn\n'), ((3400, 3409), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3407, 3409), True, 'import torch.nn as nn\n'), ((3423, 3442), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(300)'], {}), '(300, 300)\n', (3432, 3442), True, 'import torch.nn as nn\n'), ((3456, 3465), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3463, 3465), True, 'import torch.nn as nn\n'), ((3479, 3497), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(11)'], {}), '(300, 11)\n', (3488, 3497), True, 'import torch.nn as nn\n'), ((3658, 3685), 'utils.count_parameters', 'utils.count_parameters', (['rec'], {}), '(rec)\n', (3680, 3685), False, 'import utils\n'), ((3687, 3714), 'utils.count_parameters', 'utils.count_parameters', (['dec'], {}), '(dec)\n', (3709, 3714), False, 'import utils\n'), ((3716, 3750), 'utils.count_parameters', 'utils.count_parameters', (['classifier'], {}), '(classifier)\n', (3738, 3750), False, 'import utils\n'), ((4350, 4361), 'time.time', 'time.time', ([], {}), '()\n', (4359, 4361), False, 'import time\n'), ((5949, 6076), 'utils.evaluate_classifier', 'utils.evaluate_classifier', (['rec', 'val_loader'], {'dec': 'dec', 'args': 'args', 'classifier': 'classifier', 'reconst': '(True)', 'num_sample': '(1)', 'dim': 'dim'}), '(rec, val_loader, dec=dec, args=args, classifier=\n classifier, reconst=True, num_sample=1, dim=dim)\n', (5974, 6076), False, 'import utils\n'), ((6430, 6558), 'utils.evaluate_classifier', 'utils.evaluate_classifier', (['rec', 'test_loader'], {'dec': 'dec', 'args': 'args', 'classifier': 'classifier', 'reconst': '(True)', 'num_sample': '(1)', 'dim': 'dim'}), '(rec, test_loader, dec=dec, args=args, classifier=\n classifier, reconst=True, num_sample=1, dim=dim)\n', (6455, 6558), False, 'import utils\n'), ((2112, 2137), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2135, 2137), False, 'import torch\n'), ((5887, 5898), 'time.time', 'time.time', ([], {}), '()\n', (5896, 5898), False, 'import time\n'), ((4703, 4747), 'torch.cat', 'torch.cat', (['(observed_data, observed_mask)', '(2)'], {}), '((observed_data, observed_mask), 2)\n', (4712, 4747), False, 'import torch\n'), ((1875, 1889), 'random.SystemRandom', 'SystemRandom', ([], {}), '()\n', (1887, 1889), False, 'from random import SystemRandom\n'), ((2504, 2530), 'torch.linspace', 'torch.linspace', (['(0)', 
'(1.0)', '(50)'], {}), '(0, 1.0, 50)\n', (2518, 2530), False, 'import torch\n'), ((2949, 2975), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', '(50)'], {}), '(0, 1.0, 50)\n', (2963, 2975), False, 'import torch\n'), ((4876, 4962), 'torch.randn', 'torch.randn', (['args.k_iwae', 'qz0_mean.shape[0]', 'qz0_mean.shape[1]', 'qz0_mean.shape[2]'], {}), '(args.k_iwae, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.\n shape[2])\n', (4887, 4962), False, 'import torch\n'), ((4996, 5023), 'torch.exp', 'torch.exp', (['(0.5 * qz0_logvar)'], {}), '(0.5 * qz0_logvar)\n', (5005, 5023), False, 'import torch\n'), ((2692, 2735), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', 'args.num_ref_points'], {}), '(0, 1.0, args.num_ref_points)\n', (2706, 2735), False, 'import torch\n'), ((3137, 3180), 'torch.linspace', 'torch.linspace', (['(0)', '(1.0)', 'args.num_ref_points'], {}), '(0, 1.0, args.num_ref_points)\n', (3151, 3180), False, 'import torch\n')]
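The kl_coef block in the training loop above is a warm-up-then-anneal schedule for the KL weight; isolated as a helper (hypothetical name kl_annealing), it reads:

def kl_annealing(itr, wait_until_kl_inc=10, rate=0.99):
    # Hold the KL weight at 0 during warm-up, then anneal it toward 1.
    if itr < wait_until_kl_inc:
        return 0.0
    return 1 - rate ** (itr - wait_until_kl_inc)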
from socialauth.models import AuthMeta, OpenidProfile, TwitterUserProfile, \
    FacebookUserProfile, LinkedInUserProfile, GithubUserProfile, FoursquareUserProfile
from django.contrib import admin

admin.site.register(AuthMeta)
admin.site.register(OpenidProfile)
admin.site.register(TwitterUserProfile)
admin.site.register(FacebookUserProfile)
admin.site.register(LinkedInUserProfile)
admin.site.register(GithubUserProfile)
admin.site.register(FoursquareUserProfile)
[ "django.contrib.admin.site.register" ]
[((195, 224), 'django.contrib.admin.site.register', 'admin.site.register', (['AuthMeta'], {}), '(AuthMeta)\n', (214, 224), False, 'from django.contrib import admin\n'), ((225, 259), 'django.contrib.admin.site.register', 'admin.site.register', (['OpenidProfile'], {}), '(OpenidProfile)\n', (244, 259), False, 'from django.contrib import admin\n'), ((260, 299), 'django.contrib.admin.site.register', 'admin.site.register', (['TwitterUserProfile'], {}), '(TwitterUserProfile)\n', (279, 299), False, 'from django.contrib import admin\n'), ((300, 340), 'django.contrib.admin.site.register', 'admin.site.register', (['FacebookUserProfile'], {}), '(FacebookUserProfile)\n', (319, 340), False, 'from django.contrib import admin\n'), ((341, 381), 'django.contrib.admin.site.register', 'admin.site.register', (['LinkedInUserProfile'], {}), '(LinkedInUserProfile)\n', (360, 381), False, 'from django.contrib import admin\n'), ((382, 420), 'django.contrib.admin.site.register', 'admin.site.register', (['GithubUserProfile'], {}), '(GithubUserProfile)\n', (401, 420), False, 'from django.contrib import admin\n'), ((421, 463), 'django.contrib.admin.site.register', 'admin.site.register', (['FoursquareUserProfile'], {}), '(FoursquareUserProfile)\n', (440, 463), False, 'from django.contrib import admin\n')]
__author__ = 'Arseniy'

import string
import random

from model.project import Project


def random_name(prefix, maxlen):
    symbols = string.ascii_letters
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


def test_add_project(app):
    app.session.login("administrator", "root")
    project = Project(name=random_name("proj_", 10))
    if project in app.soap.get_project_list():
        app.project.delete(project)
    old_projects = app.soap.get_project_list()
    app.project.create(project)
    new_projects = app.soap.get_project_list()
    assert len(old_projects) + 1 == len(new_projects)
    old_projects.append(project)
    assert sorted(old_projects, key=Project.name) == sorted(new_projects, key=Project.name)
[ "random.randrange", "random.choice" ]
[((184, 206), 'random.choice', 'random.choice', (['symbols'], {}), '(symbols)\n', (197, 206), False, 'import random\n'), ((222, 246), 'random.randrange', 'random.randrange', (['maxlen'], {}), '(maxlen)\n', (238, 246), False, 'import random\n')]
import pytest

from .solution import solve, Vent

INPUT = """0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
"""

POINTS = [
    (1, 1, 3, 3, [(1, 1), (2, 2), (3, 3)]),
    (3, 3, 1, 1, [(1, 1), (2, 2), (3, 3)]),
    (5, 5, 8, 2, [(5, 5), (6, 4), (7, 3), (8, 2)]),
]


@pytest.mark.parametrize('x1, y1, x2, y2, points', POINTS)
def test_points(x1, y1, x2, y2, points):
    v = Vent(x1, y1, x2, y2)
    assert list(sorted(v.get_points())) == points


def test_solve():
    assert solve(INPUT, diag=False) == 5


def test_solve2():
    assert solve(INPUT, diag=True) == 12
[ "pytest.mark.parametrize" ]
[((328, 385), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x1, y1, x2, y2, points"""', 'POINTS'], {}), "('x1, y1, x2, y2, points', POINTS)\n", (351, 385), False, 'import pytest\n')]
from django import forms
from django.contrib.auth import authenticate
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _

from .. import scope
from ..constants import RESPONSE_TYPE_CHOICES, SCOPES
from ..forms import OAuthForm, OAuthValidationError
from ..scope import SCOPE_NAMES
from ..utils import now
from .models import Client, Grant, RefreshToken


class ClientForm(forms.ModelForm):
    """
    Form to create new consumers.
    """
    class Meta:
        model = Client
        fields = ('name', 'url', 'redirect_uri', 'client_type')

    def save(self, user=None, **kwargs):
        self.instance.user = user
        return super(ClientForm, self).save(**kwargs)


class ClientAuthForm(forms.Form):
    """
    Client authentication form. Required to make sure that we're dealing with
    a real client. Form is used in :attr:`provider.oauth2.backends` to
    validate the client.
    """
    client_id = forms.CharField()
    client_secret = forms.CharField()

    def clean(self):
        data = self.cleaned_data
        try:
            client = Client.objects.get(client_id=data.get('client_id'),
                                        client_secret=data.get('client_secret'))
        except Client.DoesNotExist:
            raise forms.ValidationError(_("Client could not be validated with "
                                          "key pair."))

        data['client'] = client
        return data


class ScopeChoiceField(forms.ChoiceField):
    """
    Custom form field that separates values on space as defined in
    :rfc:`3.3`.
    """
    widget = forms.SelectMultiple

    def to_python(self, value):
        if not value:
            return []

        # New in Django 1.6: value may come in as a string.
        # Instead of raising an `OAuthValidationError`, try to parse and
        # ultimately return an empty list if nothing remains -- this will
        # eventually raise an `OAuthValidationError` in `validate` where
        # it should be anyways.
        if not isinstance(value, (list, tuple)):
            value = value.split(' ')

        # Split values into list
        return ' '.join([smart_text(val) for val in value]).split(' ')

    def validate(self, value):
        """
        Validates that the input is a list or tuple.
        """
        if self.required and not value:
            raise OAuthValidationError({'error': 'invalid_request'})

        # Validate that each value in the value list is in self.choices.
        for val in value:
            if not self.valid_value(val):
                raise OAuthValidationError({
                    'error': 'invalid_request',
                    'error_description': _("'%s' is not a valid scope.") % \
                            val})


class ScopeMixin(object):
    """
    Form mixin to clean scope fields.
    """
    def clean_scope(self):
        """
        The scope is assembled by combining all the set flags into a single
        integer value which we can later check again for set bits.

        If *no* scope is set, we return the default scope which is the first
        defined scope in :attr:`provider.constants.SCOPES`.
        """
        default = SCOPES[0][0]
        flags = self.cleaned_data.get('scope', [])

        return scope.to_int(default=default, *flags)


class AuthorizationRequestForm(ScopeMixin, OAuthForm):
    """
    This form is used to validate the request data that the authorization
    endpoint receives from clients.

    Included data is specified in :rfc:`4.1.1`.
    """
    # Setting all required fields to false to explicitly check by hand
    # and use custom error messages that can be reused in the OAuth2
    # protocol
    response_type = forms.CharField(required=False)
    """
    ``"code"`` or ``"token"`` depending on the grant type.
    """

    redirect_uri = forms.URLField(required=False)
    """
    Where the client would like to redirect the user
    back to. This has to match whatever value was saved while creating
    the client.
    """

    state = forms.CharField(required=False)
    """
    Opaque - just pass back to the client for validation.
    """

    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
    """
    The scope that the authorization should include.
    """

    def clean_response_type(self):
        """
        :rfc:`3.1.1` Lists of values are space delimited.
        """
        response_type = self.cleaned_data.get('response_type')

        if not response_type:
            raise OAuthValidationError({'error': 'invalid_request',
                'error_description': "No 'response_type' supplied."})

        types = response_type.split(" ")

        for type in types:
            if type not in RESPONSE_TYPE_CHOICES:
                raise OAuthValidationError({
                    'error': 'unsupported_response_type',
                    'error_description': "'%s' is not a supported response "
                        "type." % type})

        return response_type

    def clean_redirect_uri(self):
        """
        :rfc:`3.1.2` The redirect value has to match what was saved on the
        authorization server.
        """
        redirect_uri = self.cleaned_data.get('redirect_uri')

        if redirect_uri:
            if not redirect_uri == self.client.redirect_uri:
                raise OAuthValidationError({
                    'error': 'invalid_request',
                    'error_description': _("The requested redirect didn't "
                        "match the client settings.")})

        return redirect_uri


class AuthorizationForm(ScopeMixin, OAuthForm):
    """
    A form used to ask the resource owner for authorization of a given
    client.
    """
    authorize = forms.BooleanField(required=False)
    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)

    def save(self, **kwargs):
        authorize = self.cleaned_data.get('authorize')

        if not authorize:
            return None

        grant = Grant()
        grant.scope = self.cleaned_data.get('scope')
        return grant


class RefreshTokenGrantForm(ScopeMixin, OAuthForm):
    """
    Checks and returns a refresh token.
    """
    refresh_token = forms.CharField(required=False)
    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)

    def clean_refresh_token(self):
        token = self.cleaned_data.get('refresh_token')

        if not token:
            raise OAuthValidationError({'error': 'invalid_request'})

        try:
            token = RefreshToken.objects.get(token=token,
                expired=False, client=self.client)
        except RefreshToken.DoesNotExist:
            raise OAuthValidationError({'error': 'invalid_grant'})

        return token

    def clean(self):
        """
        Make sure that the scope is less or equal to the previous scope!
        """
        data = self.cleaned_data

        want_scope = data.get('scope') or 0
        refresh_token = data.get('refresh_token')
        access_token = getattr(refresh_token, 'access_token', None) if \
            refresh_token else \
            None
        has_scope = access_token.scope if access_token else 0

        # Only check if we've actually got a scope in the data
        # (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
            raise OAuthValidationError({'error': 'invalid_scope'})

        return data


class AuthorizationCodeGrantForm(ScopeMixin, OAuthForm):
    """
    Check and return an authorization grant.
    """
    code = forms.CharField(required=False)
    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)

    def clean_code(self):
        code = self.cleaned_data.get('code')

        if not code:
            raise OAuthValidationError({'error': 'invalid_request'})

        try:
            self.cleaned_data['grant'] = Grant.objects.get(
                code=code, client=self.client, expires__gt=now())
        except Grant.DoesNotExist:
            raise OAuthValidationError({'error': 'invalid_grant'})

        return code

    def clean(self):
        """
        Make sure that the scope is less or equal to the scope allowed on the
        grant!
        """
        data = self.cleaned_data
        want_scope = data.get('scope') or 0
        grant = data.get('grant')
        has_scope = grant.scope if grant else 0

        # Only check if we've actually got a scope in the data
        # (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
            raise OAuthValidationError({'error': 'invalid_scope'})

        return data


class PasswordGrantForm(ScopeMixin, OAuthForm):
    """
    Validate the password of a user on a password grant request.
    """
    username = forms.CharField(required=False)
    password = forms.CharField(required=False)
    scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)

    def clean_username(self):
        username = self.cleaned_data.get('username')

        if not username:
            raise OAuthValidationError({'error': 'invalid_request'})

        return username

    def clean_password(self):
        password = self.cleaned_data.get('password')

        if not password:
            raise OAuthValidationError({'error': 'invalid_request'})

        return password

    def clean(self):
        data = self.cleaned_data

        user = authenticate(username=data.get('username'),
            password=data.get('password'))

        if user is None:
            raise OAuthValidationError({'error': 'invalid_grant'})

        data['user'] = user
        return data


class PublicPasswordGrantForm(PasswordGrantForm):
    client_id = forms.CharField(required=True)
    grant_type = forms.CharField(required=True)

    def clean_grant_type(self):
        grant_type = self.cleaned_data.get('grant_type')

        if grant_type != 'password':
            raise OAuthValidationError({'error': 'invalid_grant'})

        return grant_type

    def clean(self):
        data = super(PublicPasswordGrantForm, self).clean()

        try:
            client = Client.objects.get(client_id=data.get('client_id'))
        except Client.DoesNotExist:
            raise OAuthValidationError({'error': 'invalid_client'})

        if client.client_type != 1:  # public
            raise OAuthValidationError({'error': 'invalid_client'})

        data['client'] = client
        return data
[ "django.forms.BooleanField", "django.forms.URLField", "django.utils.translation.ugettext", "django.forms.CharField", "django.utils.encoding.smart_text" ]
[((959, 976), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (974, 976), False, 'from django import forms\n'), ((997, 1014), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (1012, 1014), False, 'from django import forms\n'), ((3694, 3725), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (3709, 3725), False, 'from django import forms\n'), ((3821, 3851), 'django.forms.URLField', 'forms.URLField', ([], {'required': '(False)'}), '(required=False)\n', (3835, 3851), False, 'from django import forms\n'), ((4021, 4052), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (4036, 4052), False, 'from django import forms\n'), ((5715, 5749), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'required': '(False)'}), '(required=False)\n', (5733, 5749), False, 'from django import forms\n'), ((6182, 6213), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (6197, 6213), False, 'from django import forms\n'), ((7553, 7584), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (7568, 7584), False, 'from django import forms\n'), ((8795, 8826), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (8810, 8826), False, 'from django import forms\n'), ((8842, 8873), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(False)'}), '(required=False)\n', (8857, 8873), False, 'from django import forms\n'), ((9716, 9746), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (9731, 9746), False, 'from django import forms\n'), ((9764, 9794), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)'}), '(required=True)\n', (9779, 9794), False, 'from django import forms\n'), ((1289, 1338), 'django.utils.translation.ugettext', '_', (['"""Client could not be validated with key pair."""'], {}), "('Client could not be validated with key pair.')\n", (1290, 1338), True, 'from django.utils.translation import ugettext as _\n'), ((2125, 2140), 'django.utils.encoding.smart_text', 'smart_text', (['val'], {}), '(val)\n', (2135, 2140), False, 'from django.utils.encoding import smart_text\n'), ((5434, 5495), 'django.utils.translation.ugettext', '_', (['"""The requested redirect didn\'t match the client settings."""'], {}), '("The requested redirect didn\'t match the client settings.")\n', (5435, 5495), True, 'from django.utils.translation import ugettext as _\n'), ((2665, 2696), 'django.utils.translation.ugettext', '_', (['"""\'%s\' is not a valid scope."""'], {}), '("\'%s\' is not a valid scope.")\n', (2666, 2696), True, 'from django.utils.translation import ugettext as _\n')]
"""Gaussian process-based minimization algorithms.""" import numpy as np from sklearn.utils import check_random_state from .base import base_minimize from ..utils import cook_estimator from ..utils import normalize_dimensions def gp_minimize(func, dimensions, base_estimator=None, n_calls=100, n_random_starts=10, acq_func="gp_hedge", acq_optimizer="auto", x0=None, y0=None, random_state=None, verbose=False, callback=None, n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96, noise="gaussian", n_jobs=1, model_queue_size=None): """Bayesian optimization using Gaussian Processes. If every function evaluation is expensive, for instance when the parameters are the hyperparameters of a neural network and the function evaluation is the mean cross-validation score across ten folds, optimizing the hyperparameters by standard optimization routines would take for ever! The idea is to approximate the function using a Gaussian process. In other words the function values are assumed to follow a multivariate gaussian. The covariance of the function values are given by a GP kernel between the parameters. Then a smart choice to choose the next parameter to evaluate can be made by the acquisition function over the Gaussian prior which is much quicker to evaluate. The total number of evaluations, `n_calls`, are performed like the following. If `x0` is provided but not `y0`, then the elements of `x0` are first evaluated, followed by `n_random_starts` evaluations. Finally, `n_calls - len(x0) - n_random_starts` evaluations are made guided by the surrogate model. If `x0` and `y0` are both provided then `n_random_starts` evaluations are first made then `n_calls - n_random_starts` subsequent evaluations are made guided by the surrogate model. Parameters ---------- func : callable Function to minimize. Should take a single list of parameters and return the objective value. If you have a search-space where all dimensions have names, then you can use :func:`skopt.utils.use_named_args` as a decorator on your objective function, in order to call it directly with the named arguments. See `use_named_args` for an example. dimensions : [list, shape (n_dims,) List of search space dimensions. Each search dimension can be defined either as - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer` dimensions), - a `(lower_bound, upper_bound, "prior")` tuple (for `Real` dimensions), - as a list of categories (for `Categorical` dimensions), or - an instance of a `Dimension` object (`Real`, `Integer` or `Categorical`). .. note:: The upper and lower bounds are inclusive for `Integer` dimensions. base_estimator : a Gaussian process estimator The Gaussian process estimator to use for optimization. By default, a Matern kernel is used with the following hyperparameters tuned. - All the length scales of the Matern kernel. - The covariance amplitude that each element is multiplied with. - Noise that is added to the matern kernel. The noise is assumed to be iid gaussian. n_calls : int, default=100 Number of calls to `func`. n_random_starts : int, default=10 Number of evaluations of `func` with random points before approximating it with `base_estimator`. acq_func : string, default=`"gp_hedge"` Function to minimize over the gaussian prior. Can be either - `"LCB"` for lower confidence bound. - `"EI"` for negative expected improvement. - `"PI"` for negative probability of improvement. - `"gp_hedge"` Probabilistically choose one of the above three acquisition functions at every iteration. 
The weightage given to these gains can be set by :math:`\eta` through `acq_func_kwargs`. - The gains `g_i` are initialized to zero. - At every iteration, - Each acquisition function is optimised independently to propose an candidate point `X_i`. - Out of all these candidate points, the next point `X_best` is chosen by :math:`softmax(\eta g_i)` - After fitting the surrogate model with `(X_best, y_best)`, the gains are updated such that :math:`g_i -= \mu(X_i)` - `"EIps"` for negated expected improvement per second to take into account the function compute time. Then, the objective function is assumed to return two values, the first being the objective value and the second being the time taken in seconds. - `"PIps"` for negated probability of improvement per second. The return type of the objective function is assumed to be similar to that of `"EIps acq_optimizer : string, `"sampling"` or `"lbfgs"`, default=`"lbfgs"` Method to minimize the acquistion function. The fit model is updated with the optimal value obtained by optimizing `acq_func` with `acq_optimizer`. The `acq_func` is computed at `n_points` sampled randomly. - If set to `"auto"`, then `acq_optimizer` is configured on the basis of the space searched over. If the space is Categorical then this is set to be "sampling"`. - If set to `"sampling"`, then the point among these `n_points` where the `acq_func` is minimum is the next candidate minimum. - If set to `"lbfgs"`, then - The `n_restarts_optimizer` no. of points which the acquisition function is least are taken as start points. - `"lbfgs"` is run for 20 iterations with these points as initial points to find local minima. - The optimal of these local minima is used to update the prior. x0 : list, list of lists or `None` Initial input points. - If it is a list of lists, use it as a list of input points. - If it is a list, use it as a single initial input point. - If it is `None`, no initial input points are used. y0 : list, scalar or `None` Evaluation of initial input points. - If it is a list, then it corresponds to evaluations of the function at each element of `x0` : the i-th element of `y0` corresponds to the function evaluated at the i-th element of `x0`. - If it is a scalar, then it corresponds to the evaluation of the function at `x0`. - If it is None and `x0` is provided, then the function is evaluated at each element of `x0`. random_state : int, RandomState instance, or None (default) Set random state to something other than None for reproducible results. verbose : boolean, default=False Control the verbosity. It is advised to set the verbosity to True for long optimization runs. callback : callable, list of callables, optional If callable then `callback(res)` is called after each call to `func`. If list of callables, then each callable in the list is called. n_points : int, default=10000 Number of points to sample to determine the next "best" point. Useless if acq_optimizer is set to `"lbfgs"`. n_restarts_optimizer : int, default=5 The number of restarts of the optimizer when `acq_optimizer` is `"lbfgs"`. kappa : float, default=1.96 Controls how much of the variance in the predicted values should be taken into account. If set to be very high, then we are favouring exploration over exploitation and vice versa. Used when the acquisition is `"LCB"`. xi : float, default=0.01 Controls how much improvement one wants over the previous best values. Used when the acquisition is either `"EI"` or `"PI"`. 
noise : float, default="gaussian" - Use noise="gaussian" if the objective returns noisy observations. The noise of each observation is assumed to be iid with mean zero and a fixed variance. - If the variance is known before-hand, this can be set directly to the variance of the noise. - Set this to a value close to zero (1e-10) if the function is noise-free. Setting to zero might cause stability issues. n_jobs : int, default=1 Number of cores to run in parallel while running the lbfgs optimizations over the acquisition function. Valid only when `acq_optimizer` is set to "lbfgs." Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set to number of cores. model_queue_size : int or None, default=None Keeps list of models only as long as the argument given. In the case of None, the list has no capped length. Returns ------- res : `OptimizeResult`, scipy object The optimization result returned as a OptimizeResult object. Important attributes are: - `x` [list]: location of the minimum. - `fun` [float]: function value at the minimum. - `models`: surrogate models used for each iteration. - `x_iters` [list of lists]: location of function evaluation for each iteration. - `func_vals` [array]: function value for each iteration. - `space` [Space]: the optimization space. - `specs` [dict]`: the call specifications. - `rng` [RandomState instance]: State of the random state at the end of minimization. For more details related to the OptimizeResult object, refer http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html .. seealso:: functions :class:`skopt.forest_minimize`, :class:`skopt.dummy_minimize` """ # Check params rng = check_random_state(random_state) space = normalize_dimensions(dimensions) if base_estimator is None: base_estimator = cook_estimator( "GP", space=space, random_state=rng.randint(0, np.iinfo(np.int32).max), noise=noise) return base_minimize( func, space, base_estimator=base_estimator, acq_func=acq_func, xi=xi, kappa=kappa, acq_optimizer=acq_optimizer, n_calls=n_calls, n_points=n_points, n_random_starts=n_random_starts, n_restarts_optimizer=n_restarts_optimizer, x0=x0, y0=y0, random_state=rng, verbose=verbose, callback=callback, n_jobs=n_jobs, model_queue_size=model_queue_size)
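A minimal usage sketch (an addition for illustration, not part of the module): the toy objective and bounds are assumptions, but the calling convention follows the docstring above.

if __name__ == "__main__":
    # Minimize a toy 1-D quadratic; `res` is the OptimizeResult described above.
    res = gp_minimize(
        func=lambda x: (x[0] - 2.0) ** 2,    # invented objective
        dimensions=[(-5.0, 5.0)],            # one Real dimension
        n_calls=15,
        random_state=0,
    )
    print("best x:", res.x, "best value:", res.fun)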
[ "sklearn.utils.check_random_state", "numpy.iinfo" ]
[((10132, 10164), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (10150, 10164), False, 'from sklearn.utils import check_random_state\n'), ((10342, 10360), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (10350, 10360), True, 'import numpy as np\n')]
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # #     https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .laikago_v2 import LaikagoBulletV2 from pybullet_utils import bullet_client import pybullet import time import gym, gym.utils.seeding, gym.spaces import numpy as np import math import torch from gan import utils from collections import deque import os import inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # this is called V4 because it shares same obs space with Laikago env V4 # the robot is still Laikago V2 though, same as env V4 class LaikagoConFEnvV4(gym.Env): metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50} def __init__(self, render=True, init_noise=True, act_noise=False, obs_noise=False, control_skip=10, max_tar_vel=2.5, energy_weight=0.1, jl_weight=0.5, ab=5.0, q_pen_weight=0.4, dq_pen_weight=0.001, vel_r_weight=4.0, train_dyn=True, # if false, fix dyn and train motor policy pretrain_dyn=False, # pre-train with deviation to sim enlarge_act_range=0.0, # make behavior pi more diverse to match collection, only train_dyn behavior_dir="trained_models_laika_bullet_61/ppo", behavior_env_name="LaikagoBulletEnv-v4", behavior_iter=None, dyn_dir="", dyn_env_name="LaikagoConFEnv-v4", dyn_iter=None, cuda_env=True, ): self.render = render self.init_noise = init_noise self.obs_noise = obs_noise self.act_noise = act_noise self.control_skip = int(control_skip) self._ts = 1. / 500. self.max_tar_vel = max_tar_vel self.energy_weight = energy_weight self.jl_weight = jl_weight self.ab = ab self.q_pen_weight = q_pen_weight self.dq_pen_weight = dq_pen_weight self.vel_r_weight = vel_r_weight self.train_dyn = train_dyn self.enlarge_act_range = enlarge_act_range self.pretrain_dyn = pretrain_dyn self.cuda_env = cuda_env self.ratio = None if self.render: self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI) else: self._p = bullet_client.BulletClient() self.np_random = None self.robot = LaikagoBulletV2(init_noise=self.init_noise, time_step=self._ts, np_random=self.np_random) self.seed(0) # used once temporarily, will be overwritten outside though superclass api self.viewer = None self.timer = 0 self.behavior_past_obs_t_idx = [0, 4, 8] # self.behavior_past_obs_t_idx = [0] # t-3. t-6. 
t-9 (B does not take past a) self.generator_past_obs_t_idx = [0, 2] self.generator_past_act_t_idx = [0] self.past_obs_array = deque(maxlen=10) self.past_bact_array = deque(maxlen=10) # only need to store past behavior action if self.train_dyn: if behavior_iter: behavior_iter = int(behavior_iter) self.dyn_actor_critic = None # load fixed behavior policy self.go_actor_critic, _, \ self.recurrent_hidden_states, \ self.masks = utils.load( behavior_dir, behavior_env_name, self.cuda_env, behavior_iter ) else: if dyn_iter: dyn_iter = int(dyn_iter) # train motor policy self.go_actor_critic = None # load fixed dynamics model self.dyn_actor_critic, _, \ self.recurrent_hidden_states, \ self.masks = utils.load( dyn_dir, dyn_env_name, self.cuda_env, dyn_iter ) # # self.discri = utils.load_gail_discriminator(dyn_dir, # dyn_env_name, # self.cuda_env, # dyn_iter) # # self.feat_select_func = self.robot.feature_selection_all_laika self.reset_const = 100 self.reset_counter = self.reset_const # do a hard reset first # self.action_dim = 12 self.init_state = None obs = self.reset() # # self.d_scores = [] # # set up imaginary session for pre-train # self.set_up_imaginary_session() if self.train_dyn: self.action_dim = 12 # 12D action scales, see beginning of step() for comment else: self.action_dim = len(self.robot.ctrl_dofs) self.act = [0.0] * self.action_dim self.action_space = gym.spaces.Box(low=np.array([-1.] * self.action_dim), high=np.array([+1.] * self.action_dim)) obs_dummy = np.array([1.12234567] * len(obs)) self.observation_space = gym.spaces.Box(low=-np.inf * obs_dummy, high=np.inf * obs_dummy) def reset(self): if self.reset_counter < self.reset_const: self.reset_counter += 1 self._p.restoreState(self.init_state) self.robot.soft_reset(self._p) else: self.reset_counter = 0 self._p.resetSimulation() self._p.setTimeStep(self._ts) self._p.setGravity(0, 0, -10) self._p.setPhysicsEngineParameter(numSolverIterations=100) # self._p.setPhysicsEngineParameter(restitutionVelocityThreshold=0.000001) self.floor_id = self._p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0.0], useFixedBase=1) # conf policy does not use bullet collision self._p.setCollisionFilterGroupMask(self.floor_id, -1, 0, 0) self.robot.reset(self._p) self.init_state = self._p.saveState() self._p.stepSimulation() self.timer = 0 self.past_obs_array.clear() self.past_bact_array.clear() # self.d_scores = [] obs = self.get_extended_observation() # self.ratios = np.array([[]]).reshape(0, self.action_dim) return obs # def set_up_imaginary_session(self): # # create another bullet session to run reset & rollout # self._imaginary_p = bullet_client.BulletClient() # self._imaginary_robot = LaikagoBulletV2(init_noise=self.init_noise, # time_step=self._ts, # np_random=self.np_random) # # self._imaginary_p.resetSimulation() # self._imaginary_p.setTimeStep(self._ts) # self._imaginary_p.setGravity(0, 0, -10) # self._imaginary_p.setPhysicsEngineParameter(numSolverIterations=100) # # there is a floor in this session # floor_i = self._imaginary_p.loadURDF(os.path.join(currentdir, 'assets/plane.urdf'), [0, 0, 0.0], useFixedBase=1) # self._imaginary_robot.reset(self._imaginary_p) # # self._imaginary_robot.soft_reset(self._imaginary_p) # # # TODO: change torque limit for this session # # self._imaginary_p.stepSimulation() # def rollout_one_step_imaginary(self): # # and get the obs vector [no tar vel] in sim # assert self.train_dyn # assert self.pretrain_dyn # # # robo_obs = self.obs[:-self.behavior_act_len] # TODO: deprecate behavior_act_len # 
robo_action = self.obs[-self.behavior_act_len:] # # print(robo_obs, "in img obs") # # print(robo_action, "in img act") # # # robo_state_vec = self._imaginary_robot.transform_obs_to_state(robo_obs) # robo_state_vec = self.robot.get_robot_raw_state_vec() # # self._imaginary_robot.soft_reset_to_state(self._imaginary_p, robo_state_vec) # robo_state_i = self._imaginary_robot.get_robot_raw_state_vec() # # robo_action = np.clip(robo_action, -1.0, 1.0) # should also clip # for _ in range(self.control_skip): # self._imaginary_robot.apply_action(robo_action) # self._imaginary_p.stepSimulation() # # if self.render: # # time.sleep(self._ts * 0.5) # # return self._imaginary_robot.get_robot_observation(), robo_state_i # pre-state_i # def rollout_one_step_imaginary_same_session(self): # # and get the obs vector [no tar vel] in sim # assert self.train_dyn # assert self.pretrain_dyn # # robo_action = self.obs[-self.behavior_act_len:] # # robo_action = np.clip(robo_action, -1.0, 1.0) # should also clip # for _ in range(self.control_skip): # self.robot.apply_action(robo_action) # self._p.stepSimulation() # # return self.robot.get_robot_observation() # def calc_obs_dist_pretrain(self, obs1, obs2): # # TODO quat dist # # print(np.array(obs1)) # # print("2", np.array(obs2)) # # print(np.linalg.norm(np.array(obs1) - np.array(obs2))) # # print(1.5-np.linalg.norm(np.array(obs1[36:]) - np.array(obs2[36:]))) # # return -np.mean(np.abs((np.array(obs1[:36]) - np.array(obs2[:36])) / np.array(obs2[:36]))) * 100 # return 0.4-np.sum(np.abs(np.array(obs1[:36]) - np.array(obs2[:36]))) # obs len 48 # # return 6.0 -np.sum(np.abs(np.array(obs1[3:]) - np.array(obs2[3:]))) \ # # -np.sum(np.abs(np.array(obs1[:6]) - np.array(obs2[:6]))) * 20.0 # obs len 48 def step(self, a): # TODO: currently for laika, env_action is 12D, 4 feet 3D without wrench if self.train_dyn: env_action = a robo_action = self.past_bact_array[0] # after tanh else: robo_action = a robo_action = np.tanh(robo_action) # update past_bact after tanh utils.push_recent_value(self.past_bact_array, robo_action) env_pi_obs = utils.select_and_merge_from_s_a( s_mt=list(self.past_obs_array), a_mt=list(self.past_bact_array), s_idx=self.generator_past_obs_t_idx, a_idx=self.generator_past_act_t_idx ) env_pi_obs_nn = utils.wrap(env_pi_obs, is_cuda=self.cuda_env) with torch.no_grad(): _, env_action_nn, _, self.recurrent_hidden_states = self.dyn_actor_critic.act( env_pi_obs_nn, self.recurrent_hidden_states, self.masks, deterministic=False ) env_action = utils.unwrap(env_action_nn, is_cuda=self.cuda_env) # if self.ratio is None: # self.ratio = np.array([env_action / robo_action]) # else: # self.ratio = np.append(self.ratio, [env_action / robo_action], axis=0) # self.ratios = np.append(self.ratios, [env_action / robo_action], axis=0) # # env_pi_obs_feat = self.feat_select_func(self.obs) # dis_state = np.concatenate((env_pi_obs_feat, robo_action)) # dis_state = utils.wrap(dis_state, is_cuda=self.cuda_env) root_pos, _ = self.robot.get_link_com_xyz_orn(-1) x_0 = root_pos[0] # this is post noise (unseen), different from seen diversify of self.enlarge_act_scale if self.act_noise: robo_action = utils.perturb(robo_action, 0.05, self.np_random) # when call info, should call before sim_step() as in v4 (append s_t+1 later) # info will be used to construct D input outside. 
past_info = self.construct_past_traj_window() # # TODO # if self.pretrain_dyn: # # self.state_id = self._p.saveState() # self.img_obs, pre_s_i = self.rollout_one_step_imaginary() # takes the old self.obs # # img_obs = self.rollout_one_step_imaginary_same_session() # # self._p.restoreState(self.state_id) # pre_s = self.robot.get_robot_raw_state_vec() # # print(pre_s_i) # # print(pre_s) # assert np.allclose(pre_s, pre_s_i, atol=1e-5) for _ in range(self.control_skip): self.robot.apply_action(robo_action) self.apply_scale_clip_conf_from_pi_new(env_action) self._p.stepSimulation() if self.render: time.sleep(self._ts * 1.0) self.timer += 1 obs_new = self.get_extended_observation() # and update past_obs_array past_info += [self.past_obs_array[0]] # s_t+1 root_pos, _ = self.robot.get_link_com_xyz_orn(-1) x_1 = root_pos[0] self.velx = (x_1 - x_0) / (self.control_skip * self._ts) y_1 = root_pos[1] height = root_pos[2] q, dq = self.robot.get_q_dq(self.robot.ctrl_dofs) # print(np.max(np.abs(dq))) # in_support = self.robot.is_root_com_in_support() if not self.pretrain_dyn: reward = self.ab # alive bonus tar = np.minimum(self.timer / 500, self.max_tar_vel) reward += np.minimum(self.velx, tar) * self.vel_r_weight # print("v", self.velx, "tar", tar) reward += -self.energy_weight * np.square(robo_action).sum() # print("act norm", -self.energy_weight * np.square(a).sum()) pos_mid = 0.5 * (self.robot.ll + self.robot.ul) q_scaled = 2 * (q - pos_mid) / (self.robot.ul - self.robot.ll) joints_at_limit = np.count_nonzero(np.abs(q_scaled) > 0.97) reward += -self.jl_weight * joints_at_limit # print("jl", -self.jl_weight * joints_at_limit) reward += -np.minimum(np.sum(np.square(dq)) * self.dq_pen_weight, 5.0) weight = np.array([2.0, 1.0, 1.0] * 4) reward += -np.minimum(np.sum(np.square(q - self.robot.init_q) * weight) * self.q_pen_weight, 5.0) # print("vel pen", -np.minimum(np.sum(np.abs(dq)) * self.dq_pen_weight, 5.0)) # print("pos pen", -np.minimum(np.sum(np.square(q - self.robot.init_q)) * self.q_pen_weight, 5.0)) y_1 = root_pos[1] reward += -y_1 * 0.5 # print("dev pen", -y_1*0.5) else: # reward = self.calc_obs_dist_pretrain(self.img_obs[:-4], self.obs[:len(self.img_obs[:-4])]) reward = 0 # TODO # print("______") # print(in_support) # print("h", height) # print("dq.", np.abs(dq)) # print((np.abs(dq) < 50).all()) # print("------") # conf policy will not have body-in-contact flag not_done = (np.abs(dq) < 90).all() and (height > 0.2) and (height < 1.0) # not_done = (abs(y_1) < 5.0) and (height > 0.1) and (height < 1.0) and (rpy[2] > 0.1) # not_done = True # # if not not_done: # print(self.ratio.shape) # labels = list("123456789ABC") # data = self.ratio # from matplotlib import pyplot as plt # width = 0.4 # fig, ax = plt.subplots() # for i, l in enumerate(labels): # x = np.ones(data.shape[0]) * i + (np.random.rand(data.shape[0]) * width - width / 2.) 
# ax.scatter(x, data[:, i], s=25) # median = np.median(data[:, i]) # ax.plot([i - width / 2., i + width / 2.], [median, median], color="k") # # plt.ylim(-5, 5) # ax.set_xticks(range(len(labels))) # ax.set_xticklabels(labels) # plt.show() # self.ratio = None # if not self.train_dyn: # dis_action = self.feat_select_func(self.obs) # dis_action = utils.wrap(dis_action, is_cuda=self.cuda_env) # d_score = self.discri.predict_prob_single_step(dis_state, dis_action) # self.d_scores.append(utils.unwrap(d_score, is_cuda=self.cuda_env)) # # if len(self.d_scores) > 20 and np.mean(self.d_scores[-20:]) < 0.4: # # not_done = False # # if not not_done or self.timer==1000: # # print(np.mean(self.d_scores)) return obs_new, reward, not not_done, {"sas_window": past_info} # def return_imaginary_obs(self): # # mods self.obs # obs_i = np.copy(self.obs) # # obs_i[:len(self.img_obs[:-4])] = self.img_obs[:-4] # obs_i[:len(self.img_obs)] = self.img_obs # return obs_i def apply_scale_clip_conf_from_pi_new(self, con_f): approx_mass = 26.0 max_fz = approx_mass * 9.81 * 2 # 2mg # TODO for foot_ind, link in enumerate(self.robot.feet): this_con_f = np.tanh(con_f[foot_ind * 3: (foot_ind + 1) * 3]) # [-1 ,1] pos, _ = self.robot.get_link_com_xyz_orn(link, fk=1) if pos[2] < 0.01: # first dim represents fz # fz = np.abs(this_con_f[0]) * max_fz fz = (this_con_f[0] + 1) / 2.0 * max_fz else: fz = 0.0 fx = this_con_f[1] * 1.5 * fz fy = this_con_f[2] * 1.5 * fz utils.apply_external_world_force_on_local_point(self.robot.go_id, link, [fx, fy, fz], [0, 0, 0], self._p) def construct_past_traj_window(self): # st, ... st-9, at, ..., at-9 # call this before s_t+1 enters deque # order does not matter as long as it is the same in policy & expert batch # print(list(self.past_obs_array) + list(self.past_act_array)) return list(self.past_obs_array) + list(self.past_bact_array) def get_ave_dx(self): return self.velx def get_dist(self): return self.robot.get_link_com_xyz_orn(-1)[0][0] def get_extended_observation(self): # with vel false cur_state = self.robot.get_robot_observation(with_vel=False) if self.obs_noise: cur_state = utils.perturb(cur_state, 0.1, self.np_random) # then update past obs utils.push_recent_value(self.past_obs_array, cur_state) # then construct behavior obs b_obs_all = utils.select_and_merge_from_s_a( s_mt=list(self.past_obs_array), a_mt=list(self.past_bact_array), s_idx=self.behavior_past_obs_t_idx, a_idx=np.array([]) ) # if train motor, return behavior obs and we are done if not self.train_dyn: return b_obs_all # else, train dyn # rollout b_pi obs_nn = utils.wrap(b_obs_all, is_cuda=self.cuda_env) with torch.no_grad(): _, action_nn, _, self.recurrent_hidden_states = self.go_actor_critic.act( obs_nn, self.recurrent_hidden_states, self.masks, deterministic=False ) b_cur_act = list(utils.unwrap(action_nn, is_cuda=self.cuda_env)) b_cur_act = utils.perturb(b_cur_act, self.enlarge_act_range, self.np_random) b_cur_act = np.tanh(b_cur_act) # Store action after tanh (-1,1) utils.push_recent_value(self.past_bact_array, b_cur_act) # construct G obs from updated past obs&b_act g_obs_all = utils.select_and_merge_from_s_a( s_mt=list(self.past_obs_array), a_mt=list(self.past_bact_array), s_idx=self.generator_past_obs_t_idx, a_idx=self.generator_past_act_t_idx ) return g_obs_all def seed(self, seed=None): self.np_random, seed = gym.utils.seeding.np_random(seed) self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env return [seed] def getSourceCode(self): s = inspect.getsource(type(self)) s = s + 
inspect.getsource(type(self.robot)) return s def cam_track_torso_link(self): distance = 2 yaw = 0 root_pos, _ = self.robot.get_link_com_xyz_orn(-1) distance -= root_pos[1] self._p.resetDebugVisualizerCamera(distance, yaw, -20, [root_pos[0], 0.0, 0.4])
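A hedged rollout sketch (an addition; not part of the original file): it shows the standard gym reset/step loop the class implements. The constructor loads a fixed policy or dynamics model from disk, so `dyn_dir` below is a hypothetical path, and the random action is a stand-in for a trained policy.

if __name__ == "__main__":
    # Hypothetical paths/arguments for demonstration only.
    env = LaikagoConFEnvV4(render=False, train_dyn=False,
                           dyn_dir="trained_models/ppo",
                           dyn_env_name="LaikagoConFEnv-v4")
    obs = env.reset()
    for _ in range(100):
        act = env.action_space.sample()           # stand-in for a policy
        obs, reward, done, info = env.step(act)   # info carries the s-a window
        if done:
            obs = env.reset()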
[ "numpy.abs", "gan.utils.apply_external_world_force_on_local_point", "torch.no_grad", "os.path.join", "collections.deque", "gym.utils.seeding.np_random", "gan.utils.push_recent_value", "gan.utils.perturb", "gan.utils.load", "numpy.minimum", "numpy.tanh", "pybullet_utils.bullet_client.BulletClient", "numpy.square", "time.sleep", "inspect.currentframe", "gan.utils.unwrap", "numpy.array", "gym.spaces.Box", "gan.utils.wrap" ]
[((3675, 3691), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (3680, 3691), False, 'from collections import deque\n'), ((3723, 3739), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (3728, 3739), False, 'from collections import deque\n'), ((5809, 5873), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-np.inf * obs_dummy)', 'high': '(np.inf * obs_dummy)'}), '(low=-np.inf * obs_dummy, high=np.inf * obs_dummy)\n', (5823, 5873), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((19150, 19205), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_obs_array', 'cur_state'], {}), '(self.past_obs_array, cur_state)\n', (19173, 19205), False, 'from gan import utils\n'), ((19665, 19709), 'gan.utils.wrap', 'utils.wrap', (['b_obs_all'], {'is_cuda': 'self.cuda_env'}), '(b_obs_all, is_cuda=self.cuda_env)\n', (19675, 19709), False, 'from gan import utils\n'), ((20019, 20083), 'gan.utils.perturb', 'utils.perturb', (['b_cur_act', 'self.enlarge_act_range', 'self.np_random'], {}), '(b_cur_act, self.enlarge_act_range, self.np_random)\n', (20032, 20083), False, 'from gan import utils\n'), ((20104, 20122), 'numpy.tanh', 'np.tanh', (['b_cur_act'], {}), '(b_cur_act)\n', (20111, 20122), True, 'import numpy as np\n'), ((20173, 20229), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_bact_array', 'b_cur_act'], {}), '(self.past_bact_array, b_cur_act)\n', (20196, 20229), False, 'from gan import utils\n'), ((20623, 20656), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (20650, 20656), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((922, 944), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (942, 944), False, 'import inspect\n'), ((2930, 2986), 'pybullet_utils.bullet_client.BulletClient', 'bullet_client.BulletClient', ([], {'connection_mode': 'pybullet.GUI'}), '(connection_mode=pybullet.GUI)\n', (2956, 2986), False, 'from pybullet_utils import bullet_client\n'), ((3023, 3051), 'pybullet_utils.bullet_client.BulletClient', 'bullet_client.BulletClient', ([], {}), '()\n', (3049, 3051), False, 'from pybullet_utils import bullet_client\n'), ((4093, 4166), 'gan.utils.load', 'utils.load', (['behavior_dir', 'behavior_env_name', 'self.cuda_env', 'behavior_iter'], {}), '(behavior_dir, behavior_env_name, self.cuda_env, behavior_iter)\n', (4103, 4166), False, 'from gan import utils\n'), ((4515, 4573), 'gan.utils.load', 'utils.load', (['dyn_dir', 'dyn_env_name', 'self.cuda_env', 'dyn_iter'], {}), '(dyn_dir, dyn_env_name, self.cuda_env, dyn_iter)\n', (4525, 4573), False, 'from gan import utils\n'), ((10732, 10752), 'numpy.tanh', 'np.tanh', (['robo_action'], {}), '(robo_action)\n', (10739, 10752), True, 'import numpy as np\n'), ((10807, 10865), 'gan.utils.push_recent_value', 'utils.push_recent_value', (['self.past_bact_array', 'robo_action'], {}), '(self.past_bact_array, robo_action)\n', (10830, 10865), False, 'from gan import utils\n'), ((11229, 11274), 'gan.utils.wrap', 'utils.wrap', (['env_pi_obs'], {'is_cuda': 'self.cuda_env'}), '(env_pi_obs, is_cuda=self.cuda_env)\n', (11239, 11274), False, 'from gan import utils\n'), ((11544, 11594), 'gan.utils.unwrap', 'utils.unwrap', (['env_action_nn'], {'is_cuda': 'self.cuda_env'}), '(env_action_nn, is_cuda=self.cuda_env)\n', (11556, 11594), False, 'from gan import utils\n'), ((12337, 12385), 'gan.utils.perturb', 'utils.perturb', (['robo_action', '(0.05)', 'self.np_random'], {}), '(robo_action, 0.05, self.np_random)\n', (12350, 
12385), False, 'from gan import utils\n'), ((13985, 14031), 'numpy.minimum', 'np.minimum', (['(self.timer / 500)', 'self.max_tar_vel'], {}), '(self.timer / 500, self.max_tar_vel)\n', (13995, 14031), True, 'import numpy as np\n'), ((14726, 14755), 'numpy.array', 'np.array', (['([2.0, 1.0, 1.0] * 4)'], {}), '([2.0, 1.0, 1.0] * 4)\n', (14734, 14755), True, 'import numpy as np\n'), ((17655, 17702), 'numpy.tanh', 'np.tanh', (['con_f[foot_ind * 3:(foot_ind + 1) * 3]'], {}), '(con_f[foot_ind * 3:(foot_ind + 1) * 3])\n', (17662, 17702), True, 'import numpy as np\n'), ((18105, 18214), 'gan.utils.apply_external_world_force_on_local_point', 'utils.apply_external_world_force_on_local_point', (['self.robot.go_id', 'link', '[fx, fy, fz]', '[0, 0, 0]', 'self._p'], {}), '(self.robot.go_id, link, [fx,\n fy, fz], [0, 0, 0], self._p)\n', (18152, 18214), False, 'from gan import utils\n'), ((19064, 19109), 'gan.utils.perturb', 'utils.perturb', (['cur_state', '(0.1)', 'self.np_random'], {}), '(cur_state, 0.1, self.np_random)\n', (19077, 19109), False, 'from gan import utils\n'), ((19723, 19738), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19736, 19738), False, 'import torch\n'), ((19951, 19997), 'gan.utils.unwrap', 'utils.unwrap', (['action_nn'], {'is_cuda': 'self.cuda_env'}), '(action_nn, is_cuda=self.cuda_env)\n', (19963, 19997), False, 'from gan import utils\n'), ((5604, 5638), 'numpy.array', 'np.array', (['([-1.0] * self.action_dim)'], {}), '([-1.0] * self.action_dim)\n', (5612, 5638), True, 'import numpy as np\n'), ((5687, 5721), 'numpy.array', 'np.array', (['([+1.0] * self.action_dim)'], {}), '([+1.0] * self.action_dim)\n', (5695, 5721), True, 'import numpy as np\n'), ((6453, 6498), 'os.path.join', 'os.path.join', (['currentdir', '"""assets/plane.urdf"""'], {}), "(currentdir, 'assets/plane.urdf')\n", (6465, 6498), False, 'import os\n'), ((11292, 11307), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11305, 11307), False, 'import torch\n'), ((13329, 13355), 'time.sleep', 'time.sleep', (['(self._ts * 1.0)'], {}), '(self._ts * 1.0)\n', (13339, 13355), False, 'import time\n'), ((14054, 14080), 'numpy.minimum', 'np.minimum', (['self.velx', 'tar'], {}), '(self.velx, tar)\n', (14064, 14080), True, 'import numpy as np\n'), ((19453, 19465), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (19461, 19465), True, 'import numpy as np\n'), ((14479, 14495), 'numpy.abs', 'np.abs', (['q_scaled'], {}), '(q_scaled)\n', (14485, 14495), True, 'import numpy as np\n'), ((14193, 14215), 'numpy.square', 'np.square', (['robo_action'], {}), '(robo_action)\n', (14202, 14215), True, 'import numpy as np\n'), ((15587, 15597), 'numpy.abs', 'np.abs', (['dq'], {}), '(dq)\n', (15593, 15597), True, 'import numpy as np\n'), ((14663, 14676), 'numpy.square', 'np.square', (['dq'], {}), '(dq)\n', (14672, 14676), True, 'import numpy as np\n'), ((14797, 14829), 'numpy.square', 'np.square', (['(q - self.robot.init_q)'], {}), '(q - self.robot.init_q)\n', (14806, 14829), True, 'import numpy as np\n')]
from .utils import * from ytmusicapi.helpers import to_int from .songs import parse_song_runs, parse_like_status def parse_album_header(response): header = nav(response, HEADER_DETAIL) album = { 'title': nav(header, TITLE_TEXT), 'type': nav(header, SUBTITLE), 'thumbnails': nav(header, THUMBNAIL_CROPPED) } if "description" in header: album["description"] = header["description"]["runs"][0]["text"] album_info = parse_song_runs(header['subtitle']['runs'][2:]) album.update(album_info) if len(header['secondSubtitle']['runs']) > 1: album['trackCount'] = to_int(header['secondSubtitle']['runs'][0]['text']) album['duration'] = header['secondSubtitle']['runs'][2]['text'] else: album['duration'] = header['secondSubtitle']['runs'][0]['text'] # add to library/uploaded menu = nav(header, MENU) toplevel = menu['topLevelButtons'] album['audioPlaylistId'] = nav(toplevel, [0, 'buttonRenderer'] + NAVIGATION_WATCH_PLAYLIST_ID) service = nav(toplevel, [1, 'buttonRenderer', 'defaultServiceEndpoint'], True) if service: album['likeStatus'] = parse_like_status(service) return album
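An invented example of the two `secondSubtitle` shapes the branch above distinguishes (illustrative only; not a real YouTube Music payload): with more than one run, runs[0] carries the track count and runs[2] the duration; otherwise the single run is the duration.

# Invented run payloads for illustration:
long_form = [{'text': '24 songs'}, {'text': ' • '}, {'text': '1 hour, 20 minutes'}]
short_form = [{'text': '1 hour, 20 minutes'}]
assert len(long_form) > 1      # -> trackCount from runs[0], duration from runs[2]
assert len(short_form) == 1    # -> duration from runs[0]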
[ "ytmusicapi.helpers.to_int" ]
[((625, 676), 'ytmusicapi.helpers.to_int', 'to_int', (["header['secondSubtitle']['runs'][0]['text']"], {}), "(header['secondSubtitle']['runs'][0]['text'])\n", (631, 676), False, 'from ytmusicapi.helpers import to_int\n')]
""" This script acts as a placeholder to set a threshold for cands >b and compare those cands with our gold data. """ import csv import logging import os import pickle from enum import Enum import numpy as np from tqdm import tqdm from hack.transistors.transistor_utils import ( Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic, ) # Configure logging for Hack logging.basicConfig( format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s", level=logging.DEBUG, handlers=[ logging.FileHandler( os.path.join(os.path.dirname(os.path.abspath(__file__)), "analysis.log") ), logging.StreamHandler(), ], ) logger = logging.getLogger(__name__) # Enum for tracking class Relation(Enum): STG_TEMP_MIN = "stg_temp_min" STG_TEMP_MAX = "stg_temp_max" POLARITY = "polarity" CE_V_MAX = "ce_v_max" def load_parts_by_doc(): dirname = os.path.dirname(os.path.abspath(__file__)) pickle_file = os.path.join(dirname, "data/parts_by_doc_new.pkl") with open(pickle_file, "rb") as f: return pickle.load(f) def capitalize_filenames(filenames): output = set() for filename in filenames: output.add(filename.upper()) return output def print_score(score, description): logger.info("===================================================") logger.info(description) logger.info("===================================================") logger.info(f"Corpus Precision {score.prec:.3f}") logger.info(f"Corpus Recall {score.rec:.3f}") logger.info(f"Corpus F1 {score.f1:.3f}") logger.info("---------------------------------------------------") logger.info( f"TP: {len(score.TP)} " f"| FP: {len(score.FP)} " f"| FN: {len(score.FN)}" ) logger.info("===================================================\n") def get_entity_set(file, parts_by_doc, b=0.0): entities = set() errors = set() with open(file, "r") as input: reader = csv.reader(input) for line in reader: try: (doc, part, val, score) = line if float(score) > b: # Add implied parts as well for p in get_implied_parts(part, doc, parts_by_doc): entities.add((doc, p, val)) except KeyError: if doc not in errors: logger.warning(f"{doc} was not found in parts_by_doc.") errors.add(doc) continue except Exception as e: logger.error(f"{e} while getting entity set from {file}.") return entities def get_parts(entities): parts = set() for (doc, part, val) in entities: parts.add(part) return parts def get_filenames(entities): filenames = set() for (doc, part, val) in entities: filenames.add(doc) return filenames def print_filenames_to_file(entities, outfile): with open(outfile, "w") as outfile: writer = csv.writer(outfile) for (doc, part, val) in entities: writer.writerow([doc]) def get_filenames_from_file(file): filenames = set() with open(file, "r") as input: reader = csv.reader(input) for line in reader: filenames.add(line[0].upper()) return filenames def filter_filenames(entities, filenames): result = set() for (doc, part, val) in entities: if doc in filenames: result.add((doc, part, val)) if len(result) == 0: logger.debug( f"Filtering for {len(get_filenames(entities))} " + "entity filenames turned up empty." 
) return result def main( num=100, relation=Relation.CE_V_MAX.value, devfile="ce_v_max_dev_probs.csv", testfile="ce_v_max_test_probs.csv", outfile="analysis/ce_v_max_analysis_discrepancies.csv", debug=False, ): # Define output dirname = os.path.dirname(os.path.abspath(__file__)) discrepancy_file = os.path.join(dirname, outfile) # Analysis gold_file = os.path.join(dirname, "data/analysis/our_gold.csv") filenames_file = os.path.join(dirname, "data/analysis/filenames.csv") filenames = capitalize_filenames(get_filenames_from_file(filenames_file)) # logger.info(f"Analysis dataset is {len(filenames)}" + " filenames long.") gold = filter_filenames( get_gold_set(gold=[gold_file], attribute=relation), filenames ) # logger.info(f"Original gold set is {len(get_filenames(gold))} filenames long.") best_score = Score(0, 0, 0, [], [], []) best_b = 0 best_entities = set() # Test test_file = os.path.join(dirname, testfile) test_filenames = capitalize_filenames( get_filenames_from_file(os.path.join(dirname, "data/test/filenames.csv")) ) test_goldfile = os.path.join(dirname, "data/test/test_gold.csv") test_gold = filter_filenames( get_gold_set(gold=[test_goldfile], attribute=relation), test_filenames ) best_test_score = Score(0, 0, 0, [], [], []) best_test_b = 0 best_test_entities = set() # Dev dev_file = os.path.join(dirname, devfile) dev_filenames = capitalize_filenames( get_filenames_from_file(os.path.join(dirname, "data/dev/filenames.csv")) ) dev_goldfile = os.path.join(dirname, "data/dev/dev_gold.csv") dev_gold = filter_filenames( get_gold_set(gold=[dev_goldfile], attribute=relation), dev_filenames ) best_dev_score = Score(0, 0, 0, [], [], []) best_dev_b = 0 best_dev_entities = set() # Iterate over `b` values logger.info(f"Determining best b...") parts_by_doc = load_parts_by_doc() for b in tqdm(np.linspace(0, 1, num=num)): # Dev and Test dev_entities = get_entity_set(dev_file, parts_by_doc, b=b) test_entities = get_entity_set(test_file, parts_by_doc, b=b) # Analysis (combo of dev and test) entities = filter_filenames( dev_entities.union(test_entities), get_filenames_from_file(filenames_file) ) # Score entities against gold data and generate comparison CSV dev_score = entity_level_scores( dev_entities, attribute=relation, docs=dev_filenames ) test_score = entity_level_scores( test_entities, attribute=relation, docs=test_filenames ) score = entity_level_scores(entities, attribute=relation, docs=filenames) if dev_score.f1 > best_dev_score.f1: best_dev_score = dev_score best_dev_b = b best_dev_entities = dev_entities if test_score.f1 > best_test_score.f1: best_test_score = test_score best_test_b = b best_test_entities = test_entities if score.f1 > best_score.f1: best_score = score best_b = b best_entities = entities if debug: # Test logger.info("Scoring for test set...") logger.info( f"Entity set is {len(get_filenames(best_test_entities))} filenames long." ) logger.info(f"Gold set is {len(get_filenames(test_gold))} filenames long.") print_score( best_test_score, description=f"Scoring on cands > {best_test_b:.3f} " + "against our gold labels.", ) # Dev logger.info("Scoring for dev set...") logger.info( f"Entity set is {len(get_filenames(best_dev_entities))} filenames long." 
) logger.info(f"Gold set is {len(get_filenames(dev_gold))} filenames long.") print_score( best_dev_score, description=f"Scoring on cands > {best_dev_b:.3f} against our gold labels.", ) logger.info("Scoring for analysis set...") # Analysis # logger.info(f"Entity set is {len(get_filenames(best_entities))} filenames long.") # logger.info(f"Gold set is {len(get_filenames(gold))} filenames long.") print_score( best_score, description=f"Scoring on cands > {best_b:.3f} against our gold labels.", ) compare_entities( set(best_score.FP), attribute=relation, type="FP", outfile=discrepancy_file, gold_dic=gold_set_to_dic(gold), ) compare_entities( set(best_score.FN), attribute=relation, type="FN", outfile=discrepancy_file, append=True, entity_dic=gold_set_to_dic(best_entities), )
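The excerpt defines `main()` but no call site; a conventional entry-point guard (an assumption, not in the original script) would run the threshold sweep with its defaults:

if __name__ == "__main__":
    main()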
[ "os.path.abspath", "hack.transistors.transistor_utils.Score", "csv.reader", "csv.writer", "hack.transistors.transistor_utils.get_implied_parts", "logging.StreamHandler", "hack.transistors.transistor_utils.entity_level_scores", "pickle.load", "numpy.linspace", "hack.transistors.transistor_utils.get_gold_set", "os.path.join", "hack.transistors.transistor_utils.gold_set_to_dic", "logging.getLogger" ]
[((747, 774), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (764, 774), False, 'import logging\n'), ((1041, 1091), 'os.path.join', 'os.path.join', (['dirname', '"""data/parts_by_doc_new.pkl"""'], {}), "(dirname, 'data/parts_by_doc_new.pkl')\n", (1053, 1091), False, 'import os\n'), ((4082, 4112), 'os.path.join', 'os.path.join', (['dirname', 'outfile'], {}), '(dirname, outfile)\n', (4094, 4112), False, 'import os\n'), ((4145, 4196), 'os.path.join', 'os.path.join', (['dirname', '"""data/analysis/our_gold.csv"""'], {}), "(dirname, 'data/analysis/our_gold.csv')\n", (4157, 4196), False, 'import os\n'), ((4218, 4270), 'os.path.join', 'os.path.join', (['dirname', '"""data/analysis/filenames.csv"""'], {}), "(dirname, 'data/analysis/filenames.csv')\n", (4230, 4270), False, 'import os\n'), ((4638, 4664), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (4643, 4664), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((4734, 4765), 'os.path.join', 'os.path.join', (['dirname', 'testfile'], {}), '(dirname, testfile)\n', (4746, 4765), False, 'import os\n'), ((4917, 4965), 'os.path.join', 'os.path.join', (['dirname', '"""data/test/test_gold.csv"""'], {}), "(dirname, 'data/test/test_gold.csv')\n", (4929, 4965), False, 'import os\n'), ((5108, 5134), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (5113, 5134), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5212, 5242), 'os.path.join', 'os.path.join', (['dirname', 'devfile'], {}), '(dirname, devfile)\n', (5224, 5242), False, 'import os\n'), ((5391, 5437), 'os.path.join', 'os.path.join', (['dirname', '"""data/dev/dev_gold.csv"""'], {}), "(dirname, 'data/dev/dev_gold.csv')\n", (5403, 5437), False, 'import os\n'), ((5576, 5602), 'hack.transistors.transistor_utils.Score', 'Score', (['(0)', '(0)', '(0)', '[]', '[]', '[]'], {}), '(0, 0, 0, [], [], [])\n', (5581, 5602), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((996, 1021), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1011, 1021), False, 'import os\n'), ((1146, 1160), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1157, 1160), False, 'import pickle\n'), ((2065, 2082), 'csv.reader', 'csv.reader', (['input'], {}), '(input)\n', (2075, 2082), False, 'import csv\n'), ((3085, 3104), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (3095, 3104), False, 'import csv\n'), ((3293, 3310), 'csv.reader', 'csv.reader', (['input'], {}), '(input)\n', (3303, 3310), False, 'import csv\n'), ((4032, 4057), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4047, 4057), False, 'import os\n'), ((4466, 4516), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[gold_file]', 'attribute': 'relation'}), '(gold=[gold_file], attribute=relation)\n', (4478, 4516), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5008, 5062), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[test_goldfile]', 'attribute': 
'relation'}), '(gold=[test_goldfile], attribute=relation)\n', (5020, 5062), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5479, 5532), 'hack.transistors.transistor_utils.get_gold_set', 'get_gold_set', ([], {'gold': '[dev_goldfile]', 'attribute': 'relation'}), '(gold=[dev_goldfile], attribute=relation)\n', (5491, 5532), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((5782, 5808), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'num'}), '(0, 1, num=num)\n', (5793, 5808), True, 'import numpy as np\n'), ((6240, 6313), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['dev_entities'], {'attribute': 'relation', 'docs': 'dev_filenames'}), '(dev_entities, attribute=relation, docs=dev_filenames)\n', (6259, 6313), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((6357, 6432), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['test_entities'], {'attribute': 'relation', 'docs': 'test_filenames'}), '(test_entities, attribute=relation, docs=test_filenames)\n', (6376, 6432), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((6471, 6536), 'hack.transistors.transistor_utils.entity_level_scores', 'entity_level_scores', (['entities'], {'attribute': 'relation', 'docs': 'filenames'}), '(entities, attribute=relation, docs=filenames)\n', (6490, 6536), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((704, 727), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (725, 727), False, 'import logging\n'), ((4841, 4889), 'os.path.join', 'os.path.join', (['dirname', '"""data/test/filenames.csv"""'], {}), "(dirname, 'data/test/filenames.csv')\n", (4853, 4889), False, 'import os\n'), ((5317, 5364), 'os.path.join', 'os.path.join', (['dirname', '"""data/dev/filenames.csv"""'], {}), "(dirname, 'data/dev/filenames.csv')\n", (5329, 5364), False, 'import os\n'), ((8345, 8366), 'hack.transistors.transistor_utils.gold_set_to_dic', 'gold_set_to_dic', (['gold'], {}), '(gold)\n', (8360, 8366), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((8545, 8575), 'hack.transistors.transistor_utils.gold_set_to_dic', 'gold_set_to_dic', (['best_entities'], {}), '(best_entities)\n', (8560, 8575), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((2289, 2331), 'hack.transistors.transistor_utils.get_implied_parts', 'get_implied_parts', (['part', 'doc', 'parts_by_doc'], {}), '(part, doc, parts_by_doc)\n', (2306, 2331), False, 'from hack.transistors.transistor_utils import Score, compare_entities, entity_level_scores, get_gold_set, get_implied_parts, gold_set_to_dic\n'), ((641, 666), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (656, 666), False, 'import os\n')]
import tkinter
from tkinter import *
from PIL import ImageTk, Image  # PIL -> Pillow
import pymysql
from tkinter import messagebox
from AddBook import *
from DeleteBook import *
from ViewBook import *
from IssueBook import *

# # Connecting to MySql Server
# mypass = "<PASSWORD>"  # Use your own password
# mydatabase = "db"  # name of the database
# con = pymysql.connect(host="localhost", user="root",
#                       password=<PASSWORD>, database=mydatabase)
# # root is the username here
# cur = con.cursor()  # cur -> cursor

# Designing the Window
root = Tk()
root.title("Library")
root.minsize(width=400, height=400)
root.geometry("600x500")

same = True
n = 0.25

# Adding a background image
background_image = Image.open('lib.jpg')
[imageSizeWidth, imageSizeHeight] = background_image.size

newImageSizeWidth = int(imageSizeWidth * n)
if same:
    newImageSizeHeight = int(imageSizeHeight * n)
else:
    newImageSizeHeight = int(imageSizeHeight / n)

# Resample with ANTIALIAS so the scaled background stays smooth.
background_image = background_image.resize(
    (newImageSizeWidth, newImageSizeHeight), Image.ANTIALIAS)
img = ImageTk.PhotoImage(background_image)

Canvas1 = Canvas(root)
Canvas1.create_image(300, 340, image=img)
Canvas1.config(bg="white", width=newImageSizeWidth, height=newImageSizeHeight)
Canvas1.pack(expand=True, fill=BOTH)

root.mainloop()
[ "PIL.ImageTk.PhotoImage", "PIL.Image.open" ]
[((713, 734), 'PIL.Image.open', 'Image.open', (['"""lib.jpg"""'], {}), "('lib.jpg')\n", (723, 734), False, 'from PIL import ImageTk, Image\n'), ((1057, 1093), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['background_image'], {}), '(background_image)\n', (1075, 1093), False, 'from PIL import ImageTk, Image\n')]
#!/usr/bin/env nemesis # # ---------------------------------------------------------------------- # # <NAME>, U.S. Geological Survey # <NAME>, GNS Science # <NAME>, University at Buffalo # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2021 University of California, Davis # # See LICENSE.md for license information. # # ---------------------------------------------------------------------- # # @file tests/fullscale/poroelasticity/mandel_compaction/TestMandelCompaction.py # # @brief Test suite for testing pylith with Mandel's problem. import unittest from pylith.testing.FullTestApp import (FullTestCase, Check, check_data) import meshes import mandel_compaction_soln import mandel_compaction_gendb # We do not include trace_strain in the check of the solution fields, because of the # poor convergence of the series solution. SOLUTION_FIELDS = ["displacement", "pressure"] SOLUTION_TOLERANCE = 0.2 # ------------------------------------------------------------------------------------------------- class TestCase(FullTestCase): def setUp(self): defaults = { "filename": "output/{name}-{mesh_entity}.h5", "exact_soln": mandel_compaction_soln.AnalyticalSoln(), "mesh": self.mesh, } self.checks = [ Check( mesh_entities=["domain"], vertex_fields=SOLUTION_FIELDS, defaults=defaults, tolerance=SOLUTION_TOLERANCE, ), Check( mesh_entities=["poroelastic"], filename="output/{name}-{mesh_entity}_info.h5", cell_fields=[ "biot_coefficient", "biot_modulus", "drained_bulk_modulus", "fluid_density", "fluid_viscosity", "isotropic_permeability", "porosity", "shear_modulus", "solid_density", ], defaults=defaults, ), Check( mesh_entities=["poroelastic"], vertex_fields = SOLUTION_FIELDS, defaults=defaults, tolerance=SOLUTION_TOLERANCE, ), Check( mesh_entities=["x_neg", "x_pos", "y_neg", "y_pos"], filename="output/{name}-{mesh_entity}_info.h5", vertex_fields=["initial_amplitude"], defaults=defaults, ), Check( mesh_entities=["x_neg", "x_pos", "y_neg", "y_pos"], vertex_fields=SOLUTION_FIELDS, defaults=defaults, tolerance=SOLUTION_TOLERANCE, ), ] def run_pylith(self, testName, args): FullTestCase.run_pylith(self, testName, args, mandel_compaction_gendb.GenerateDB) # ------------------------------------------------------------------------------------------------- class TestQuad(TestCase): def setUp(self): self.name = "mandel_compaction_quad" self.mesh = meshes.Quad() super().setUp() TestCase.run_pylith(self, self.name, ["mandel_compaction.cfg", "mandel_compaction_quad.cfg"]) return # ------------------------------------------------------------------------------------------------- class TestTri(TestCase): def setUp(self): self.name = "mandel_compaction_tri" self.mesh = meshes.Tri() super().setUp() TestCase.run_pylith(self, self.name, ["mandel_compaction.cfg", "mandel_compaction_tri.cfg"]) return # ---------------------------------------------------------------------------------------------------------------------- def test_cases(): return [ TestQuad, TestTri, ] # ---------------------------------------------------------------------------------------------------------------------- if __name__ == '__main__': FullTestCase.parse_args() suite = unittest.TestSuite() for test in test_cases(): suite.addTest(unittest.makeSuite(test)) unittest.TextTestRunner(verbosity=2).run(suite) # End of file
[ "mandel_compaction_soln.AnalyticalSoln", "unittest.TextTestRunner", "unittest.TestSuite", "unittest.makeSuite", "pylith.testing.FullTestApp.FullTestCase.run_pylith", "meshes.Quad", "meshes.Tri", "pylith.testing.FullTestApp.FullTestCase.parse_args", "pylith.testing.FullTestApp.Check" ]
[((4067, 4092), 'pylith.testing.FullTestApp.FullTestCase.parse_args', 'FullTestCase.parse_args', ([], {}), '()\n', (4090, 4092), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((4106, 4126), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (4124, 4126), False, 'import unittest\n'), ((2898, 2984), 'pylith.testing.FullTestApp.FullTestCase.run_pylith', 'FullTestCase.run_pylith', (['self', 'testName', 'args', 'mandel_compaction_gendb.GenerateDB'], {}), '(self, testName, args, mandel_compaction_gendb.\n GenerateDB)\n', (2921, 2984), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((3195, 3208), 'meshes.Quad', 'meshes.Quad', ([], {}), '()\n', (3206, 3208), False, 'import meshes\n'), ((3564, 3576), 'meshes.Tri', 'meshes.Tri', ([], {}), '()\n', (3574, 3576), False, 'import meshes\n'), ((1255, 1294), 'mandel_compaction_soln.AnalyticalSoln', 'mandel_compaction_soln.AnalyticalSoln', ([], {}), '()\n', (1292, 1294), False, 'import mandel_compaction_soln\n'), ((1373, 1489), 'pylith.testing.FullTestApp.Check', 'Check', ([], {'mesh_entities': "['domain']", 'vertex_fields': 'SOLUTION_FIELDS', 'defaults': 'defaults', 'tolerance': 'SOLUTION_TOLERANCE'}), "(mesh_entities=['domain'], vertex_fields=SOLUTION_FIELDS, defaults=\n defaults, tolerance=SOLUTION_TOLERANCE)\n", (1378, 1489), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((1577, 1879), 'pylith.testing.FullTestApp.Check', 'Check', ([], {'mesh_entities': "['poroelastic']", 'filename': '"""output/{name}-{mesh_entity}_info.h5"""', 'cell_fields': "['biot_coefficient', 'biot_modulus', 'drained_bulk_modulus',\n 'fluid_density', 'fluid_viscosity', 'isotropic_permeability',\n 'porosity', 'shear_modulus', 'solid_density']", 'defaults': 'defaults'}), "(mesh_entities=['poroelastic'], filename=\n 'output/{name}-{mesh_entity}_info.h5', cell_fields=['biot_coefficient',\n 'biot_modulus', 'drained_bulk_modulus', 'fluid_density',\n 'fluid_viscosity', 'isotropic_permeability', 'porosity',\n 'shear_modulus', 'solid_density'], defaults=defaults)\n", (1582, 1879), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((2154, 2274), 'pylith.testing.FullTestApp.Check', 'Check', ([], {'mesh_entities': "['poroelastic']", 'vertex_fields': 'SOLUTION_FIELDS', 'defaults': 'defaults', 'tolerance': 'SOLUTION_TOLERANCE'}), "(mesh_entities=['poroelastic'], vertex_fields=SOLUTION_FIELDS,\n defaults=defaults, tolerance=SOLUTION_TOLERANCE)\n", (2159, 2274), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((2365, 2536), 'pylith.testing.FullTestApp.Check', 'Check', ([], {'mesh_entities': "['x_neg', 'x_pos', 'y_neg', 'y_pos']", 'filename': '"""output/{name}-{mesh_entity}_info.h5"""', 'vertex_fields': "['initial_amplitude']", 'defaults': 'defaults'}), "(mesh_entities=['x_neg', 'x_pos', 'y_neg', 'y_pos'], filename=\n 'output/{name}-{mesh_entity}_info.h5', vertex_fields=[\n 'initial_amplitude'], defaults=defaults)\n", (2370, 2536), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((2619, 2761), 'pylith.testing.FullTestApp.Check', 'Check', ([], {'mesh_entities': "['x_neg', 'x_pos', 'y_neg', 'y_pos']", 'vertex_fields': 'SOLUTION_FIELDS', 'defaults': 'defaults', 'tolerance': 'SOLUTION_TOLERANCE'}), "(mesh_entities=['x_neg', 'x_pos', 'y_neg', 'y_pos'], vertex_fields=\n SOLUTION_FIELDS, defaults=defaults, tolerance=SOLUTION_TOLERANCE)\n", (2624, 2761), False, 'from pylith.testing.FullTestApp import FullTestCase, Check, check_data\n'), ((4179, 4203), 'unittest.makeSuite', 'unittest.makeSuite', (['test'], {}), '(test)\n', (4197, 4203), False, 'import unittest\n'), ((4209, 4245), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4232, 4245), False, 'import unittest\n')]
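A self-contained sketch of the unittest suite-assembly pattern the test driver above uses (TestSuite, makeSuite, TextTestRunner); the ExampleCase class is hypothetical.

import unittest

class ExampleCase(unittest.TestCase):
    def test_addition(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    suite = unittest.TestSuite()
    # makeSuite collects every test_* method of the class into the suite,
    # mirroring the loop over test_cases() in the driver above.
    suite.addTest(unittest.makeSuite(ExampleCase))
    unittest.TextTestRunner(verbosity=2).run(suite)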
# Generated by Django 3.2.11 on 2022-03-28 20:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('guests', '0029_alter_guest_caboose_farm'), ] operations = [ migrations.AlterField( model_name='guest', name='meal', field=models.CharField(blank=True, choices=[('beef', 'braised beef shortrib'), ('fish', 'salmon'), ('vegan', 'vegan')], max_length=20, null=True), ), ]
[ "django.db.models.CharField" ]
[((354, 498), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('beef', 'braised beef shortrib'), ('fish', 'salmon'), ('vegan', 'vegan')]", 'max_length': '(20)', 'null': '(True)'}), "(blank=True, choices=[('beef', 'braised beef shortrib'), (\n 'fish', 'salmon'), ('vegan', 'vegan')], max_length=20, null=True)\n", (370, 498), False, 'from django.db import migrations, models\n')]
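A hand-written sketch of the same AlterField pattern the auto-generated migration above uses; the dependency, model, field name, and choices here are hypothetical.

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [("guests", "0030_previous")]  # hypothetical predecessor
    operations = [
        migrations.AlterField(
            model_name="guest",
            name="drink",  # hypothetical field
            # choices restrict valid values at the form/validation layer;
            # blank/null keep the column optional, as with 'meal' above.
            field=models.CharField(
                blank=True,
                null=True,
                max_length=20,
                choices=[("wine", "wine"), ("beer", "beer")],
            ),
        ),
    ]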
#!/usr/bin/env python
#
# author: dec 2020
# <NAME> - https://cassota.gitlab.io
import sys
import os
import glob

if __name__ == '__main__':
    if len(sys.argv) < 4:
        print("usage: %s <kaldi-egs-dir> <ctm-in-dir> <pts-out-dir>" % sys.argv[0])
        sys.exit(1)
    proj_dir = sys.argv[1]
    ctm_dir = sys.argv[2]
    pts_dir = sys.argv[3]
    if not os.path.isdir(proj_dir):
        print("[%s] error: project dir expected to exist: "
              "'%s'" % (sys.argv[0], proj_dir))
        sys.exit(1)
    if not os.path.isdir(ctm_dir):
        print("[%s] error: ctm dir expected to exist: "
              "'%s'" % (sys.argv[0], ctm_dir))
        sys.exit(1)
    if not os.path.isdir(pts_dir):
        print("[%s] error: pts dir expected to exist: "
              "'%s'" % (sys.argv[0], pts_dir))
        sys.exit(1)
    phones_file = os.path.join(proj_dir, "data", "lang", "phones.txt")
    if not os.path.isfile(phones_file):
        print("[%s] error: file expected to exist: "
              "'%s'" % (sys.argv[0], phones_file))
        sys.exit(1)
    print("[%s] mapping file '%s'" % (sys.argv[0], phones_file))
    with open(phones_file) as f:
        phones = f.readlines()
    mapping = {}
    for line in phones:
        phone, phoneid = line.split()
        mapping[phoneid] = phone.split("_")[0]
    for ctm_file in sorted(glob.glob(os.path.join(ctm_dir, "*.ctm"))):
        print("\r[%s] processing file %s" % (sys.argv[0], ctm_file),
              end=" ", flush=True)
        pts = []
        with open(ctm_file) as ctm:
            for line in ctm:
                uttid, ch, start, dur, phoneid = line.split()
                p, ts = mapping[phoneid], float(start) + float(dur)
                pts.append("%s\t%.3f" % (p, ts))
        pts_file = os.path.basename(ctm_file.replace(".ctm", ".pts"))
        pts_file = os.path.join(pts_dir, pts_file)
        with open(pts_file, "w") as f:
            for item in pts:
                f.write(item.expandtabs(8) + "\n")
    print()
[ "os.path.isdir", "os.path.isfile", "os.path.join", "sys.exit" ]
[((851, 903), 'os.path.join', 'os.path.join', (['proj_dir', '"""data"""', '"""lang"""', '"""phones.txt"""'], {}), "(proj_dir, 'data', 'lang', 'phones.txt')\n", (863, 903), False, 'import os\n'), ((260, 271), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (268, 271), False, 'import sys\n'), ((363, 386), 'os.path.isdir', 'os.path.isdir', (['proj_dir'], {}), '(proj_dir)\n', (376, 386), False, 'import os\n'), ((504, 515), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (512, 515), False, 'import sys\n'), ((527, 549), 'os.path.isdir', 'os.path.isdir', (['ctm_dir'], {}), '(ctm_dir)\n', (540, 549), False, 'import os\n'), ((662, 673), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (670, 673), False, 'import sys\n'), ((685, 707), 'os.path.isdir', 'os.path.isdir', (['pts_dir'], {}), '(pts_dir)\n', (698, 707), False, 'import os\n'), ((820, 831), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (828, 831), False, 'import sys\n'), ((915, 942), 'os.path.isfile', 'os.path.isfile', (['phones_file'], {}), '(phones_file)\n', (929, 942), False, 'import os\n'), ((1056, 1067), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1064, 1067), False, 'import sys\n'), ((1851, 1882), 'os.path.join', 'os.path.join', (['pts_dir', 'pts_file'], {}), '(pts_dir, pts_file)\n', (1863, 1882), False, 'import os\n'), ((1362, 1392), 'os.path.join', 'os.path.join', (['ctm_dir', '"""*.ctm"""'], {}), "(ctm_dir, '*.ctm')\n", (1374, 1392), False, 'import os\n')]
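An illustrative sketch of the per-line CTM transform the script above performs, with inline sample data instead of files; the phone table and the CTM line are made up for the example.

mapping = {"1": "SIL", "42": "AH"}     # phone-id -> phone, "_" suffix stripped
ctm_line = "utt-001 1 0.250 0.090 42"  # uttid ch start dur phone-id

uttid, ch, start, dur, phoneid = ctm_line.split()
phone, end_ts = mapping[phoneid], float(start) + float(dur)
print("%s\t%.3f" % (phone, end_ts))    # -> "AH\t0.340"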
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, too-many-locals, too-many-statements "Scan related operators" from typing import Callable, Optional, Union import tvm from tvm import te from tvm.contrib.thrust import can_use_rocthrust, can_use_thrust from .. import tag from ..math import cast from ..transform import expand_dims, reshape, squeeze, transpose from ..utils import ceil_div, get_const_int, prod, swap from .injective import schedule_injective_from_existing def _get_thrust_func_name(tvmop): tvmop_to_thrust_func_name = {tvm.tir.generic.add: "tvm.contrib.thrust.sum_scan"} assert tvmop in tvmop_to_thrust_func_name, "{} not supported by thrust".format(tvmop) return tvmop_to_thrust_func_name[tvmop] def exclusive_scan_ir(data, output, reduction=None, binop=tvm.tir.generic.add, identity_value=0): """Low level IR to do exclusive sum scan along rows of 2D input. Parameters ---------- data : Buffer Input N-D Buffer. Scan is done over the innermost axis. output: Buffer A buffer to store the output scan, of the same shape as data reduction: Buffer, optional (N-1)-D Buffer, to store the sum of each scan axis. binop: function, optional A binary associative op to use for scan. The function takes two TIR expressions and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute prefix sum. identity_value: int or float A value for the binary operation which provides the identity property. E.g. if * is your operator and i is the identity_value then a * i = a for all a in the domain of your operation. 
""" batch_size = prod(data.shape[:-1]) scan_axis_size = data.shape[-1] ib = tvm.tir.ir_builder.create() data = ib.buffer_ptr(data) output = ib.buffer_ptr(output) out_dtype = output.dtype if reduction is not None: reduction = ib.buffer_ptr(reduction) max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads) with ib.if_scope(scan_axis_size == 0): with ib.new_scope(): bx = te.thread_axis("blockIdx.x") ib.scope_attr(bx, "thread_extent", batch_size) with ib.if_scope(bx < batch_size): if reduction is not None: reduction[bx] = cast(identity_value, out_dtype) with ib.else_scope(): with ib.new_scope(): nthread_tx = max_threads nthread_bx = ceil_div(scan_axis_size, max_threads) nthread_by = batch_size tx = te.thread_axis("threadIdx.x") bx = te.thread_axis("blockIdx.x") by = te.thread_axis("blockIdx.y") ib.scope_attr(tx, "thread_extent", nthread_tx) ib.scope_attr(bx, "thread_extent", nthread_bx) ib.scope_attr(by, "thread_extent", nthread_by) tid = bx * nthread_tx + tx with ib.if_scope(tid < scan_axis_size): output[by * scan_axis_size + tid] = cast(data[by * scan_axis_size + tid], out_dtype) nthread_tx = max_threads nthread_bx = ceil_div(scan_axis_size, max_threads) nthread_by = batch_size # The following algorithm performs parallel exclusive scan # Up Sweep of exclusive scan lim = tvm.tir.generic.cast( tvm.tir.ceil(tvm.tir.log2(tvm.tir.generic.cast(scan_axis_size, "float64"))), "int64" ) with ib.for_range(0, lim, dtype="int64") as l2_width: width = 2 << l2_width with ib.new_scope(): tx = te.thread_axis("threadIdx.x") bx = te.thread_axis("blockIdx.x") ib.scope_attr(tx, "thread_extent", nthread_tx) ib.scope_attr( bx, "thread_extent", tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"), ) tid = bx * nthread_tx + tx by = te.thread_axis("blockIdx.y") ib.scope_attr(by, "thread_extent", nthread_by) start = ib.allocate("int64", (1,), name="start", scope="local") middle = ib.allocate("int64", (1,), name="middle", scope="local") end = ib.allocate("int64", (1,), name="end", scope="local") start[0] = width * tid with ib.if_scope(start[0] < scan_axis_size): middle[0] = start[0] + tvm.tir.indexdiv(width, 2) end[0] = tvm.te.min(start[0] + width, scan_axis_size) with ib.if_scope(middle[0] < scan_axis_size): output[by * scan_axis_size + end[0] - 1] = binop( output[by * scan_axis_size + end[0] - 1], output[by * scan_axis_size + middle[0] - 1], ) # Down Sweep of exclusive scan with ib.new_scope(): bx = te.thread_axis("blockIdx.x") ib.scope_attr(bx, "thread_extent", batch_size) with ib.if_scope(bx < batch_size): if reduction is not None: reduction[bx] = output[(bx + 1) * scan_axis_size - 1] output[(bx + 1) * scan_axis_size - 1] = cast(identity_value, out_dtype) with ib.for_range(0, lim, dtype="int64") as l2_width: width = 2 << (lim - l2_width - 1) with ib.new_scope(): tx = te.thread_axis("threadIdx.x") bx = te.thread_axis("blockIdx.x") ib.scope_attr(tx, "thread_extent", nthread_tx) ib.scope_attr( bx, "thread_extent", tvm.tir.generic.cast(ceil_div(scan_axis_size, max_threads * width), "int32"), ) tid = bx * nthread_tx + tx by = te.thread_axis("blockIdx.y") ib.scope_attr(by, "thread_extent", nthread_by) start = ib.allocate("int64", (1,), name="start", scope="local") middle = ib.allocate("int64", (1,), name="middle", scope="local") end = ib.allocate("int64", (1,), name="end", scope="local") tmp = ib.allocate(out_dtype, (1,), name="end", scope="local") start[0] = width * tid with ib.if_scope(tvm.tir.all(start[0] < scan_axis_size)): middle[0] = start[0] + tvm.tir.indexdiv(width, 
2) end[0] = tvm.tir.min(start[0] + width, scan_axis_size) with ib.if_scope(middle[0] < scan_axis_size): tmp[0] = output[by * scan_axis_size + middle[0] - 1] output[by * scan_axis_size + middle[0] - 1] = output[ by * scan_axis_size + end[0] - 1 ] output[by * scan_axis_size + end[0] - 1] = binop( output[by * scan_axis_size + end[0] - 1], tmp[0] ) return ib.get() def get_reduction_from_exclusive_scan(data, ex_scan_output, binop=tvm.tir.generic.add): """Return the sum of the last element of data and the exclusive scan output. The is the reduction of data along each row (for 2-D case). Parameters ---------- data : tvm.te.Tensor Input data of any shape ex_scan_output : tvm.te.Tensor The output of exclusive scan on data binop: function, optional A binary associative op to use for scan. The function takes two TIR expressions and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute prefix sum. Returns ------- reduction : tvm.te.Tensor (N-1)-D tensor storing the reduction of each scan axis. """ ndim = len(data.shape) if ndim == 1: data = expand_dims(data, axis=0) ex_scan_output = expand_dims(ex_scan_output, axis=0) def ir(data, data_ex_scan, reduction): batch_size = prod(data.shape[:-1]) scan_axis_size = data.shape[-1] ib = tvm.tir.ir_builder.create() data = ib.buffer_ptr(data) data_ex_scan = ib.buffer_ptr(data_ex_scan) reduction = ib.buffer_ptr(reduction) max_threads = int(tvm.target.Target.current(allow_none=False).max_num_threads) with ib.new_scope(): nthread_tx = max_threads nthread_bx = ceil_div(batch_size, max_threads) tx = te.thread_axis("threadIdx.x") bx = te.thread_axis("blockIdx.x") ib.scope_attr(tx, "thread_extent", nthread_tx) ib.scope_attr(bx, "thread_extent", nthread_bx) tid = bx * max_threads + tx with ib.if_scope(tid < batch_size): with ib.if_scope(scan_axis_size > 0): reduction[tid] = binop( data_ex_scan[tid * scan_axis_size + scan_axis_size - 1], data[tid * scan_axis_size + scan_axis_size - 1], ) with ib.else_scope(): reduction[tid] = 0 return ib.get() data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "valid_indices_buf", data_alignment=8) ex_scan_output_buf = tvm.tir.decl_buffer( ex_scan_output.shape, ex_scan_output.dtype, "ex_scan_output_buf", data_alignment=8 ) reduction = te.extern( [data.shape[:-1]], [data, ex_scan_output], lambda ins, outs: ir(ins[0], ins[1], outs[0]), dtype=[ex_scan_output.dtype], in_buffers=[data_buf, ex_scan_output_buf], name="ex_scan_reduction", tag="ex_scan_reduction_gpu", ) if ndim == 1: return squeeze(reduction, 0) return reduction def scan_thrust( data, output_dtype, exclusive=True, return_reduction=False, binop=tvm.tir.generic.add ): """Do exclusive or inclusive scan on 1D or multidimensional input, using thrust. Parameters ---------- data : tvm.te.Tensor Input data of any shape. The scan is done over the innermost axis. output_dtype: string The dtype of the output scan tensor. exclusive: bool, optional Whether or not do exclusive or inclusive scan. return_reduction: bool, optional Whether or not return a (N-1)-D tensor storing the reduction of each scan axis. Reductions are computed as part of the upsweep pass, so there is no extra cost. If False, reductions are ignored. It must be False when exclusive is False. binop: function, optional A binary associative op to use for scan. Since we need to lookup the corresponding thrust function, arbitrariy callables are not supported. Currently only tvm.tir.generic.add can be passed in. 
Returns ------- output : tvm.te.Tensor A N-D tensor of the same rank N and shape as the input data. reduction : tvm.te.Tensor, optional (N-1)-D tensor storing the reduction of each scan axis. Returned if return_reduction is True. """ data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8) output_buf = tvm.tir.decl_buffer(data.shape, output_dtype, "output_buf", data_alignment=8) output = te.extern( [data.shape], [data], lambda ins, outs: tvm.tir.call_packed( _get_thrust_func_name(binop), ins[0], outs[0], exclusive ), dtype=[output_dtype], in_buffers=[data_buf], out_buffers=[output_buf], name="exclusive_scan_thrust", tag="exclusive_scan_thrust_gpu", ) if return_reduction: assert exclusive, "return_reduction should be False for inclusive scan" reduction = get_reduction_from_exclusive_scan(data, output, binop) return output, reduction return output def exclusive_scan( data, axis=-1, return_reduction=False, output_dtype=None, binop=tvm.tir.generic.add, identity_value=0, ): """Do exclusive scan on 1D or multidimensional input. Parameters ---------- data : tvm.te.Tensor Input data of any shape. axis: int, optional The axis to do scan on. By default, scan is done on the innermost axis. return_reduction: bool, optional Whether or not return a tensor storing the reduction over each scan axis. If the input rank is N, this tensor is of rank N - 1. Reductions are computed as part of the upsweep pass, so there is no extra cost. If False, reductions are ignored. output_dtype: string, optional The dtype of the output scan tensor. If not provided, the dtype of the input is used. binop: function, optional A binary associative op to use for scan. The function takes two TIR expressions and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute prefix sum. identity_value: int or float A value for the binary operation which provides the identity property. E.g. if * is your operator and i is the identity_value then a * i = a for all a in the domain of your operation. Returns ------- output : tvm.te.Tensor A N-D tensor of the same rank N and shape as the input data. reduction : tvm.te.Tensor, optional (N-1)-D tensor storing the reduction of each scan axis. Returned if return_reduction is True. """ def do_scan(data, output_dtype): target = tvm.target.Target.current() # TODO: add support for a prod_scan if ( target and binop == tvm.tir.generic.add and ( can_use_thrust(target, "tvm.contrib.thrust.sum_scan") or can_use_rocthrust(target, "tvm.contrib.thrust.sum_scan") ) ): return scan_thrust( data, output_dtype, exclusive=True, return_reduction=return_reduction, binop=binop ) if ndim == 1: # TIR exclusive scan accepts only 2D or higher-rank inputs. 
data = expand_dims(data, axis=0) data_buf = tvm.tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8) output_buf = tvm.tir.decl_buffer(data.shape, output_dtype, "output_buf", data_alignment=8) if return_reduction: output, reduction = te.extern( [data.shape, data.shape[:-1]], [data], lambda ins, outs: exclusive_scan_ir( ins[0], outs[0], outs[1], binop=binop, identity_value=identity_value ), dtype=[output_dtype, output_dtype], in_buffers=[data_buf], name="exclusive_scan", tag="exclusive_scan_gpu", ) else: output = te.extern( [data.shape], [data], lambda ins, outs: exclusive_scan_ir( ins[0], outs[0], binop=binop, identity_value=identity_value ), dtype=[output_dtype], in_buffers=[data_buf], out_buffers=[output_buf], name="exclusive_scan", tag="exclusive_scan_gpu", ) reduction = None if ndim == 1: output = squeeze(output, 0) if return_reduction: reduction = squeeze(reduction, 0) if return_reduction: return output, reduction return output if output_dtype is None or output_dtype == "": output_dtype = data.dtype ndim = len(data.shape) if axis < 0: axis += ndim # If scan axis is not the innermost one, swap the scan and the innermost axes # Scan is always done on the innermost axis, for performance reason. if axis != ndim - 1: axes = swap(list(range(ndim)), axis) data = transpose(data, axes) if return_reduction: output, reduction = do_scan(data, output_dtype) else: output = do_scan(data, output_dtype) if axis != ndim - 1: axes = swap(list(range(ndim)), axis) output = transpose(output, axes) if return_reduction: return output, reduction return output def inclusive_scan(data, axis=-1, output_dtype=None, binop=tvm.tir.generic.add, identity_value=0): """Do inclusive scan on 1D or multidimensional input. Parameters ---------- data : tvm.te.Tensor Input data of any shape. axis: int, optional The axis to do scan on. By default, scan is done on the innermost axis. output_dtype: string, optional The dtype of the output scan tensor. If not provided, the dtype of the input is used. binop: function, optional A binary associative op to use for scan. The function takes two TIR expressions and produce a new TIR expression. By default it uses tvm.tir.generic.add to compute prefix sum. identity_value: int or float A value for the binary operation which provides the identity property. E.g. if * is your operator and i is the identity_value then a * i = a for all a in the domain of your operation. Returns ------- output : tvm.te.Tensor A N-D tensor of the same rank N as the input data. """ ex_scan = exclusive_scan( data, axis, output_dtype=output_dtype, binop=binop, identity_value=identity_value ) if output_dtype is not None and data.dtype != output_dtype and output_dtype != "": data = cast(data, output_dtype) return binop(data, ex_scan) def schedule_scan(outs): """Schedule for scan operator. Parameters ---------- outs: Array of Tensor The computation graph description of scan in the format of an array of tensors. Returns ------- s: Schedule The computation schedule for the op. 
""" outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) scheduled_ops = [] def traverse(op): if tag.is_injective(op.tag): schedule_injective_from_existing(s, op.output(0)) for tensor in op.input_tensors: if tensor.op.input_tensors and tensor.op not in scheduled_ops: traverse(tensor.op) scheduled_ops.append(op) for out in outs: traverse(out.op) return s def scanop( data: tvm.te.Tensor, binop: Callable[["tvm.Expr", "tvm.Expr"], "tvm.Expr"], identity_value: Union[float, int], axis: Optional[int] = None, dtype: Optional[str] = None, exclusive: Optional[bool] = None, ) -> tvm.te.Tensor: """Cumulative binary operator (scan) with similar axis behavior as np.cumsum and np.cumprod. See cumprod and cumsum for an example of use. E.g. if * is your binary operator and the input tensor is [1, 2, 3, 4] the output may be [1, 1 * 2, 1 * 2 * 3, 1 * 2 * 3 * 4] Parameters ---------- data : tvm.te.Tensor The input data to the operator. binop: Callable (tvm.Expr, tvm.Expr) -> tvm.Expr A binary operator which should be associative and commutative. E.g. if * is your operator then a * (b * c) = (a * b) * c and a * b = b * a identity_value: int or float A value for the binary operation which provides the identity property. E.g. if * is your operator and i is the identity_value then a * i = a for all a in the domain of your operation. axis : int, optional Axis along which the operation is computed. The default (None) is to compute the cumulative operation over the flattened array. dtype : string, optional Type of the returned array and of the accumulator in which the elements are computed. If dtype is not specified, it defaults to the dtype of data. exclusive : bool, optional If true will return exclusive cumulative operation in which the first element is not included. In other terms, if true, the j-th output element would be the cumulative operation of the first (j-1) elements. Otherwise, it would be the cumulative operation of the first j elements. Returns ------- result : tvm.te.Tensor The result has the same size as data, and the same shape as data if axis is not None. If axis is None, the result is a 1-d array. """ if axis is None: axis = 0 data = reshape(data, (prod(data.shape),)) axis = get_const_int(axis) if exclusive is not None and exclusive: return exclusive_scan( data, axis, output_dtype=dtype, binop=binop, identity_value=identity_value ) return inclusive_scan( data, axis, output_dtype=dtype, binop=binop, identity_value=identity_value ) def cumsum( data: tvm.te.Tensor, axis: Optional[int] = None, dtype: Optional[int] = None, exclusive: Optional[bool] = None, ) -> tvm.te.Tensor: """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis. Parameters ---------- data : tvm.te.Tensor The input data to the operator. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. dtype : string, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of data. exclusive : bool, optional If true will return exclusive sum in which the first element is not included. In other terms, if true, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. Returns ------- result : tvm.te.Tensor The result has the same size as data, and the same shape as data if axis is not None. 
If axis is None, the result is a 1-d array. """ return scanop( data=data, binop=tvm.tir.generic.add, identity_value=0, axis=axis, dtype=dtype, exclusive=exclusive, ) def cumprod( data: tvm.te.Tensor, axis: Optional[int] = None, dtype: Optional[int] = None, exclusive: Optional[bool] = None, ): """Numpy style cumprod op. Return the cumulative product of the elements along a given axis. Parameters ---------- data : tvm.te.Tensor The input data to the operator. axis : int, optional Axis along which the cumulative product is computed. The default (None) is to compute the cumproduct over the flattened array. dtype : string, optional Type of the returned array and of the accumulator in which the elements are multiplied. If dtype is not specified, it defaults to the dtype of data. exclusive : bool, optional If True, will return exclusive product in which the first element is not included. In other terms, if True, the j-th output element would be the product of the first (j-1) elements. Otherwise, it would be the product of the first j elements. Returns ------- result : tvm.te.Tensor The result has the same size as data, and the same shape as data if axis is not None. If axis is None, the result is a 1-d array. """ return scanop( data=data, binop=tvm.tir.generic.multiply, identity_value=1, axis=axis, dtype=dtype, exclusive=exclusive, )
[ "tvm.tir.decl_buffer", "tvm.te.min", "tvm.tir.generic.cast", "tvm.target.Target.current", "tvm.contrib.thrust.can_use_rocthrust", "tvm.contrib.thrust.can_use_thrust", "tvm.te.thread_axis", "tvm.tir.min", "tvm.te.create_schedule", "tvm.tir.ir_builder.create", "tvm.tir.indexdiv", "tvm.tir.all" ]
[((2527, 2554), 'tvm.tir.ir_builder.create', 'tvm.tir.ir_builder.create', ([], {}), '()\n', (2552, 2554), False, 'import tvm\n'), ((9959, 10045), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['data.shape', 'data.dtype', '"""valid_indices_buf"""'], {'data_alignment': '(8)'}), "(data.shape, data.dtype, 'valid_indices_buf',\n data_alignment=8)\n", (9978, 10045), False, 'import tvm\n'), ((10067, 10174), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['ex_scan_output.shape', 'ex_scan_output.dtype', '"""ex_scan_output_buf"""'], {'data_alignment': '(8)'}), "(ex_scan_output.shape, ex_scan_output.dtype,\n 'ex_scan_output_buf', data_alignment=8)\n", (10086, 10174), False, 'import tvm\n'), ((11897, 11970), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['data.shape', 'data.dtype', '"""data_buf"""'], {'data_alignment': '(8)'}), "(data.shape, data.dtype, 'data_buf', data_alignment=8)\n", (11916, 11970), False, 'import tvm\n'), ((11988, 12065), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['data.shape', 'output_dtype', '"""output_buf"""'], {'data_alignment': '(8)'}), "(data.shape, output_dtype, 'output_buf', data_alignment=8)\n", (12007, 12065), False, 'import tvm\n'), ((18847, 18887), 'tvm.te.create_schedule', 'te.create_schedule', (['[x.op for x in outs]'], {}), '([x.op for x in outs])\n', (18865, 18887), False, 'from tvm import te\n'), ((8895, 8922), 'tvm.tir.ir_builder.create', 'tvm.tir.ir_builder.create', ([], {}), '()\n', (8920, 8922), False, 'import tvm\n'), ((14323, 14350), 'tvm.target.Target.current', 'tvm.target.Target.current', ([], {}), '()\n', (14348, 14350), False, 'import tvm\n'), ((14967, 15040), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['data.shape', 'data.dtype', '"""data_buf"""'], {'data_alignment': '(8)'}), "(data.shape, data.dtype, 'data_buf', data_alignment=8)\n", (14986, 15040), False, 'import tvm\n'), ((15062, 15139), 'tvm.tir.decl_buffer', 'tvm.tir.decl_buffer', (['data.shape', 'output_dtype', '"""output_buf"""'], {'data_alignment': '(8)'}), "(data.shape, output_dtype, 'output_buf', data_alignment=8)\n", (15081, 15139), False, 'import tvm\n'), ((2751, 2794), 'tvm.target.Target.current', 'tvm.target.Target.current', ([], {'allow_none': '(False)'}), '(allow_none=False)\n', (2776, 2794), False, 'import tvm\n'), ((2902, 2930), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (2916, 2930), False, 'from tvm import te\n'), ((3355, 3384), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (3369, 3384), False, 'from tvm import te\n'), ((3402, 3430), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (3416, 3430), False, 'from tvm import te\n'), ((3448, 3476), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (3462, 3476), False, 'from tvm import te\n'), ((5755, 5783), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (5769, 5783), False, 'from tvm import te\n'), ((9285, 9314), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (9299, 9314), False, 'from tvm import te\n'), ((9332, 9360), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (9346, 9360), False, 'from tvm import te\n'), ((4370, 4399), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (4384, 4399), False, 'from tvm import te\n'), ((4421, 4449), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), 
"('blockIdx.x')\n", (4435, 4449), False, 'from tvm import te\n'), ((4786, 4814), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (4800, 4814), False, 'from tvm import te\n'), ((6258, 6287), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (6272, 6287), False, 'from tvm import te\n'), ((6309, 6337), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (6323, 6337), False, 'from tvm import te\n'), ((6674, 6702), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (6688, 6702), False, 'from tvm import te\n'), ((9082, 9125), 'tvm.target.Target.current', 'tvm.target.Target.current', ([], {'allow_none': '(False)'}), '(allow_none=False)\n', (9107, 9125), False, 'import tvm\n'), ((14507, 14560), 'tvm.contrib.thrust.can_use_thrust', 'can_use_thrust', (['target', '"""tvm.contrib.thrust.sum_scan"""'], {}), "(target, 'tvm.contrib.thrust.sum_scan')\n", (14521, 14560), False, 'from tvm.contrib.thrust import can_use_rocthrust, can_use_thrust\n'), ((14580, 14636), 'tvm.contrib.thrust.can_use_rocthrust', 'can_use_rocthrust', (['target', '"""tvm.contrib.thrust.sum_scan"""'], {}), "(target, 'tvm.contrib.thrust.sum_scan')\n", (14597, 14636), False, 'from tvm.contrib.thrust import can_use_rocthrust, can_use_thrust\n'), ((4150, 4197), 'tvm.tir.generic.cast', 'tvm.tir.generic.cast', (['scan_axis_size', '"""float64"""'], {}), "(scan_axis_size, 'float64')\n", (4170, 4197), False, 'import tvm\n'), ((5315, 5359), 'tvm.te.min', 'tvm.te.min', (['(start[0] + width)', 'scan_axis_size'], {}), '(start[0] + width, scan_axis_size)\n', (5325, 5359), False, 'import tvm\n'), ((7294, 7339), 'tvm.tir.min', 'tvm.tir.min', (['(start[0] + width)', 'scan_axis_size'], {}), '(start[0] + width, scan_axis_size)\n', (7305, 7339), False, 'import tvm\n'), ((5259, 5285), 'tvm.tir.indexdiv', 'tvm.tir.indexdiv', (['width', '(2)'], {}), '(width, 2)\n', (5275, 5285), False, 'import tvm\n'), ((7154, 7192), 'tvm.tir.all', 'tvm.tir.all', (['(start[0] < scan_axis_size)'], {}), '(start[0] < scan_axis_size)\n', (7165, 7192), False, 'import tvm\n'), ((7238, 7264), 'tvm.tir.indexdiv', 'tvm.tir.indexdiv', (['width', '(2)'], {}), '(width, 2)\n', (7254, 7264), False, 'import tvm\n')]
#!/usr/bin/env python # encoding: utf-8 # # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ''' Tests for hyperparameter tuning ''' from __future__ import print_function, division, absolute_import, unicode_literals import os import re import six import swat.utils.testing as tm import unittest import pipefitter from pipefitter.estimator import DecisionTree from pipefitter.model_selection import HyperParameterTuning class TestHyper(tm.TestCase): def test_params(self): estimator = DecisionTree() param_grid=dict( max_depth=[6, 10], leaf_size=[3, 5], ) # Basic settings and defaults hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid) self.assertEqual(hpt.params['estimator'], estimator) self.assertEqual(hpt.params['param_grid'], param_grid) self.assertEqual(hpt.params['cv'], 3) self.assertTrue(hpt.params['score_type'] is None) # cv = int hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=3) self.assertEqual(hpt.params['cv'], 3) # cv = float hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=0.3) self.assertEqual(hpt.params['cv'], 0.3) # cv = -float with self.assertRaises(ValueError): hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=-0.1) # cv = float > 1 with self.assertRaises(ValueError): hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=1.0001) # cv = generator gen = iter([0]) hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=gen) self.assertEqual(hpt.params['cv'], gen) # cv = list items = [0] hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=items) self.assertEqual(hpt.params['cv'], items) # cv = string with self.assertRaises(TypeError): HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv='foo') # cv = 1 (lower than minimum) with self.assertRaises(ValueError): hpt = HyperParameterTuning(estimator=estimator, param_grid=param_grid, cv=1) if __name__ == '__main__': tm.runtests()
[ "swat.utils.testing.runtests", "pipefitter.model_selection.HyperParameterTuning", "pipefitter.estimator.DecisionTree" ]
[((3342, 3355), 'swat.utils.testing.runtests', 'tm.runtests', ([], {}), '()\n', (3353, 3355), True, 'import swat.utils.testing as tm\n'), ((1033, 1047), 'pipefitter.estimator.DecisionTree', 'DecisionTree', ([], {}), '()\n', (1045, 1047), False, 'from pipefitter.estimator import DecisionTree\n'), ((1197, 1261), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid'}), '(estimator=estimator, param_grid=param_grid)\n', (1217, 1261), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((1559, 1629), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '(3)'}), '(estimator=estimator, param_grid=param_grid, cv=3)\n', (1579, 1629), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((1782, 1854), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '(0.3)'}), '(estimator=estimator, param_grid=param_grid, cv=0.3)\n', (1802, 1854), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((2438, 2510), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': 'gen'}), '(estimator=estimator, param_grid=param_grid, cv=gen)\n', (2458, 2510), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((2684, 2758), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': 'items'}), '(estimator=estimator, param_grid=param_grid, cv=items)\n', (2704, 2758), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((2058, 2131), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '(-0.1)'}), '(estimator=estimator, param_grid=param_grid, cv=-0.1)\n', (2078, 2131), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((2259, 2334), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '(1.0001)'}), '(estimator=estimator, param_grid=param_grid, cv=1.0001)\n', (2279, 2334), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((2957, 3031), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '"""foo"""'}), "(estimator=estimator, param_grid=param_grid, cv='foo')\n", (2977, 3031), False, 'from pipefitter.model_selection import HyperParameterTuning\n'), ((3199, 3269), 'pipefitter.model_selection.HyperParameterTuning', 'HyperParameterTuning', ([], {'estimator': 'estimator', 'param_grid': 'param_grid', 'cv': '(1)'}), '(estimator=estimator, param_grid=param_grid, cv=1)\n', (3219, 3269), False, 'from pipefitter.model_selection import HyperParameterTuning\n')]
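A sketch of the constructor contract the tests above exercise: the tests suggest cv accepts an int of at least 2, a fraction between 0 and 1, or an iterable, and rejects strings. This assumes pipefitter is installed.

from pipefitter.estimator import DecisionTree
from pipefitter.model_selection import HyperParameterTuning

hpt = HyperParameterTuning(
    estimator=DecisionTree(),
    param_grid=dict(max_depth=[6, 10], leaf_size=[3, 5]),
    cv=3,                   # 3-fold cross validation, matching the default
)
print(hpt.params["cv"])     # -> 3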
#!/usr/bin/env python3 import numpy as np import pandas as pd import random import os,sys import shutil ##################################MAIN##################################### def save_train_val(dir,in_csv): name_path_save_val = os.path.join(dir,'val/') if not os.path.isdir(name_path_save_val): os.mkdir(name_path_save_val) name_path_save_train = os.path.join(dir,'train/') if not os.path.isdir(name_path_save_train): os.mkdir(name_path_save_train) ## val df = pd.read_csv(os.path.join(in_csv,'val_.csv')) print(df.label.values) for i in range(len(df.label.values)): if not os.path.isdir(name_path_save_val+str(df.label.values[i])): os.mkdir(name_path_save_val+str(df.label.values[i])) name_image = df.path.values[i].split('/')[-2]+'-'+df.path.values[i].split('/')[-1] shutil.copyfile(df.path.values[i], name_path_save_val+str(df.label.values[i])+'/'+name_image) print(name_image) ## train df = pd.read_csv(os.path.join(in_csv,'train_.csv')) print(df.label.values) for i in range(len(df.label.values)): if not os.path.isdir(name_path_save_train+str(df.label.values[i])): os.mkdir(name_path_save_train+str(df.label.values[i])) name_image = df.path.values[i].split('/')[-2]+'-'+df.path.values[i].split('/')[-1] shutil.copyfile(df.path.values[i], name_path_save_train+str(df.label.values[i])+'/'+name_image) print(name_image)
[ "os.path.isdir", "os.mkdir", "os.path.join" ]
[((238, 263), 'os.path.join', 'os.path.join', (['dir', '"""val/"""'], {}), "(dir, 'val/')\n", (250, 263), False, 'import os, sys\n'), ((361, 388), 'os.path.join', 'os.path.join', (['dir', '"""train/"""'], {}), "(dir, 'train/')\n", (373, 388), False, 'import os, sys\n'), ((271, 304), 'os.path.isdir', 'os.path.isdir', (['name_path_save_val'], {}), '(name_path_save_val)\n', (284, 304), False, 'import os, sys\n'), ((308, 336), 'os.mkdir', 'os.mkdir', (['name_path_save_val'], {}), '(name_path_save_val)\n', (316, 336), False, 'import os, sys\n'), ((396, 431), 'os.path.isdir', 'os.path.isdir', (['name_path_save_train'], {}), '(name_path_save_train)\n', (409, 431), False, 'import os, sys\n'), ((435, 465), 'os.mkdir', 'os.mkdir', (['name_path_save_train'], {}), '(name_path_save_train)\n', (443, 465), False, 'import os, sys\n'), ((493, 525), 'os.path.join', 'os.path.join', (['in_csv', '"""val_.csv"""'], {}), "(in_csv, 'val_.csv')\n", (505, 525), False, 'import os, sys\n'), ((945, 979), 'os.path.join', 'os.path.join', (['in_csv', '"""train_.csv"""'], {}), "(in_csv, 'train_.csv')\n", (957, 979), False, 'import os, sys\n')]
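A hypothetical invocation of save_train_val above (the directory paths are made up): it expects <in_csv>/val_.csv and <in_csv>/train_.csv with 'path' and 'label' columns, and copies each image into <dir>/{val,train}/<label>/ under the name '<parent-dir>-<filename>'.

save_train_val(dir='/data/split',   # output root; val/ and train/ are created here
               in_csv='/data/csv')  # directory holding val_.csv and train_.csv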
# Generated by Django 3.0.7 on 2020-06-22 12:40 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Study_Place', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(max_length=100, verbose_name='название')), ('place', models.TextField(max_length=100, verbose_name='место')), ('type', models.PositiveSmallIntegerField(choices=[(1, 'Школа'), (2, 'Колледж')], default=1)), ], options={ 'verbose_name': 'место обучения', 'verbose_name_plural': 'места обучения', }, ), migrations.CreateModel( name='Study_Program', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(max_length=100, verbose_name='название')), ('count', models.IntegerField(verbose_name='количество студентов')), ], options={ 'verbose_name': 'программа обучения', 'verbose_name_plural': 'программы обучения', }, ), migrations.CreateModel( name='Abiturient', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('surname', models.TextField(max_length=100, verbose_name='фамилия')), ('name', models.TextField(max_length=100, verbose_name='имя')), ('secondname', models.TextField(max_length=100, verbose_name='отчество')), ('document_type', models.PositiveSmallIntegerField(choices=[(1, 'Паспорт'), (2, 'Свидетельство о рождении')], default=1, verbose_name='тип документа')), ('document_number', models.TextField(max_length=100, verbose_name='номер документа')), ('study_date', models.DateField(verbose_name='дата окончания')), ('award_type', models.PositiveSmallIntegerField(choices=[(1, 'Нет'), (2, 'Серебрянная медаль'), (3, 'Золотая медаль')], default=1, verbose_name='награда')), ('study_type', models.PositiveSmallIntegerField(choices=[(1, 'очная'), (2, 'очно-заочная'), (3, 'заочная')], default=1, verbose_name='формат обучения')), ('contract_type', models.PositiveSmallIntegerField(choices=[(1, 'бюджет'), (2, 'платный')], default=1, verbose_name='тип обучения')), ('student_type', models.PositiveSmallIntegerField(choices=[(1, 'нет'), (2, 'целевик'), (3, 'инвалид'), (4, 'сирота')], default=1, verbose_name='тип студента')), ('marks', models.CharField(max_length=200)), ('accepted', models.BooleanField(default=False, verbose_name='зачислен')), ('study_place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='abit.Study_Place', verbose_name='место учебы')), ('study_program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='abit.Study_Program', verbose_name='программа обучения')), ], options={ 'verbose_name': 'студент', 'verbose_name_plural': 'студенты', }, ), ]
[ "django.db.models.TextField", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.PositiveSmallIntegerField", "django.db.models.AutoField", "django.db.models.BooleanField", "django.db.models.IntegerField", "django.db.models.DateField" ]
[((340, 433), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((457, 514), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""название"""'}), "(max_length=100, verbose_name='название')\n", (473, 514), False, 'from django.db import migrations, models\n'), ((543, 597), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""место"""'}), "(max_length=100, verbose_name='место')\n", (559, 597), False, 'from django.db import migrations, models\n'), ((625, 712), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'Школа'), (2, 'Колледж')]", 'default': '(1)'}), "(choices=[(1, 'Школа'), (2, 'Колледж')],\n default=1)\n", (657, 712), False, 'from django.db import migrations, models\n'), ((991, 1084), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1007, 1084), False, 'from django.db import migrations, models\n'), ((1108, 1165), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""название"""'}), "(max_length=100, verbose_name='название')\n", (1124, 1165), False, 'from django.db import migrations, models\n'), ((1194, 1250), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""количество студентов"""'}), "(verbose_name='количество студентов')\n", (1213, 1250), False, 'from django.db import migrations, models\n'), ((1538, 1631), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1554, 1631), False, 'from django.db import migrations, models\n'), ((1658, 1714), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""фамилия"""'}), "(max_length=100, verbose_name='фамилия')\n", (1674, 1714), False, 'from django.db import migrations, models\n'), ((1742, 1794), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""имя"""'}), "(max_length=100, verbose_name='имя')\n", (1758, 1794), False, 'from django.db import migrations, models\n'), ((1828, 1885), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""отчество"""'}), "(max_length=100, verbose_name='отчество')\n", (1844, 1885), False, 'from django.db import migrations, models\n'), ((1922, 2058), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'Паспорт'), (2, 'Свидетельство о рождении')]", 'default': '(1)', 'verbose_name': '"""тип документа"""'}), "(choices=[(1, 'Паспорт'), (2,\n 'Свидетельство о рождении')], default=1, verbose_name='тип документа')\n", (1954, 2058), False, 'from django.db import migrations, models\n'), ((2093, 2157), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(100)', 'verbose_name': '"""номер документа"""'}), "(max_length=100, verbose_name='номер документа')\n", (2109, 2157), 
False, 'from django.db import migrations, models\n'), ((2191, 2238), 'django.db.models.DateField', 'models.DateField', ([], {'verbose_name': '"""дата окончания"""'}), "(verbose_name='дата окончания')\n", (2207, 2238), False, 'from django.db import migrations, models\n'), ((2272, 2420), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'Нет'), (2, 'Серебрянная медаль'), (3, 'Золотая медаль')]", 'default': '(1)', 'verbose_name': '"""награда"""'}), "(choices=[(1, 'Нет'), (2,\n 'Серебрянная медаль'), (3, 'Золотая медаль')], default=1, verbose_name=\n 'награда')\n", (2304, 2420), False, 'from django.db import migrations, models\n'), ((2445, 2585), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'очная'), (2, 'очно-заочная'), (3, 'заочная')]", 'default': '(1)', 'verbose_name': '"""формат обучения"""'}), "(choices=[(1, 'очная'), (2, 'очно-заочная'),\n (3, 'заочная')], default=1, verbose_name='формат обучения')\n", (2477, 2585), False, 'from django.db import migrations, models\n'), ((2618, 2735), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'бюджет'), (2, 'платный')]", 'default': '(1)', 'verbose_name': '"""тип обучения"""'}), "(choices=[(1, 'бюджет'), (2, 'платный')],\n default=1, verbose_name='тип обучения')\n", (2650, 2735), False, 'from django.db import migrations, models\n'), ((2767, 2912), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': "[(1, 'нет'), (2, 'целевик'), (3, 'инвалид'), (4, 'сирота')]", 'default': '(1)', 'verbose_name': '"""тип студента"""'}), "(choices=[(1, 'нет'), (2, 'целевик'), (3,\n 'инвалид'), (4, 'сирота')], default=1, verbose_name='тип студента')\n", (2799, 2912), False, 'from django.db import migrations, models\n'), ((2937, 2969), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2953, 2969), False, 'from django.db import migrations, models\n'), ((3001, 3060), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""зачислен"""'}), "(default=False, verbose_name='зачислен')\n", (3020, 3060), False, 'from django.db import migrations, models\n'), ((3095, 3213), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""abit.Study_Place"""', 'verbose_name': '"""место учебы"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'abit.Study_Place', verbose_name='место учебы')\n", (3112, 3213), False, 'from django.db import migrations, models\n'), ((3245, 3372), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""abit.Study_Program"""', 'verbose_name': '"""программа обучения"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'abit.Study_Program', verbose_name='программа обучения')\n", (3262, 3372), False, 'from django.db import migrations, models\n')]
import bcolz import numpy as np def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w') c.flush() def load_array(fname): return bcolz.open(fname)[:] probs = np.load('/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy') save_array('/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/ensemble/probs-part10.8.npy', probs)
[ "numpy.load", "bcolz.open", "bcolz.carray" ]
[((191, 305), 'numpy.load', 'np.load', (['"""/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy"""'], {}), "(\n '/home/chicm/ml/kgdata/carvana/results/single/UNet_double_1024_5/submit/049/probs-part10.8.npy'\n )\n", (198, 305), True, 'import numpy as np\n'), ((68, 110), 'bcolz.carray', 'bcolz.carray', (['arr'], {'rootdir': 'fname', 'mode': '"""w"""'}), "(arr, rootdir=fname, mode='w')\n", (80, 110), False, 'import bcolz\n'), ((160, 177), 'bcolz.open', 'bcolz.open', (['fname'], {}), '(fname)\n', (170, 177), False, 'import bcolz\n')]
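A round-trip sketch for the helpers above; note that bcolz writes a directory, not a single file. The array contents and the path are illustrative.

import numpy as np
import bcolz

arr = np.arange(10, dtype=np.float32)
c = bcolz.carray(arr, rootdir="/tmp/probs.bcolz", mode="w")
c.flush()
restored = bcolz.open("/tmp/probs.bcolz")[:]
assert np.array_equal(arr, restored)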
import json, boto3, base64 def getprocessor(env, name, source='core', scope=None): return name if ':' in name else '{lambda_namespace}-{source}-{name}'.format(lambda_namespace=env['lambda_namespace'], source=source, name='{}-{}'.format(scope, name) if scope else name) def main(event, context): ''' - triggered by trigger/query, trigger/version - executes the given query using the given record ''' env = context.client_context.env if context.client_context and context.client_context.env else event.get('_env', {}) client_context = base64.b64encode(bytes(json.dumps({'env': env}), 'utf-8')).decode('utf-8') s3 = boto3.resource('s3') bucket = s3.Bucket(env['bucket']) s3_client = boto3.client('s3') lambda_client = boto3.client('lambda') counter = 0 query_id = event.get('query_id') query_processor = event.get('processor') query_options = event.get('options') record_stub = event.get('record', {}) if query_id and record_stub: class_name = record_stub.get('@type') record_id = record_stub.get('@id') record_data = json.loads(s3_client.get_object(Bucket=env['bucket'], Key='{data_root}/record/{class_name}/{record_id}.json'.format(data_root=env['data_root'], class_name=class_name, record_id=record_id))['Body'].read().decode('utf-8')) if not query_processor: query_data = json.loads(s3_client.get_object(Bucket=env['bucket'], Key='{data_root}/query/{class_name}/{query_id}.json'.format(data_root=env['data_root'], class_name=class_name, query_id=query_id))['Body'].read().decode('utf-8')) query_processor = query_data.get('processor') query_options = query_data.get('options') query_payload = {'record': record_data, 'options': query_options} query_result = json.loads(lambda_client.invoke(FunctionName=getprocessor(env, query_processor, 'extension', 'query'), Payload=bytes(json.dumps(query_payload), 'utf-8'), ClientContext=client_context)['Payload'].read().decode('utf-8')) query_index_key = '{data_root}/query/{class_name}/{query_id}/{record_initial}.json'.format(data_root=env['data_root'], class_name=class_name, query_id=query_id, record_initial=record_id[0]) try: query_index = json.loads(s3_client.get_object(Bucket=env['bucket'], Key=query_index_key)['Body'].read().decode('utf-8')) except: query_index = [] if query_result is True and record_id not in query_index: query_index.append(record_id) query_index.sort() elif query_result is False and record_id in query_index: query_index.remove(record_id) query_index.sort() bucket.put_object(Body=bytes(json.dumps(query_index), 'utf-8'), Key=query_index_key, ContentType='application/json') counter = counter + 1 return counter
[ "boto3.resource", "boto3.client", "json.dumps" ]
[((651, 671), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (665, 671), False, 'import json, boto3, base64\n'), ((726, 744), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (738, 744), False, 'import json, boto3, base64\n'), ((765, 787), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (777, 787), False, 'import json, boto3, base64\n'), ((590, 614), 'json.dumps', 'json.dumps', (["{'env': env}"], {}), "({'env': env})\n", (600, 614), False, 'import json, boto3, base64\n'), ((2739, 2762), 'json.dumps', 'json.dumps', (['query_index'], {}), '(query_index)\n', (2749, 2762), False, 'import json, boto3, base64\n'), ((1934, 1959), 'json.dumps', 'json.dumps', (['query_payload'], {}), '(query_payload)\n', (1944, 1959), False, 'import json, boto3, base64\n')]
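The getprocessor naming rule above, exercised standalone with made-up env values: a name already containing ':' (e.g. a full ARN) passes through unchanged, otherwise '{namespace}-{source}-[{scope}-]{name}' is built.

env = {'lambda_namespace': 'myproj'}

def getprocessor(env, name, source='core', scope=None):
    return name if ':' in name else '{lambda_namespace}-{source}-{name}'.format(
        lambda_namespace=env['lambda_namespace'],
        source=source,
        name='{}-{}'.format(scope, name) if scope else name)

print(getprocessor(env, 'resolver', 'extension', 'query'))            # myproj-extension-query-resolver
print(getprocessor(env, 'arn:aws:lambda:us-east-1:123:function:x'))   # passes through unchanged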
""" Environment setup functions Try to keep the imports in this file to the python standard libraries. Otherwise some of the metatlas_repo/kernel validation errors will not correctly report problems with the notebook configuration """ import getpass import json import logging import os import re import shutil import subprocess import sys from pathlib import Path logger = logging.getLogger(__name__) SOURCE_NOTEBOOK = { "RT-Predict": "Workflow_Notebook_VS_Auto_RT_Predict_V2.ipynb", "ISTDsEtc": "Targeted.ipynb", "FinalEMA-HILIC": "Targeted.ipynb", } SOURCE_ATLAS_PREFIX = { "RT-Predict": None, "ISTDsEtc": "HILICz150_ANT20190824_PRD_IS_LabUnlab2_", "FinalEMA-HILIC": "HILICz150_ANT20190824_TPL_EMA_Unlab_", } def install_kernel(): """ Copies kernel.json from repo to active location under home directory. Only for use on NERC! """ logger.info('Installing kernel.json for "Metatlas Targeted".') repo_path = Path(__file__).resolve().parent.parent.parent source = repo_path / "notebooks" / "kernels" / "metatlas-targeted.kernel.json" dest_dir = Path.home() / ".local" / "share" / "jupyter" / "kernels" / "metatlas-targeted" os.makedirs(dest_dir, exist_ok=True) shutil.copyfile(source, dest_dir / "kernel.json") logger.info('Kernel installation complete. Reload Jupyter notebook page to see new kernel". ') def validate_kernel(): """ Raise error if problem with kernel When on NERSC, this will install the correct kernel if needed """ allowed_exe = [ "/global/common/software/m2650/metatlas-targeted-2021-10-13/bin/python", ] error_msg = "Invalid kernel setting in Jupyter Notebook." on_nersc = "METATLAS_LOCAL" not in os.environ if on_nersc and sys.executable not in allowed_exe: install_kernel() if "/global/common/software/m2650/metatlas-targeted" in sys.executable: logger.critical('Upgraded "Metatlas Targeted" kernel.') logger.critical('Please reselect "Metatlas Targeted" kernel for upgrade to become active.') else: logger.critical('Please check that the kernel is set to "Metatlas Targeted".') raise ValueError(error_msg) try: # pylint: disable=import-outside-toplevel,unused-import import dataset # noqa: F401 except ModuleNotFoundError as module_error: logger.critical( 'Could not find dataset module. Please check that the kernel is set to "Metatlas Targeted".' ) raise ModuleNotFoundError from module_error logger.debug("Kernel validation passed. 
Using python from %s.", sys.executable) def repo_dir(): """Returns a string with the path to the root of the Metatlas git repo""" return os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) def create_all_notebooks(output_type, base_output_dir, experiment_id, analysis_number): """ Creates Jupyter notebooks with appropriate filename and pre-populated parameters inputs: output_type: one of 'RT-Predict', 'ISTDsEtc', 'FinalEMA-HILIC' base_output_dir: project directory containing the experiment directories experiment_id: '_' delimited experiment identifier analysis_number: increment to not overwrite existing analysis outputs """ possible_outputs = ["RT-Predict", "ISTDsEtc", "FinalEMA-HILIC"] outputs = possible_outputs[: (1 + possible_outputs.index(output_type))] parameters = { "experiment": experiment_id, "metatlas_repo_path": repo_dir(), "output_directory": base_output_dir, "analysis_number": analysis_number, } analysis_id = f"{getpass.getuser()}{parameters['analysis_number']}" tokens = parameters["experiment"].split("_") output_dir = os.path.join(base_output_dir, experiment_id) os.makedirs(output_dir, exist_ok=True) for output in outputs: parameters["output_type"] = output for polarity in ["positive", "negative"] if output != "RT-Predict" else [None]: source = os.path.join(repo_dir(), "notebooks", "reference", SOURCE_NOTEBOOK[output]) if polarity is not None: parameters["polarity"] = polarity pol = polarity[:3].upper() parameters["source_atlas"] = f"{SOURCE_ATLAS_PREFIX[output]}_{pol}_{tokens[3]}_{analysis_id}" generate_notebook(source, output_dir, parameters) def generate_notebook(source, output_dir, parameters): """ Creates a notebook from source in output_dir that has updated parameters. inputs: source: path of input Jupyter notebook output_dir: directory to write output Jupyter notebook parameters: dict of parameters to update in the notebook parameters must have atleast the following keys: analysis_number, experiment, output_type """ if "polarity" in parameters: pol = parameters["polarity"][:3].upper() suffix = f"{parameters['output_type']}_{pol}" else: suffix = "RT-Predict" tokens = parameters["experiment"].split("_") dest = os.path.join(output_dir, "_".join(tokens[3:5] + [suffix]) + ".ipynb") create_notebook_with_parameters(source, dest, parameters) def create_notebook_with_parameters(source, dest, parameters): """ Copies source notebook to dest and updates parameters inputs: source: path of input notebook dest: path of destination notebook parameters: dict with name of parameter in key and new value in value """ with open(source, encoding="utf8") as source_fh: data = json.load(source_fh) eq_pat = re.compile(r"^([^#= ]+)\s*=.+$") param_source = data["cells"][1]["source"] for i, line in enumerate(param_source): re_match = eq_pat.match(line) if re_match: param_name = re_match.group(1) if param_name in parameters: new_value = parameters[param_name] out_value = f"'{new_value}'" if isinstance(new_value, str) else new_value param_source[i] = f"{param_name} = {out_value}\n" with open(dest, "w", encoding="utf8") as out_fh: json.dump(data, out_fh) def validate_data_dir(base_data_dir, experiment_id): """Raise FileNotFoundError if base_data_dir / experiment_id is not an existing directory""" experiment_dir = os.path.join(base_data_dir, experiment_id) try: if not os.path.isdir(experiment_dir): raise FileNotFoundError(f"Data directory does not exist at {experiment_dir}.") except FileNotFoundError as err: logger.exception(err) raise err def get_repo_hash(): """ Returns the 
full hash for the current git commit or 'git not found, hash unknown' """ try: result = subprocess.run(["git", "rev-parse", "HEAD"], cwd=repo_dir(), capture_output=True, check=True) except FileNotFoundError: return "git not found, hash unknown" return result.stdout.strip()
[ "json.dump", "os.path.abspath", "json.load", "getpass.getuser", "os.makedirs", "pathlib.Path.home", "os.path.isdir", "pathlib.Path", "shutil.copyfile", "os.path.join", "logging.getLogger", "re.compile" ]
[((378, 405), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (395, 405), False, 'import logging\n'), ((1192, 1228), 'os.makedirs', 'os.makedirs', (['dest_dir'], {'exist_ok': '(True)'}), '(dest_dir, exist_ok=True)\n', (1203, 1228), False, 'import os\n'), ((1233, 1282), 'shutil.copyfile', 'shutil.copyfile', (['source', "(dest_dir / 'kernel.json')"], {}), "(source, dest_dir / 'kernel.json')\n", (1248, 1282), False, 'import shutil\n'), ((3805, 3849), 'os.path.join', 'os.path.join', (['base_output_dir', 'experiment_id'], {}), '(base_output_dir, experiment_id)\n', (3817, 3849), False, 'import os\n'), ((3854, 3892), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (3865, 3892), False, 'import os\n'), ((5663, 5695), 're.compile', 're.compile', (['"""^([^#= ]+)\\\\s*=.+$"""'], {}), "('^([^#= ]+)\\\\s*=.+$')\n", (5673, 5695), False, 'import re\n'), ((6393, 6435), 'os.path.join', 'os.path.join', (['base_data_dir', 'experiment_id'], {}), '(base_data_dir, experiment_id)\n', (6405, 6435), False, 'import os\n'), ((5629, 5649), 'json.load', 'json.load', (['source_fh'], {}), '(source_fh)\n', (5638, 5649), False, 'import json\n'), ((6197, 6220), 'json.dump', 'json.dump', (['data', 'out_fh'], {}), '(data, out_fh)\n', (6206, 6220), False, 'import json\n'), ((3688, 3705), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (3703, 3705), False, 'import getpass\n'), ((6460, 6489), 'os.path.isdir', 'os.path.isdir', (['experiment_dir'], {}), '(experiment_dir)\n', (6473, 6489), False, 'import os\n'), ((2809, 2834), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2824, 2834), False, 'import os\n'), ((965, 979), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (969, 979), False, 'from pathlib import Path\n'), ((1109, 1120), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1118, 1120), False, 'from pathlib import Path\n')]
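As a hedged usage sketch for the generator above: the experiment identifier and output directory below are made-up placeholders that merely follow the '_'-delimited naming convention the code expects (at least five tokens, since tokens[3] and tokens[3:5] are used); a checkout of the Metatlas repo with the reference notebooks is assumed.

# Hypothetical invocation; paths and the experiment ID are assumptions.
create_all_notebooks(
    output_type="FinalEMA-HILIC",
    base_output_dir="/tmp/metatlas_output",
    experiment_id="20210915_JGI_AK_Exp1_QE139_HILICZ_USDAY44",
    analysis_number=0,
)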
# -*- coding: UTF-8
import os
import gui as wegui
from itchat.content import *
import itchat
import sys
from pypinyin import lazy_pinyin

reload(sys)
sys.setdefaultencoding("utf-8")


def get_contact_name(msg):
    return msg['User']['RemarkName'] if msg['User']['RemarkName'] else msg['User']['NickName']


def get_group_name(msg):
    group_name = ""
    if not msg['User']['NickName']:
        for member in msg['User']['MemberList']:
            group_name += member['NickName']
    else:
        group_name = msg['User']['NickName']
    if len(group_name) > 15:
        return u"群聊: " + group_name[:12] + "..."  # "群聊" = "group chat"
    else:
        return u"群聊: " + group_name


def notify(title, text):
    os.system("""
              osascript -e 'display notification "{}" with title "{}"'
              """.format(unicode(text), unicode(title)))


def start():
    @itchat.msg_register([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO, FRIENDS])
    def recive_contact_msg(msg):
        contact_name = get_contact_name(msg)
        try:
            wechatMain.recive_message(msg, contact_name)
            notify('TWchat', "new message from: " + contact_name)
        except AttributeError:
            pass

    @itchat.msg_register(TEXT, isGroupChat=True)
    def recive_group_msg(msg):
        group_name = get_group_name(msg)
        try:
            wechatMain.recive_message(msg, group_name)
            notify('TWchat', "new message from: " + group_name)
        except AttributeError:
            pass
        return

    def on_contact_item_click(button, info):
        wechatMain.chatListBox.addNewChat(info[0], info[1])
        wechatMain.set_current_chat(info[0], info[1])
        wechatMain.chatListBox.show_chat()
        return

    def on_chat_item_click(button, info):
        wechatMain.set_current_chat(info[0], info[1])
        return

    palette = [
        ('left', 'black', 'light gray'),
        ('right', 'black', 'dark cyan'),
        ('button', 'dark green', 'black'),
        ('mybg', 'black', 'dark cyan'),
        ('tobg', 'dark blue', 'light gray'),
        ('edit', 'dark cyan', 'black'),
        ('bg', 'dark green', 'black'),
    ]

    print('''
 _____  _    _  _____  _   _   ___   _____
|_   _|| |  | |/  __ \| | | | / _ \ |_   _|
  | |  | |  | || /  \/| |_| |/ /_\ \  | |
  | |  | |/\| || |    |  _  ||  _  |  | |
  | |  \  /\  /| \__/\| | | || | | |  | |
  \_/   \/  \/  \____/\_| |_/\_| |_/  \_/
''')

    wechatMain = wegui.WechatMain(palette)
    itchat.auto_login(enableCmdQR=2, hotReload=True)
    itchat.run(blockThread=False)
    userInfo = itchat.web_init()['User']
    owner_id = userInfo['UserName']
    owner_name = userInfo['NickName']
    contactlist = itchat.get_friends(update=True)
    chatlist = itchat.get_chatrooms()
    #contactlist = sorted(contactlist,key=lambda x:(x['RemarkPYInitial'],x['PYInitial']))
    contactlist = sorted(contactlist, key=lambda x: (lazy_pinyin(get_name(x))))
    wechatMain.initUserInfo(owner_id, owner_name, on_contact_item_click, on_chat_item_click, contactlist, chatlist)
    wechatMain.bind_itchat(itchat)
    wechatMain.createLoop()


def get_name(contact):
    name = ''
    if not contact['RemarkName']:
        name = contact['NickName']
    else:
        name = contact['RemarkName']
    return name.lower()


if __name__ == '__main__':
    start()
[ "itchat.get_friends", "gui.WechatMain", "itchat.msg_register", "itchat.get_chatrooms", "sys.setdefaultencoding", "itchat.web_init", "itchat.auto_login", "itchat.run" ]
[((149, 180), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (171, 180), False, 'import sys\n'), ((833, 938), 'itchat.msg_register', 'itchat.msg_register', (['[TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING, ATTACHMENT, VIDEO, FRIENDS\n ]'], {}), '([TEXT, MAP, CARD, NOTE, SHARING, PICTURE, RECORDING,\n ATTACHMENT, VIDEO, FRIENDS])\n', (852, 938), False, 'import itchat\n'), ((1201, 1244), 'itchat.msg_register', 'itchat.msg_register', (['TEXT'], {'isGroupChat': '(True)'}), '(TEXT, isGroupChat=True)\n', (1220, 1244), False, 'import itchat\n'), ((2452, 2477), 'gui.WechatMain', 'wegui.WechatMain', (['palette'], {}), '(palette)\n', (2468, 2477), True, 'import gui as wegui\n'), ((2482, 2530), 'itchat.auto_login', 'itchat.auto_login', ([], {'enableCmdQR': '(2)', 'hotReload': '(True)'}), '(enableCmdQR=2, hotReload=True)\n', (2499, 2530), False, 'import itchat\n'), ((2534, 2563), 'itchat.run', 'itchat.run', ([], {'blockThread': '(False)'}), '(blockThread=False)\n', (2544, 2563), False, 'import itchat\n'), ((2695, 2726), 'itchat.get_friends', 'itchat.get_friends', ([], {'update': '(True)'}), '(update=True)\n', (2713, 2726), False, 'import itchat\n'), ((2742, 2764), 'itchat.get_chatrooms', 'itchat.get_chatrooms', ([], {}), '()\n', (2762, 2764), False, 'import itchat\n'), ((2578, 2595), 'itchat.web_init', 'itchat.web_init', ([], {}), '()\n', (2593, 2595), False, 'import itchat\n')]
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from neutron_lib.api import extensions
from neutron_lib import constants as n_constants
from neutron_lib import exceptions as nexception

from neutron.api.v2 import resource_helper
from neutron.plugins.common import constants

from neutron_lbaas._i18n import _
from neutron_lbaas.extensions import loadbalancerv2


class ProviderCannotCreateLoadBalancerGraph(nexception.BadRequest):
    message = _("The provider does not have the ability to create a load "
                "balancer graph.")


# NOTE(blogan): this dictionary is to be used only for importing from the
# plugin to validate against.  It is only put here for consistency with
# all other extensions and an easy place to look what changes this extension
# allows.
RESOURCE_ATTRIBUTE_MAP = {
    'graphs': {
        'loadbalancer': {'allow_post': True, 'allow_put': False,
                         'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': True}
    }
}

EXISTING_ATTR_GRAPH_ATTR_MAP = {
    'loadbalancers': {
        'listeners': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': []
        }
    },
    'listeners': {
        'default_pool': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': n_constants.ATTR_NOT_SPECIFIED
        },
        'l7policies': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': []
        }
    },
    'pools': {
        'healthmonitor': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': n_constants.ATTR_NOT_SPECIFIED
        },
        'members': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': []
        }
    },
    'l7policies': {
        'rules': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': []
        },
        'redirect_pool': {
            'allow_post': True, 'allow_put': False,
            'is_visible': True, 'default': n_constants.ATTR_NOT_SPECIFIED
        },
        'listener_id': {
            'allow_post': False, 'allow_put': False,
            'is_visible': True
        }
    }
}


class Lb_graph(extensions.ExtensionDescriptor):

    @classmethod
    def get_name(cls):
        return "Load Balancer Graph"

    @classmethod
    def get_alias(cls):
        return "lb-graph"

    @classmethod
    def get_description(cls):
        return "Extension for allowing the creation of load balancers with a" \
               " full graph in one API request."

    @classmethod
    def get_namespace(cls):
        return "http://wiki.openstack.org/neutron/LBaaS/API_2.0"

    @classmethod
    def get_updated(cls):
        return "2016-02-09T10:00:00-00:00"

    def get_required_extensions(self):
        return ["lbaasv2"]

    @classmethod
    def get_resources(cls):
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        resources = resource_helper.build_resource_info(
            plural_mappings,
            RESOURCE_ATTRIBUTE_MAP,
            constants.LOADBALANCERV2,
            register_quota=True)
        return resources

    @classmethod
    def get_plugin_interface(cls):
        return loadbalancerv2.LoadBalancerPluginBaseV2
[ "neutron.api.v2.resource_helper.build_plural_mappings", "neutron_lbaas._i18n._", "neutron.api.v2.resource_helper.build_resource_info" ]
[((1034, 1110), 'neutron_lbaas._i18n._', '_', (['"""The provider does not have the ability to create a load balancer graph."""'], {}), "('The provider does not have the ability to create a load balancer graph.')\n", (1035, 1110), False, 'from neutron_lbaas._i18n import _\n'), ((3598, 3663), 'neutron.api.v2.resource_helper.build_plural_mappings', 'resource_helper.build_plural_mappings', (['{}', 'RESOURCE_ATTRIBUTE_MAP'], {}), '({}, RESOURCE_ATTRIBUTE_MAP)\n', (3635, 3663), False, 'from neutron.api.v2 import resource_helper\n'), ((3697, 3824), 'neutron.api.v2.resource_helper.build_resource_info', 'resource_helper.build_resource_info', (['plural_mappings', 'RESOURCE_ATTRIBUTE_MAP', 'constants.LOADBALANCERV2'], {'register_quota': '(True)'}), '(plural_mappings, RESOURCE_ATTRIBUTE_MAP,\n constants.LOADBALANCERV2, register_quota=True)\n', (3732, 3824), False, 'from neutron.api.v2 import resource_helper\n')]
from time import strftime, localtime

from pystatus.plugin import IPlugin, IInstance


class Clock(IPlugin):
    def __init__(self):
        super().__init__("clock", "0.1", "g0dsCookie", ClockInstance)


class ClockInstance(IInstance):
    def __init__(self, *args, **kwargs):
        options = {
            "text": "%Y-%m-%d %H:%M:%S",
            "short": "%H:%M:%S",
        }
        super().__init__(*args, options=options, **kwargs)

    def update(self):
        time = localtime()
        with self.block:
            if self._text:
                self.block.full_text = strftime(self._text, time)
            if self._short:
                self.block.short_text = strftime(self._short, time)
[ "time.strftime", "time.localtime" ]
[((478, 489), 'time.localtime', 'localtime', ([], {}), '()\n', (487, 489), False, 'from time import strftime, localtime\n'), ((581, 607), 'time.strftime', 'strftime', (['self._text', 'time'], {}), '(self._text, time)\n', (589, 607), False, 'from time import strftime, localtime\n'), ((676, 703), 'time.strftime', 'strftime', (['self._short', 'time'], {}), '(self._short, time)\n', (684, 703), False, 'from time import strftime, localtime\n')]
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

from six import StringIO
import unittest

from skbio import TreeNode
from skbio.io import NewickFormatError
from skbio.io.newick import (_newick_to_tree_node, _tree_node_to_newick,
                             _newick_sniffer)


class TestNewick(unittest.TestCase):
    def _assert_node_equal(self, n1, n2):
        self.assertEqual(n1.name, n2.name)
        self.assertEqual(n1.length, n2.length)
        self.assertEqual(len(n1.children), len(n2.children))

    def _assert_equal(self, n1, n2):
        def name(x):
            return (str(x.name),
                    float(x.length) if x.length is not None else 0,
                    len(x.children))
        self._assert_node_equal(n1, n2)
        for c1, c2 in zip(sorted(n1.children, key=name),
                          sorted(n2.children, key=name)):
            self.assertTrue(c1.parent is n1)
            self.assertTrue(c2.parent is n2)
            self._assert_equal(c1, c2)

    def _setup_tree(self, kwargs_list):
        trees = []
        for kwargs in kwargs_list:
            trees.append(TreeNode(**kwargs))
        trees[4].extend([trees[2], trees[3]])
        trees[5].extend([trees[0], trees[1], trees[4]])
        return trees[5]

    def _setup_linked_list(self, kwargs_list):
        last_node = None
        for idx, kwargs in enumerate(kwargs_list):
            new_node = TreeNode(**kwargs)
            if last_node is not None:
                new_node.append(last_node)
            last_node = new_node
        return last_node

    def _setup_balanced_binary(self, kwargs_list):
        trees = []
        for kwargs in kwargs_list:
            trees.append(TreeNode(**kwargs))
        trees[0].extend([trees[2], trees[3]])
        trees[1].extend([trees[4], trees[5]])
        trees[6].extend([trees[0], trees[1]])
        return trees[6]

    def setUp(self):
        # Using the factory functions above, we will construct different tree
        # instances. Each tree is expected to serialize to the first newick
        # string in the list. Each string in the list is expected to
        # deserialize into an equivalent rotation of the constructed instance.
        tree_blank = (self._setup_tree([
            {}, {}, {}, {}, {}, {}
        ]), [
            "(,,(,));\n",
            "(,(,),);",
            "((,),,);",
            " ((,[ this is a comment ]) , , ) ; ",
            "((,[ i_can_do_this[0] or escape unmatched '[ ]),[more words],);",
        ])

        tree_leaves_named = (self._setup_tree([
            {'name': 'a_'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': 'd'},
            {},
            {}
        ]), [
            "('a_',b,(c,d));\n",
            "(b,(c,d),'a_');",
            "(b\n,'a_'\n ,(d \t,c) ) ;",
        ])

        tree_all_named = (self._setup_tree([
            {'name': 'a'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': '[whaaat!\']'},
            {'name': 'e'},
            {'name': 'f'}
        ]), [
            "(a,b,(c,'[whaaat!'']')e)f;\n",
            "(b,(c,'[whaaat!'']')e,a)f;",
            "(b,[comment] \na,('[whaaat!'']',c)e)f;",
        ])

        tree_all_but_root_distances = (self._setup_tree([
            {'length': 0.1},
            {'length': 0.2},
            {'length': 0.3},
            {'length': 0.4},
            {'length': 0.5},
            {}
        ]), [
            "(:0.1,:0.2,(:0.3,:0.4):0.5);\n",
            "(:0.2,(:0.3,:0.4):0.5,:0.1);",
            "(:0.2,:0.1,(:0.4,:0.3):0.5);",
        ])

        tree_all_distances = (self._setup_tree([
            {'length': 0.1},
            {'length': 0.2},
            {'length': 0.3},
            {'length': 0.4},
            {'length': 0.5},
            {'length': 0.0}
        ]), [
            "(:0.1,:0.2,(:0.3,:0.4):0.5):0.0;\n",
            "(:0.2,(:0.3,:0.4):0.5,:0.1):0.0;",
            "(:0.2,\n:0.1,(:0.4,\n:0.3):0.5)\n:0.0;",
        ])

        tree_all_leaves_named_with_distances = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de d', 'length': 0.4},
            {'length': 0.5},
            {'length': 0.0}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de_d:0.4):0.5):0.0;\n",
            "('b_a''':0.2,(c:0.3,'de d':0.4):0.5,a:0.1):0.0;",
            "('b_a''':0.2,a:0.1,('de d'[why not]:0.4,c:0.3):0.5):0.0;",
        ])

        tree_all_leaves_named_with_distances_no_root = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de d', 'length': 0.4},
            {'length': 0.5},
            {}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de__d:0.4):0.5);\n",
            "('b_a''':0.2\n[comment ahoy]\n,(c:0.3,'de d':0.4):0.5,a:0.1);",
            "('b_a''':0.2,a:0.1,(de__d:0.4,c:0.3):0.5);"
        ])

        tree_all = (self._setup_tree([
            {'name': 'a', 'length': 0.1},
            {'name': 'b_a\'', 'length': 0.2},
            {'name': 'c', 'length': 0.3},
            {'name': 'de\' d', 'length': 0.4},
            {'name': 'e', 'length': 0.5},
            {'name': 'f', 'length': 0.0}
        ]), [
            "(a:0.1,'b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5)f:0.0;\n",
            "('b_a''':0.2,(c:0.3,de''_d:0.4)e:0.5,a:0.1)f:0.0;",
            "((de''_d:0.4, c:0.3)e:0.5, 'b_a''':0.2, a:0.1)f:0.0;"
        ])

        balanced_blank = (self._setup_balanced_binary([
            {}, {}, {}, {}, {}, {}, {}
        ]), [
            "((,),(,));\n",
        ])

        balanced_named = (self._setup_balanced_binary([
            {'name': 'a'},
            {'name': 'b'},
            {'name': 'c'},
            {'name': 'd'},
            {'name': 'e'},
            {'name': 'f'},
            {'name': 'g'}
        ]), [
            "((c,d)a,(e,f)b)g;\n",
        ])

        balanced_distances = (self._setup_balanced_binary([
            {'length': 1.0},
            {'length': 2.0},
            {'length': 3.0},
            {'length': 4.0},
            {'length': 5.0},
            {'length': 6.0},
            {'length': 0.0}
        ]), [
            "((:3.0,:4.0):1.0,(:5.0,:6.0):2.0):0.0;\n",
        ])

        balanced_all = (self._setup_balanced_binary([
            {'name': 'a', 'length': 1.0},
            {'name': 'b', 'length': 2.0},
            {'name': 'c', 'length': 3.0},
            {'name': 'd', 'length': 4.0},
            {'name': 'e', 'length': 5.0},
            {'name': 'f:f\'f', 'length': 6.0},
            {'name': 'g', 'length': 0.0}
        ]), [
            "((c:3.0,d:4.0)a:1.0,(e:5.0,'f:f''f':6.0)b:2.0)g:0.0;\n",
        ])

        linked_list_blank = (self._setup_linked_list([
            {}, {}, {}, {}, {}
        ]), [
            "(((())));\n",
            "[(((())));](((())));",
            "[[(((())));](((())));](((())));\t\t\n"
        ])

        linked_list_named = (self._setup_linked_list([
            {'name': 'aaa'},
            {'name': 'b_a\''},
            {'name': 'c'},
            {'name': 'de d'},
            {'name': 'e'},
        ]), [
            "((((aaa)'b_a''')c)de_d)e;\n"
        ])

        linked_list_distances = (self._setup_linked_list([
            {'length': 0.4},
            {'length': 0.3},
            {'length': 0.2},
            {'length': 0.1},
            {'length': 0.0},
        ]), [
            "((((:0.4):0.3):0.2):0.1):0.0;\n",
            "((((:0.4)[not a label]:0.3):0.2):0.1):0.0;\t\t\n"
        ])

        linked_list_all = (self._setup_linked_list([
            {'name': 'a', 'length': 0.4},
            {'name': 'b_a\'', 'length': 0.3},
            {'name': 'c', 'length': 0.2},
            {'name': 'de d', 'length': 0.1},
            {'name': 'eee', 'length': 0.0},
        ]), [
            "((((a:0.4)'b_a''':0.3)c:0.2)de_d:0.1)eee:0.0;\n"
        ])

        single_empty = (TreeNode(), [";\n", "[comment about the root"
                                         " and its properties];"])
        single_named = (TreeNode(name='athing'), ["athing;\n"])
        single_distance = (TreeNode(length=200.0), [":200.0;\n"])
        single_all = (TreeNode(name='[a]', length=200.0), ["'[a]':200.0;\n"])

        self.trees_newick_lists = [
            tree_blank, tree_leaves_named, tree_all_named,
            tree_all_but_root_distances, tree_all_distances,
            tree_all_leaves_named_with_distances,
            tree_all_leaves_named_with_distances_no_root,
            tree_all,

            balanced_blank, balanced_named, balanced_distances,
            balanced_all,

            linked_list_blank, linked_list_named, linked_list_distances,
            linked_list_all,

            single_empty, single_named, single_distance, single_all
        ]

        # Invalid newick strings and list of error fragments that should be
        # a part of the error message when read.
        self.invalid_newicks = [
            ("", ['root']),
            ("This is not a newick file.", ['whitespace', 'label']),
            ("((();", ['Parenthesis', 'unbalanced']),
            ("(,,,)(,);\n", ['unnested', 'children']),
            ("(()());", ['unnested', 'children']),
            ("(():,,)", ['length']),
            ("[][[]('comment is the gotcha':0.2,,);", ['unbalanced', 'root']),
            ("#SampleID\tHeaderA\tHeaderB\n0\t'yellow'\t0.45;", ['whitespace', 'label']),
            ("))();", ['Parenthesis', 'unbalanced']),
            ("((,,),((,,));", ['Parenthesis', 'unbalanced']),
            ("\n".join([",".join(str(i) for i in range(100))
                        for _ in range(100)]), ['whitespace', 'label'])
        ]

    def test_newick_to_tree_node_valid_files(self):
        for tree, newicks in self.trees_newick_lists:
            for newick in newicks:
                fh = StringIO(newick)
                read_tree = _newick_to_tree_node(fh)
                self._assert_equal(tree, read_tree)
                fh.close()

    def test_newick_to_tree_node_invalid_files(self):
        for invalid, error_fragments in self.invalid_newicks:
            fh = StringIO(invalid)
            with self.assertRaises(NewickFormatError) as cm:
                _newick_to_tree_node(fh)
            for frag in error_fragments:
                self.assertIn(frag, str(cm.exception))
            fh.close()

    def test_tree_node_to_newick(self):
        for tree, newicks in self.trees_newick_lists:
            newick = newicks[0]
            fh = StringIO()
            _tree_node_to_newick(tree, fh)
            self.assertEqual(newick, fh.getvalue())
            fh.close()

    def test_roundtrip(self):
        for tree, newicks in self.trees_newick_lists:
            newick = newicks[0]
            fh = StringIO(newick)
            tree = _newick_to_tree_node(fh)
            fh2 = StringIO()
            _tree_node_to_newick(tree, fh2)
            fh2.seek(0)
            tree2 = _newick_to_tree_node(fh2)
            self.assertEqual(newick, fh2.getvalue())
            self._assert_equal(tree, tree2)
            fh.close()
            fh2.close()

    def test_newick_to_tree_node_convert_underscores(self):
        fh = StringIO('(_:0.1, _a, _b)__;')
        tree = _newick_to_tree_node(fh, convert_underscores=False)
        fh2 = StringIO()
        _tree_node_to_newick(tree, fh2)
        self.assertEqual(fh2.getvalue(), "('_':0.1,'_a','_b')'__';\n")
        fh2.close()
        fh.close()

    def test_newick_sniffer_valid_files(self):
        for _, newicks in self.trees_newick_lists:
            for newick in newicks:
                fh = StringIO(newick)
                self.assertEqual(_newick_sniffer(fh), (True, {}))
                fh.close()

    def test_newick_sniffer_invalid_files(self):
        for invalid, _ in self.invalid_newicks:
            fh = StringIO(invalid)
            self.assertEqual(_newick_sniffer(fh), (False, {}))
            fh.close()


if __name__ == '__main__':
    unittest.main()
[ "unittest.main", "skbio.io.newick._tree_node_to_newick", "skbio.io.newick._newick_to_tree_node", "skbio.TreeNode", "six.StringIO", "skbio.io.newick._newick_sniffer" ]
[((12650, 12665), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12663, 12665), False, 'import unittest\n'), ((11859, 11889), 'six.StringIO', 'StringIO', (['"""(_:0.1, _a, _b)__;"""'], {}), "('(_:0.1, _a, _b)__;')\n", (11867, 11889), False, 'from six import StringIO\n'), ((11905, 11956), 'skbio.io.newick._newick_to_tree_node', '_newick_to_tree_node', (['fh'], {'convert_underscores': '(False)'}), '(fh, convert_underscores=False)\n', (11925, 11956), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((11971, 11981), 'six.StringIO', 'StringIO', ([], {}), '()\n', (11979, 11981), False, 'from six import StringIO\n'), ((11990, 12021), 'skbio.io.newick._tree_node_to_newick', '_tree_node_to_newick', (['tree', 'fh2'], {}), '(tree, fh2)\n', (12010, 12021), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((1775, 1793), 'skbio.TreeNode', 'TreeNode', ([], {}), '(**kwargs)\n', (1783, 1793), False, 'from skbio import TreeNode\n'), ((8401, 8411), 'skbio.TreeNode', 'TreeNode', ([], {}), '()\n', (8409, 8411), False, 'from skbio import TreeNode\n'), ((8534, 8557), 'skbio.TreeNode', 'TreeNode', ([], {'name': '"""athing"""'}), "(name='athing')\n", (8542, 8557), False, 'from skbio import TreeNode\n'), ((8601, 8623), 'skbio.TreeNode', 'TreeNode', ([], {'length': '(200.0)'}), '(length=200.0)\n', (8609, 8623), False, 'from skbio import TreeNode\n'), ((8662, 8696), 'skbio.TreeNode', 'TreeNode', ([], {'name': '"""[a]"""', 'length': '(200.0)'}), "(name='[a]', length=200.0)\n", (8670, 8696), False, 'from skbio import TreeNode\n'), ((10787, 10804), 'six.StringIO', 'StringIO', (['invalid'], {}), '(invalid)\n', (10795, 10804), False, 'from six import StringIO\n'), ((11170, 11180), 'six.StringIO', 'StringIO', ([], {}), '()\n', (11178, 11180), False, 'from six import StringIO\n'), ((11193, 11223), 'skbio.io.newick._tree_node_to_newick', '_tree_node_to_newick', (['tree', 'fh'], {}), '(tree, fh)\n', (11213, 11223), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((11435, 11451), 'six.StringIO', 'StringIO', (['newick'], {}), '(newick)\n', (11443, 11451), False, 'from six import StringIO\n'), ((11471, 11495), 'skbio.io.newick._newick_to_tree_node', '_newick_to_tree_node', (['fh'], {}), '(fh)\n', (11491, 11495), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((11514, 11524), 'six.StringIO', 'StringIO', ([], {}), '()\n', (11522, 11524), False, 'from six import StringIO\n'), ((11537, 11568), 'skbio.io.newick._tree_node_to_newick', '_tree_node_to_newick', (['tree', 'fh2'], {}), '(tree, fh2)\n', (11557, 11568), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((11613, 11638), 'skbio.io.newick._newick_to_tree_node', '_newick_to_tree_node', (['fh2'], {}), '(fh2)\n', (11633, 11638), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((12513, 12530), 'six.StringIO', 'StringIO', (['invalid'], {}), '(invalid)\n', (12521, 12530), False, 'from six import StringIO\n'), ((1480, 1498), 'skbio.TreeNode', 'TreeNode', ([], {}), '(**kwargs)\n', (1488, 1498), False, 'from skbio import TreeNode\n'), ((2065, 2083), 'skbio.TreeNode', 'TreeNode', ([], {}), '(**kwargs)\n', (2073, 2083), False, 'from skbio import TreeNode\n'), ((10502, 10518), 'six.StringIO', 'StringIO', (['newick'], {}), '(newick)\n', (10510, 10518), False, 'from six import StringIO\n'), ((10547, 10571), 'skbio.io.newick._newick_to_tree_node', '_newick_to_tree_node', (['fh'], {}), '(fh)\n', (10567, 10571), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((10882, 10906), 'skbio.io.newick._newick_to_tree_node', '_newick_to_tree_node', (['fh'], {}), '(fh)\n', (10902, 10906), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((12288, 12304), 'six.StringIO', 'StringIO', (['newick'], {}), '(newick)\n', (12296, 12304), False, 'from six import StringIO\n'), ((12560, 12579), 'skbio.io.newick._newick_sniffer', '_newick_sniffer', (['fh'], {}), '(fh)\n', (12575, 12579), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n'), ((12338, 12357), 'skbio.io.newick._newick_sniffer', '_newick_sniffer', (['fh'], {}), '(fh)\n', (12353, 12357), False, 'from skbio.io.newick import _newick_to_tree_node, _tree_node_to_newick, _newick_sniffer\n')]
"""Driver class for SpaceMouse controller. This class provides a driver support to SpaceMouse on Mac OS X. In particular, we assume you are using a SpaceMouse Wireless by default. To set up a new SpaceMouse controller: 1. Download and install driver from https://www.3dconnexion.com/service/drivers.html 2. Install hidapi library through pip (make sure you run uninstall hid first if it is installed). 3. Make sure SpaceMouse is connected before running the script 4. (Optional) Based on the model of SpaceMouse, you might need to change the vendor id and product id that correspond to the device. For Linux support, you can find open-source Linux drivers and SDKs online. See http://spacenav.sourceforge.net/ """ import time import threading from collections import namedtuple import numpy as np try: import hid except ModuleNotFoundError as exc: raise ImportError("Unable to load module hid, required to interface with SpaceMouse. " "Only Mac OS X is officially supported. Install the additional " "requirements with `pip install -r requirements-ik.txt`") from exc from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix from spirl.data.block_stacking.src.robosuite import Device AxisSpec = namedtuple("AxisSpec", ["channel", "byte1", "byte2", "scale"]) SPACE_MOUSE_SPEC = { "x": AxisSpec(channel=1, byte1=1, byte2=2, scale=1), "y": AxisSpec(channel=1, byte1=3, byte2=4, scale=-1), "z": AxisSpec(channel=1, byte1=5, byte2=6, scale=-1), "roll": AxisSpec(channel=1, byte1=7, byte2=8, scale=-1), "pitch": AxisSpec(channel=1, byte1=9, byte2=10, scale=-1), "yaw": AxisSpec(channel=1, byte1=11, byte2=12, scale=1), } def to_int16(y1, y2): """Convert two 8 bit bytes to a signed 16 bit integer.""" x = (y1) | (y2 << 8) if x >= 32768: x = -(65536 - x) return x def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0): """Normalize raw HID readings to target range.""" x = x / axis_scale x = min(max(x, min_v), max_v) return x def convert(b1, b2): """Converts SpaceMouse message to commands.""" return scale_to_control(to_int16(b1, b2)) class SpaceMouse(Device): """A minimalistic driver class for SpaceMouse with HID library.""" def __init__(self, vendor_id=9583, product_id=50735): """Initialize a SpaceMouse handler. Args: vendor_id: HID device vendor id product_id: HID device product id Note: Use hid.enumerate() to view all USB human interface devices (HID). Make sure SpaceMouse is detected before running the script. You can look up its vendor/product id from this method. """ print("Opening SpaceMouse device") self.device = hid.device() self.device.open(vendor_id, product_id) # SpaceMouse print("Manufacturer: %s" % self.device.get_manufacturer_string()) print("Product: %s" % self.device.get_product_string()) self._display_controls() self.single_click_and_hold = False self._control = [0., 0., 0., 0., 0., 0.] self._reset_state = 0 self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]]) self._enabled = False # launch a new listener thread to listen to SpaceMouse self.thread = threading.Thread(target=self.run) self.thread.daemon = True self.thread.start() def _display_controls(self): """ Method to pretty print controls. 
""" def print_command(char, info): char += " " * (30 - len(char)) print("{}\t{}".format(char, info)) print("") print_command("Control", "Command") print_command("Right button", "reset simulation") print_command("Left button (hold)", "close gripper") print_command("Move mouse laterally", "move arm horizontally in x-y plane") print_command("Move mouse vertically", "move arm vertically") print_command( "Twist mouse about an axis", "rotate arm about a corresponding axis" ) print_command("ESC", "quit") print("") def _reset_internal_state(self): """ Resets internal state of controller, except for the reset signal. """ self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]]) def start_control(self): """ Method that should be called externally before controller can start receiving commands. """ self._reset_internal_state() self._reset_state = 0 self._enabled = True def get_controller_state(self): """Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset.""" dpos = self.control[:3] * 0.005 roll, pitch, yaw = self.control[3:] * 0.005 self.grasp = self.control_gripper # convert RPY to an absolute orientation drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3] drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3] drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3] self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3))) return dict( dpos=dpos, rotation=self.rotation, grasp=self.grasp, reset=self._reset_state ) def run(self): """Listener method that keeps pulling new messages.""" t_last_click = -1 while True: d = self.device.read(13) if d is not None and self._enabled: if d[0] == 1: ## readings from 6-DoF sensor self.y = convert(d[1], d[2]) self.x = convert(d[3], d[4]) self.z = convert(d[5], d[6]) * -1.0 self.roll = convert(d[7], d[8]) self.pitch = convert(d[9], d[10]) self.yaw = convert(d[11], d[12]) self._control = [ self.x, self.y, self.z, self.roll, self.pitch, self.yaw, ] elif d[0] == 3: ## readings from the side buttons # press left button if d[1] == 1: t_click = time.time() elapsed_time = t_click - t_last_click t_last_click = t_click self.single_click_and_hold = True # release left button if d[1] == 0: self.single_click_and_hold = False # right button is for reset if d[1] == 2: self._reset_state = 1 self._enabled = False self._reset_internal_state() @property def control(self): """Returns 6-DoF control.""" return np.array(self._control) @property def control_gripper(self): """Maps internal states into gripper commands.""" if self.single_click_and_hold: return 1.0 return 0 if __name__ == "__main__": space_mouse = SpaceMouse() for i in range(100): print(space_mouse.control, space_mouse.control_gripper) time.sleep(0.02)
[ "threading.Thread", "hid.device", "spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix", "time.sleep", "time.time", "numpy.array", "collections.namedtuple" ]
[((1320, 1382), 'collections.namedtuple', 'namedtuple', (['"""AxisSpec"""', "['channel', 'byte1', 'byte2', 'scale']"], {}), "('AxisSpec', ['channel', 'byte1', 'byte2', 'scale'])\n", (1330, 1382), False, 'from collections import namedtuple\n'), ((2863, 2875), 'hid.device', 'hid.device', ([], {}), '()\n', (2873, 2875), False, 'import hid\n'), ((3259, 3322), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])\n', (3267, 3322), True, 'import numpy as np\n'), ((3430, 3463), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (3446, 3463), False, 'import threading\n'), ((4420, 4483), 'numpy.array', 'np.array', (['[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])\n', (4428, 4483), True, 'import numpy as np\n'), ((7167, 7190), 'numpy.array', 'np.array', (['self._control'], {}), '(self._control)\n', (7175, 7190), True, 'import numpy as np\n'), ((7532, 7548), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (7542, 7548), False, 'import time\n'), ((5067, 5131), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': '(-pitch)', 'direction': '[1.0, 0, 0]', 'point': 'None'}), '(angle=-pitch, direction=[1.0, 0, 0], point=None)\n', (5082, 5131), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((5155, 5217), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': 'roll', 'direction': '[0, 1.0, 0]', 'point': 'None'}), '(angle=roll, direction=[0, 1.0, 0], point=None)\n', (5170, 5217), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((5241, 5302), 'spirl.data.block_stacking.src.robosuite.utils.transform_utils.rotation_matrix', 'rotation_matrix', ([], {'angle': 'yaw', 'direction': '[0, 0, 1.0]', 'point': 'None'}), '(angle=yaw, direction=[0, 0, 1.0], point=None)\n', (5256, 5302), False, 'from spirl.data.block_stacking.src.robosuite.utils.transform_utils import rotation_matrix\n'), ((6534, 6545), 'time.time', 'time.time', ([], {}), '()\n', (6543, 6545), False, 'import time\n')]
import numpy as np

from .combine_data import combination_column_range_map
from .load_dataset import load_dataset_np


def statistics(dataset):
    if isinstance(dataset, str):
        dataset = load_dataset_np(dataset_name=dataset)
    if not isinstance(dataset, np.ndarray):
        raise TypeError('dataset must be np.ndarray or the name of dataset(string).')
    min = np.nanmin(dataset)
    max = np.nanmax(dataset)
    median = np.nanmedian(dataset)
    mean = np.nanmean(dataset)
    std = np.nanstd(dataset)
    var = np.nanvar(dataset)
    result = {
        'min': min,
        'max': max,
        'median': median,
        'mean': mean,
        'std': std,
        'var': var,
    }
    return result


def statistics_on_every_fields(dataset):
    if isinstance(dataset, str):
        dataset = load_dataset_np(dataset_name=dataset)
    if not isinstance(dataset, np.ndarray):
        raise TypeError('dataset must be np.ndarray or the name of dataset(string).')
    result = dict()
    inner_size = dataset.shape[-1]
    for field_name, column_range in combination_column_range_map.items():
        if column_range[1] - 1 > inner_size:
            continue
        sub_dataset = dataset[:, column_range[0]-1: column_range[1]-1]
        min = np.nanmin(sub_dataset)
        max = np.nanmax(sub_dataset)
        median = np.nanmedian(sub_dataset)
        mean = np.nanmean(sub_dataset)
        std = np.nanstd(sub_dataset)
        var = np.nanvar(sub_dataset)
        d = {
            'min': min,
            'max': max,
            'median': median,
            'mean': mean,
            'std': std,
            'var': var,
        }
        result[field_name] = d
    return result


def show_statistics(dataset):
    result = statistics(dataset)
    print('Dataset:', dataset if isinstance(dataset, str) else dataset.shape)
    print(result)


def show_statistics_on_every_fields(dataset):
    result = statistics_on_every_fields(dataset)
    print('Dataset:', dataset if isinstance(dataset, str) else dataset.shape)
    print(result)
[ "numpy.nanmedian", "numpy.nanstd", "numpy.nanmin", "numpy.nanvar", "numpy.nanmax", "numpy.nanmean" ]
[((373, 391), 'numpy.nanmin', 'np.nanmin', (['dataset'], {}), '(dataset)\n', (382, 391), True, 'import numpy as np\n'), ((402, 420), 'numpy.nanmax', 'np.nanmax', (['dataset'], {}), '(dataset)\n', (411, 420), True, 'import numpy as np\n'), ((434, 455), 'numpy.nanmedian', 'np.nanmedian', (['dataset'], {}), '(dataset)\n', (446, 455), True, 'import numpy as np\n'), ((467, 486), 'numpy.nanmean', 'np.nanmean', (['dataset'], {}), '(dataset)\n', (477, 486), True, 'import numpy as np\n'), ((497, 515), 'numpy.nanstd', 'np.nanstd', (['dataset'], {}), '(dataset)\n', (506, 515), True, 'import numpy as np\n'), ((526, 544), 'numpy.nanvar', 'np.nanvar', (['dataset'], {}), '(dataset)\n', (535, 544), True, 'import numpy as np\n'), ((1254, 1276), 'numpy.nanmin', 'np.nanmin', (['sub_dataset'], {}), '(sub_dataset)\n', (1263, 1276), True, 'import numpy as np\n'), ((1291, 1313), 'numpy.nanmax', 'np.nanmax', (['sub_dataset'], {}), '(sub_dataset)\n', (1300, 1313), True, 'import numpy as np\n'), ((1331, 1356), 'numpy.nanmedian', 'np.nanmedian', (['sub_dataset'], {}), '(sub_dataset)\n', (1343, 1356), True, 'import numpy as np\n'), ((1372, 1395), 'numpy.nanmean', 'np.nanmean', (['sub_dataset'], {}), '(sub_dataset)\n', (1382, 1395), True, 'import numpy as np\n'), ((1410, 1432), 'numpy.nanstd', 'np.nanstd', (['sub_dataset'], {}), '(sub_dataset)\n', (1419, 1432), True, 'import numpy as np\n'), ((1447, 1469), 'numpy.nanvar', 'np.nanvar', (['sub_dataset'], {}), '(sub_dataset)\n', (1456, 1469), True, 'import numpy as np\n')]
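A quick synthetic-data demonstration of the summary helpers above; the array shape is arbitrary, and the injected NaN illustrates that the nan-aware reducers skip missing values rather than propagating them.

# Synthetic example; shape and values are arbitrary assumptions.
import numpy as np

data = np.random.rand(100, 16)
data[0, 0] = np.nan  # ignored by np.nanmin/np.nanmean/etc.
print(statistics(data))
show_statistics(data)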
import os
import functools

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, SimpleRNN, Dense, Dropout
from keras.callbacks import EarlyStopping

from utilnn import accuracy, fscore, coef


def load_data(labels_prefix):
    """
    @param labels_prefix: 'classification' or 'regression'
    @return: (inputs_train, outputs_train, inputs_test, outputs_test)
    """
    # Generate file path
    inputs_train_file_path = os.path.join(
        os.path.pardir, "data", "word-embedding" + "_train")
    outputs_train_file_path = os.path.join(
        os.path.pardir, "data", labels_prefix + "_train")
    inputs_test_file_path = os.path.join(
        os.path.pardir, "data", "word-embedding" + "_test")
    outputs_test_file_path = os.path.join(
        os.path.pardir, "data", "regression" + "_test")  # test label always use regression
    # Get data
    with open(inputs_train_file_path, 'rb') as inputs_train_file:
        inputs_train = np.load(inputs_train_file)
    with open(outputs_train_file_path, 'rb') as outputs_train_file:
        outputs_train = np.load(outputs_train_file)
    with open(inputs_test_file_path, 'rb') as inputs_test_file:
        inputs_test = np.load(inputs_test_file)
    with open(outputs_test_file_path, 'rb') as outputs_test_file:
        outputs_test = np.load(outputs_test_file)
    # Return data
    return (inputs_train, outputs_train, inputs_test, outputs_test)


def save_model(model, model_file_name):
    model_file_path = os.path.join(
        os.path.pardir, "models", model_file_name + ".h5")
    model.save(model_file_path)


def rnn(inputs_train, outputs_train, inputs_test, outputs_test, loss, train_embedding):
    """
    Recurrent neural network.
    @param loss: 'classification' or 'regression'
    @param train_embedding:
        0 - initialize with word_embedding_matrix, trainable=False
        1 - initialize with word_embedding_matrix, trainable=True
        2 - initialize with random matrix, trainable=True
    """
    # Load word-embedding matrix
    word_embedding_matrix_file_path = os.path.join(
        os.path.pardir, "data", "word-embedding_matrix")
    with open(word_embedding_matrix_file_path, 'rb') as word_embedding_matrix_file:
        word_embedding_matrix = np.load(word_embedding_matrix_file)
    # Split to train-set and validation-set
    split_at = len(inputs_train) - len(inputs_train) * 2 // 10
    (inputs_train, inputs_validation) = \
        (inputs_train[:split_at], inputs_train[split_at:])
    (outputs_train, outputs_validation) = \
        (outputs_train[:split_at], outputs_train[split_at:])
    # Build RNN model
    if train_embedding == 0:
        embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],
                                    weights=[word_embedding_matrix],
                                    input_length=inputs_train.shape[1],
                                    trainable=False)
    elif train_embedding == 1:
        embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],
                                    weights=[word_embedding_matrix],
                                    input_length=inputs_train.shape[1],
                                    trainable=True)
    elif train_embedding == 2:
        embedding_layer = Embedding(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],
                                    input_length=inputs_train.shape[1],
                                    trainable=True)
    else:
        raise ValueError("train_embedding should be 0 or 1 or 2.")
    model = Sequential()
    model.add(embedding_layer)
    model.add(SimpleRNN(128, unroll=True))
    model.add(Dropout(0.5))
    model.add(Dense(outputs_train.shape[1], activation='softmax'))
    print(model.summary())
    # compile
    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])
    # train
    if loss == 'categorical_crossentropy':
        early_stopping = EarlyStopping(
            min_delta=0.005, patience=3, restore_best_weights=True)
    elif loss == 'mean_squared_error':
        early_stopping = EarlyStopping(
            min_delta=0.0005, patience=3, restore_best_weights=True)
    else:
        raise ValueError(
            "loss should be 'categorical_crossentropy' or 'mean_squared_error'.")
    model.fit(inputs_train, outputs_train, epochs=100, batch_size=128,
              validation_data=(inputs_validation, outputs_validation),
              callbacks=[early_stopping])
    # evaluate
    outputs_test_pred = np.asarray(model.predict(inputs_test))
    acc_eval = accuracy(outputs_test, outputs_test_pred)
    fscore_eval = fscore(outputs_test, outputs_test_pred)
    coef_eval = coef(outputs_test, outputs_test_pred)
    print("Evaluation: acc - %.4f - fscore: %.4f - coef: %.4f - pvalue: %.4f" %
          (acc_eval, fscore_eval, coef_eval[0], coef_eval[1]))
    # return model
    return model


rnn_static = functools.partial(rnn, train_embedding=0)
rnn_non_static = functools.partial(rnn, train_embedding=1)
rnn_rand = functools.partial(rnn, train_embedding=2)


if __name__ == "__main__":
    inputs_train, outputs_train, inputs_test, outputs_test = \
        load_data("classification")
    model = rnn_static(inputs_train, outputs_train, inputs_test, outputs_test,
                       loss='categorical_crossentropy')
    save_model(model, "rnn_static_classification")
    model = rnn_non_static(inputs_train, outputs_train, inputs_test, outputs_test,
                           loss='categorical_crossentropy')
    save_model(model, "rnn_non_static_classification")
    model = rnn_rand(inputs_train, outputs_train, inputs_test, outputs_test,
                     loss='categorical_crossentropy')
    save_model(model, "rnn_rand_classification")
    inputs_train, outputs_train, inputs_test, outputs_test = \
        load_data("regression")
    model = rnn_static(inputs_train, outputs_train, inputs_test, outputs_test,
                       loss='mean_squared_error')
    save_model(model, "rnn_static_regression")
    model = rnn_non_static(inputs_train, outputs_train, inputs_test, outputs_test,
                           loss='mean_squared_error')
    save_model(model, "rnn_non_static_regression")
    model = rnn_rand(inputs_train, outputs_train, inputs_test, outputs_test,
                     loss='mean_squared_error')
    save_model(model, "rnn_rand_regression")
[ "keras.layers.SimpleRNN", "functools.partial", "numpy.load", "utilnn.coef", "utilnn.fscore", "keras.layers.Dropout", "utilnn.accuracy", "keras.layers.Dense", "keras.callbacks.EarlyStopping", "keras.layers.Embedding", "keras.models.Sequential", "os.path.join" ]
[((4815, 4856), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(0)'}), '(rnn, train_embedding=0)\n', (4832, 4856), False, 'import functools\n'), ((4874, 4915), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(1)'}), '(rnn, train_embedding=1)\n', (4891, 4915), False, 'import functools\n'), ((4927, 4968), 'functools.partial', 'functools.partial', (['rnn'], {'train_embedding': '(2)'}), '(rnn, train_embedding=2)\n', (4944, 4968), False, 'import functools\n'), ((458, 523), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('word-embedding' + '_train')"], {}), "(os.path.pardir, 'data', 'word-embedding' + '_train')\n", (470, 523), False, 'import os\n'), ((563, 625), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "(labels_prefix + '_train')"], {}), "(os.path.pardir, 'data', labels_prefix + '_train')\n", (575, 625), False, 'import os\n'), ((663, 727), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('word-embedding' + '_test')"], {}), "(os.path.pardir, 'data', 'word-embedding' + '_test')\n", (675, 727), False, 'import os\n'), ((766, 826), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', "('regression' + '_test')"], {}), "(os.path.pardir, 'data', 'regression' + '_test')\n", (778, 826), False, 'import os\n'), ((1501, 1564), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""models"""', "(model_file_name + '.h5')"], {}), "(os.path.pardir, 'models', model_file_name + '.h5')\n", (1513, 1564), False, 'import os\n'), ((2114, 2175), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', '"""word-embedding_matrix"""'], {}), "(os.path.pardir, 'data', 'word-embedding_matrix')\n", (2126, 2175), False, 'import os\n'), ((3483, 3495), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3493, 3495), False, 'from keras.models import Sequential\n'), ((4467, 4508), 'utilnn.accuracy', 'accuracy', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4475, 4508), False, 'from utilnn import accuracy, fscore, coef\n'), ((4527, 4566), 'utilnn.fscore', 'fscore', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4533, 4566), False, 'from utilnn import accuracy, fscore, coef\n'), ((4583, 4620), 'utilnn.coef', 'coef', (['outputs_test', 'outputs_test_pred'], {}), '(outputs_test, outputs_test_pred)\n', (4587, 4620), False, 'from utilnn import accuracy, fscore, coef\n'), ((976, 1002), 'numpy.load', 'np.load', (['inputs_train_file'], {}), '(inputs_train_file)\n', (983, 1002), True, 'import numpy as np\n'), ((1095, 1122), 'numpy.load', 'np.load', (['outputs_train_file'], {}), '(outputs_train_file)\n', (1102, 1122), True, 'import numpy as np\n'), ((1209, 1234), 'numpy.load', 'np.load', (['inputs_test_file'], {}), '(inputs_test_file)\n', (1216, 1234), True, 'import numpy as np\n'), ((1324, 1350), 'numpy.load', 'np.load', (['outputs_test_file'], {}), '(outputs_test_file)\n', (1331, 1350), True, 'import numpy as np\n'), ((2301, 2336), 'numpy.load', 'np.load', (['word_embedding_matrix_file'], {}), '(word_embedding_matrix_file)\n', (2308, 2336), True, 'import numpy as np\n'), ((2727, 2894), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'weights': '[word_embedding_matrix]', 'input_length': 'inputs_train.shape[1]', 'trainable': '(False)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n weights=[word_embedding_matrix], input_length=inputs_train.shape[1],\n trainable=False)\n', (2736, 2894), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3541, 3568), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(128)'], {'unroll': '(True)'}), '(128, unroll=True)\n', (3550, 3568), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3584, 3596), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3591, 3596), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3612, 3663), 'keras.layers.Dense', 'Dense', (['outputs_train.shape[1]'], {'activation': '"""softmax"""'}), "(outputs_train.shape[1], activation='softmax')\n", (3617, 3663), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((3855, 3924), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.005)', 'patience': '(3)', 'restore_best_weights': '(True)'}), '(min_delta=0.005, patience=3, restore_best_weights=True)\n', (3868, 3924), False, 'from keras.callbacks import EarlyStopping\n'), ((2980, 3146), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'weights': '[word_embedding_matrix]', 'input_length': 'inputs_train.shape[1]', 'trainable': '(True)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n weights=[word_embedding_matrix], input_length=inputs_train.shape[1],\n trainable=True)\n', (2989, 3146), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n'), ((4002, 4072), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.0005)', 'patience': '(3)', 'restore_best_weights': '(True)'}), '(min_delta=0.0005, patience=3, restore_best_weights=True)\n', (4015, 4072), False, 'from keras.callbacks import EarlyStopping\n'), ((3232, 3361), 'keras.layers.Embedding', 'Embedding', (['word_embedding_matrix.shape[0]', 'word_embedding_matrix.shape[1]'], {'input_length': 'inputs_train.shape[1]', 'trainable': '(True)'}), '(word_embedding_matrix.shape[0], word_embedding_matrix.shape[1],\n input_length=inputs_train.shape[1], trainable=True)\n', (3241, 3361), False, 'from keras.layers import Embedding, SimpleRNN, Dense, Dropout\n')]
# Generated by Django 2.1 on 2018-08-20 00:34

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('catalogo', '0002_auto_20180820_0017'),
    ]

    operations = [
        migrations.AlterField(
            model_name='game',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2018, 8, 20, 0, 34, 22, 883116, tzinfo=utc)),
        ),
    ]
[ "datetime.datetime" ]
[((422, 483), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(8)', '(20)', '(0)', '(34)', '(22)', '(883116)'], {'tzinfo': 'utc'}), '(2018, 8, 20, 0, 34, 22, 883116, tzinfo=utc)\n', (439, 483), False, 'import datetime\n')]
import errno
import json
import logging
import os
import random
import shutil
import socket
import typing

import boto3
import jmespath
import pexpect
import pexpect.exceptions


class EcsTunnelException(Exception):
    pass


class EcsTunnel:
    cluster_id: str
    task_id: str
    container_name: typing.Optional[str]
    aws_cli_exec: str
    remote_port_netcat_exec: str

    def __init__(
            self,
            cluster_id: str,
            task_id: str,
            container_name: str = None,
            aws_cli_exec: str = 'aws',
            aws_access_key_id: str = None,
            aws_secret_access_key: str = None,
            aws_session_token: str = None,
            aws_region_name: str = None,
            aws_profile_name: str = None,
            remote_port_netcat_exec: str = 'nc',
    ):
        self.cluster_id = cluster_id
        self.task_id = task_id
        self.container_name = container_name
        self.aws_cli_exec = aws_cli_exec
        self.remote_port_netcat_exec = remote_port_netcat_exec
        self._logger = logging.getLogger('ecs_tunnel')
        self._boto3_session = boto3.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            region_name=aws_region_name,
            profile_name=aws_profile_name
        )
        self._ecs_client = self._boto3_session.client('ecs')
        self._ssm_client = self._boto3_session.client('ssm')
        self._aws_access_key_id = aws_access_key_id
        self._aws_secret_access_key = aws_secret_access_key
        self._aws_session_token = aws_session_token
        self._aws_region_name = aws_region_name
        self._aws_profile_name = aws_profile_name
        self._ssm_target_id = self._get_task_id()
        self._port_fw_procs: typing.List[pexpect.spawn] = []
        self._ecs_exec_sessions: typing.List[dict] = []

        # See warning in https://docs.python.org/3/library/subprocess.html#popen-constructor
        self._resolved_aws_cli_exec = shutil.which(self.aws_cli_exec)
        if not self._resolved_aws_cli_exec:
            raise FileNotFoundError(self.aws_cli_exec)

    def _get_task_id(self):
        response = self._ecs_client.describe_tasks(
            cluster=self.cluster_id,
            tasks=[
                self.task_id,
            ]
        )
        if failure_reason := jmespath.search(f"failures[0].reason", response):
            raise EcsTunnelException(f'Task failure. Reason: {failure_reason}')
        if self.container_name:
            container_runtime_id = jmespath.search(
                f"tasks[0].containers[?name == '{self.container_name}'].runtimeId | [0]",
                response
            )
        else:
            container_runtime_id = jmespath.search(f"tasks[0].containers[0].runtimeId", response)
        if container_runtime_id is None:
            raise EcsTunnelException('Task runtime id could not be resolved')
        return f'ecs:{self.cluster_id}_{self.task_id}_{container_runtime_id}'

    def _get_env(self):
        aws_env = os.environ
        if self._aws_profile_name:
            aws_env['AWS_DEFAULT_PROFILE'] = self._aws_profile_name
        if self._aws_access_key_id:
            aws_env['AWS_ACCESS_KEY_ID'] = self._aws_access_key_id
        if self._aws_access_key_id:
            aws_env['AWS_SECRET_ACCESS_KEY'] = self._aws_secret_access_key
        if self._aws_session_token:
            aws_env['AWS_SESSION_TOKEN'] = self._aws_session_token
        if self._aws_region_name:
            aws_env['AWS_DEFAULT_REGION'] = self._aws_region_name
        return aws_env

    def _get_ssm_start_session_cmd(self, local_port: int, remote_port: int) -> typing.List[str]:
        parameters = {
            'portNumber': [
                str(remote_port)
            ],
            'localPortNumber': [
                str(local_port)
            ]
        }
        parameters_json = json.dumps(parameters)
        aws_cmd = [
            'ssm', 'start-session',
            '--target', self._ssm_target_id,
            '--document-name', 'AWS-StartPortForwardingSession',
            '--parameters', parameters_json
        ]
        return aws_cmd

    @classmethod
    def _get_port(cls, port: int = None, check_in_use=False) -> int:
        """
        Try to find a random dynamic port to use as a local port
        """
        # If port is not set return a random dynamic port
        if port:
            return port
        candidate_port = random.randrange(1024, 49151)
        if check_in_use:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.bind(('0.0.0.0', candidate_port))
                return candidate_port
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    return cls._get_port()
                else:
                    raise
        else:
            return candidate_port

    def local_port_tunnel(self, remote_port: int, local_port: int = None) -> int:
        """
        Tunnel a local port to a local port on the remote instance

        :return the local port
        """
        local_port = self._get_port(local_port, check_in_use=True)
        aws_cmd = self._get_ssm_start_session_cmd(local_port=local_port, remote_port=remote_port)
        self._logger.debug(f'AWS CLI start session cmd: {self._resolved_aws_cli_exec} {" ".join(aws_cmd)}')
        child = pexpect.spawn(command=self._resolved_aws_cli_exec, args=aws_cmd, env=self._get_env())
        try:
            child.expect('Waiting for connections')
        except pexpect.exceptions.TIMEOUT:
            raise EcsTunnelException(f'AWS session-manager did not reach "Waiting for connections". '
                                     f'Stdout: {child.before}')
        self._logger.debug(f'Session started successfully (Pid: {child.pid})')
        self._logger.debug(f'{child.before=}')
        self._logger.debug(f'{child.after=}')
        self._port_fw_procs.append(child)
        self._logger.debug(f'Forwarding {local_port} to {self.task_id}:{remote_port}')
        return local_port

    def _run_remote_ecs_cmd(self, cmd: str):
        execute_command_args = {
            'cluster': self.cluster_id,
            'command': cmd,
            'interactive': True,
            'task': self.task_id
        }
        if self.container_name:
            execute_command_args['container'] = self.container_name
        exec_response = self._ecs_client.execute_command(**execute_command_args)
        self._ecs_exec_sessions.append(exec_response)
        self._logger.debug(f'Started ECS exec command: {exec_response}')

    def remote_port_tunnel(self, remote_port: int, remote_host: str, local_port: int = None):
        # A random port to proxy through
        # TODO: check in use
        proxy_port = self._get_port()
        netcat_cmd = f'{self.remote_port_netcat_exec} -lk -p {proxy_port} -e {self.remote_port_netcat_exec} {remote_host} {remote_port}'
        self._run_remote_ecs_cmd(cmd=netcat_cmd)
        return self.local_port_tunnel(local_port=local_port, remote_port=proxy_port)

    def http_proxy_port_tunnel(self, remote_port=None, local_port=None):
        remote_port = self._get_port(remote_port)
        local_port = self._get_port(local_port)
        ncat_cmd = f'ncat -l {remote_port} --proxy-type http'
        self._run_remote_ecs_cmd(cmd=ncat_cmd)
        return self.local_port_tunnel(local_port=local_port, remote_port=remote_port)

    def close(self):
        self._logger.debug('Trying to kill running exec sessions')
        for exec_session in self._ecs_exec_sessions:
            session_id = jmespath.search('session.sessionId', exec_session)
            if session_id:
                self._logger.debug(f'Terminating SSM session: {session_id}')
                self._ssm_client.terminate_session(SessionId=session_id)
        self._ecs_exec_sessions = []
        self._logger.debug('Trying to kill running session-managers')
        for proc in self._port_fw_procs:
            if proc.isalive():
                self._logger.debug(f'Killing AWS session-manager-plugin: {proc.pid}')
                proc.terminate()
        self._port_fw_procs = []

    def __del__(self):
        # noinspection PyBroadException
        try:
            self.close()
        except BaseException:
            pass
[ "boto3.Session", "socket.socket", "shutil.which", "json.dumps", "jmespath.search", "random.randrange", "logging.getLogger" ]
[((1063, 1094), 'logging.getLogger', 'logging.getLogger', (['"""ecs_tunnel"""'], {}), "('ecs_tunnel')\n", (1080, 1094), False, 'import logging\n'), ((1126, 1328), 'boto3.Session', 'boto3.Session', ([], {'aws_access_key_id': 'aws_access_key_id', 'aws_secret_access_key': 'aws_secret_access_key', 'aws_session_token': 'aws_session_token', 'region_name': 'aws_region_name', 'profile_name': 'aws_profile_name'}), '(aws_access_key_id=aws_access_key_id, aws_secret_access_key=\n aws_secret_access_key, aws_session_token=aws_session_token, region_name\n =aws_region_name, profile_name=aws_profile_name)\n', (1139, 1328), False, 'import boto3\n'), ((2075, 2106), 'shutil.which', 'shutil.which', (['self.aws_cli_exec'], {}), '(self.aws_cli_exec)\n', (2087, 2106), False, 'import shutil\n'), ((3996, 4018), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (4006, 4018), False, 'import json\n'), ((4550, 4579), 'random.randrange', 'random.randrange', (['(1024)', '(49151)'], {}), '(1024, 49151)\n', (4566, 4579), False, 'import random\n'), ((2429, 2477), 'jmespath.search', 'jmespath.search', (['f"""failures[0].reason"""', 'response'], {}), "(f'failures[0].reason', response)\n", (2444, 2477), False, 'import jmespath\n'), ((2627, 2735), 'jmespath.search', 'jmespath.search', (['f"""tasks[0].containers[?name == \'{self.container_name}\'].runtimeId | [0]"""', 'response'], {}), '(\n f"tasks[0].containers[?name == \'{self.container_name}\'].runtimeId | [0]",\n response)\n', (2642, 2735), False, 'import jmespath\n'), ((2822, 2884), 'jmespath.search', 'jmespath.search', (['f"""tasks[0].containers[0].runtimeId"""', 'response'], {}), "(f'tasks[0].containers[0].runtimeId', response)\n", (2837, 2884), False, 'import jmespath\n'), ((4622, 4671), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4635, 4671), False, 'import socket\n'), ((7736, 7786), 'jmespath.search', 'jmespath.search', (['"""session.sessionId"""', 'exec_session'], {}), "('session.sessionId', exec_session)\n", (7751, 7786), False, 'import jmespath\n')]
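For context, a minimal usage sketch for the EcsTunnel class above. The cluster, task, and container identifiers and the ports are placeholders, the module is assumed to be saved as ecs_tunnel.py, and the AWS session-manager-plugin plus ECS exec must already be configured for the task:

import time

from ecs_tunnel import EcsTunnel  # assumed module name

tunnel = EcsTunnel(
    cluster_id='my-cluster',      # placeholder
    task_id='0123456789abcdef0',  # placeholder
    container_name='app',         # placeholder
)
try:
    # Forward 127.0.0.1:5432 to port 5432 inside the task's container
    local_port = tunnel.local_port_tunnel(remote_port=5432, local_port=5432)
    print(f'Tunnel ready on 127.0.0.1:{local_port}')
    time.sleep(300)  # keep the tunnel open while it is in use
finally:
    tunnel.close()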
#!/usr/bin/env python3.6
import subprocess
import os
import sys
import time


class DependencyCheck(object):

    def __init__(self, command):
        self.command = command
        self.status = self._check()

    def _parse_which_resp(self, response):
        # `which` prints the resolved path on success; an empty string or a
        # path that does not exist means the command is not installed.
        return os.path.exists(response)

    def _check(self):
        # Double braces keep the awk program out of str.format's way,
        # and shell=True is required because the command is a pipeline.
        proc = subprocess.Popen("which {0} | awk '{{print $1}}'".format(self.command),
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                )
        stdout, stderr = proc.communicate()
        return self._parse_which_resp(stdout.decode().strip())


class Compiler(object):

    def __init__(self, asm_file, system_os, arch):
        self.asm_name = os.path.abspath(asm_file)
        self.base_name, ext = os.path.splitext(self.asm_name)
        self.arch = arch
        self.os = system_os
        self.deps = ["nasm", "ld"]
        self._compile()

    def _check_deps(self):
        deps_stats = [DependencyCheck(x) for x in self.deps]
        # Collect the failing checks in a list so it can be iterated
        # and measured (filter() is a lazy iterator in Python 3)
        missing_dependencies = [x for x in deps_stats if x.status is False]
        for dependency in missing_dependencies:
            print("[!] Dependency is missing: {0}".format(dependency.command))
        if len(missing_dependencies) > 0:
            sys.exit(-1)
        return 0

    def _parse_args(self):
        """
        Map the OS and arch chosen by the user to nasm/ld options
        """
        parsed = dict()
        for dep in self.deps:
            parsed[dep] = ""
        if self.os == "posix":
            if self.arch == "x64":
                parsed["nasm"] = "elf64"
                parsed["ld"] = "elf_x86_64"
                return parsed
            elif self.arch == "x86":
                parsed["nasm"] = "elf32"
                # elf_i386 is the ld emulation that matches 32-bit (elf32) objects
                parsed["ld"] = "elf_i386"
                return parsed
        return 0

    def _link(self, parse):
        proc = subprocess.Popen("ld -m {0} -o {1} {2}.o".format(parse["ld"], self.base_name, self.base_name),
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        while proc.poll() is None:
            time.sleep(1)
        return proc.poll()

    def _compile(self):
        """
        Execute the steps necessary to compile asm code.
        """
        self._check_deps()
        parse = self._parse_args()
        if not parse:
            print("[!] Error: options parsing.")
            return -1
        proc = subprocess.Popen("nasm -f {0} {1}".format(parse["nasm"], self.asm_name),
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        while proc.poll() is None:
            time.sleep(1)
        if proc.poll() == 0:
            print("[+] Assembly code successfully assembled.")
            if self._link(parse) == 0:
                print("[+] Assembly object successfully linked.")
                return 0
            else:
                print("[!] Error: linking object.")
                return -1
        else:
            print("[!] Error: assembling code.")
        return proc.poll()


def main():
    # Minimal entry point (assumed, since main() was referenced but never
    # defined): expects the asm file, target OS and arch on the command line,
    # e.g. ./compile.py shellcode.asm posix x64
    if len(sys.argv) != 4:
        print("Usage: {0} <asm_file> <os> <arch>".format(sys.argv[0]))
        sys.exit(1)
    Compiler(sys.argv[1], sys.argv[2], sys.argv[3])


if __name__ == "__main__":
    main()
[ "os.path.abspath", "os.path.exists", "time.sleep", "os.path.splitext", "sys.exit" ]
[((264, 288), 'os.path.exists', 'os.path.exists', (['response'], {}), '(response)\n', (278, 288), False, 'import os\n'), ((752, 777), 'os.path.abspath', 'os.path.abspath', (['asm_file'], {}), '(asm_file)\n', (767, 777), False, 'import os\n'), ((808, 839), 'os.path.splitext', 'os.path.splitext', (['self.asm_name'], {}), '(self.asm_name)\n', (824, 839), False, 'import os\n'), ((1302, 1314), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1310, 1314), False, 'import sys\n'), ((2109, 2122), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2119, 2122), False, 'import time\n'), ((2580, 2593), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2590, 2593), False, 'import time\n')]
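Similarly, a quick usage sketch for the compiler script above, assuming it is saved as compile.py, that nasm and ld are on the PATH, and that hello.asm is a placeholder file name:

# From the command line:
#   ./compile.py hello.asm posix x64
# Or programmatically:
from compile import Compiler  # assumed module name

Compiler("hello.asm", "posix", "x64")  # assembles hello.o and links the "hello" executable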