id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
105407 | <reponame>daraymonsta/daily-do
'''
WRITTEN BY <NAME>
PURPOSE
Two strings are anagrams if you can make one from the other
by rearranging the letters. The function named is_anagram takes
two strings as its parameters, returning True if the strings are
anagrams and False otherwise.
EXAMPLE
The call is_anagram("typhoon", "opython") should return True
while the call is_anagram("Alice", "Bob") should return False.
'''
def str_to_sorted_list(temp_str):
    """Return the characters of *temp_str* as an alphabetically sorted list."""
    # sorted() accepts any iterable, so it both splits the string into
    # characters and orders them in a single call.
    return sorted(temp_str)
def is_anagram(str1, str2):
    """Print both strings and report whether they are anagrams of each other.

    Returns True when the two strings contain exactly the same letters
    (same multiset of characters), False otherwise.
    """
    # print the original strings
    print('First string: {}'.format(str1))
    print('Second string: {}'.format(str2))
    # Sorting the letters of each string gives a canonical form; two
    # strings are anagrams exactly when those canonical forms are equal.
    anagram_f = str_to_sorted_list(str1) == str_to_sorted_list(str2)
    if anagram_f:
        print('They are anagrams')
    else:
        print('They are not anagrams')
    return anagram_f
def run_test(str1, str2, should_anagram_be_true):
    """Run is_anagram on the pair and print whether the result matched
    the expected boolean."""
    matched = is_anagram(str1, str2) == should_anagram_be_true
    print('Test passed\n' if matched else 'Test failed\n')
# test 1: "typhoon" / "opython" contain the same letters rearranged -- expect True
str1 = "typhoon"
str2 = "opython"
run_test(str1,str2,True)
# test 2: "Alice" / "Bob" share no letter multiset -- expect False
str1 = "Alice"
str2 = "Bob"
run_test(str1,str2,False)
| StarcoderdataPython |
242803 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:48:50 2020
@author: <NAME>
Purpose Reads weight values from the weights file
"""
import struct
import numpy as np

byte_order = 'little'  # metadata ints in the file are little-endian
size_float = 8         # weights are 8-byte IEEE-754 doubles ('d' struct code)

# Use a context manager so the file is closed even if parsing fails
# (the original open()/close() pair leaked the handle on any exception).
with open("weights.bin", "rb") as file:
    # Read in metadata: three 4-byte integers describing the layer sizes.
    pixals_in_image = int.from_bytes(file.read(4), byte_order)
    num_hidden_nodes = int.from_bytes(file.read(4), byte_order)
    output_nodes = int.from_bytes(file.read(4), byte_order)

    # Read in weight_hidden: pixals_in_image rows of num_hidden_nodes doubles.
    buffer = file.read(pixals_in_image * num_hidden_nodes * size_float)
    weight_hidden = np.array(list(struct.iter_unpack('d' * num_hidden_nodes, buffer)))

    # Read bias_hidden: one double per hidden node.
    buffer = file.read(num_hidden_nodes * size_float)
    bias_hidden = np.array(struct.unpack('d' * num_hidden_nodes, buffer))

    # Read in weight_output: num_hidden_nodes rows of output_nodes doubles.
    buffer = file.read(output_nodes * num_hidden_nodes * size_float)
    weight_output = np.array(list(struct.iter_unpack('d' * output_nodes, buffer)))

    # Read bias_output: one double per output node.
    # BUG FIX: the original read output_nodes*num_hidden_nodes*size_float
    # bytes here, but struct.unpack('d'*output_nodes, ...) requires a buffer
    # of exactly output_nodes*size_float bytes and raises struct.error for
    # any other length (whenever num_hidden_nodes != 1).
    buffer = file.read(output_nodes * size_float)
    bias_output = np.array(struct.unpack('d' * output_nodes, buffer))

# Dump each array, preserving the original output format.
for _name, _value in (("weight_hidden", weight_hidden),
                      ("bias_hidden", bias_hidden),
                      ("weight_output", weight_output),
                      ("bias_output", bias_output)):
    print(_name)
    print(_value)
    print()
| StarcoderdataPython |
6686454 | """CNNs for testing/experiments."""
import torch
import torch.nn as nn
from backpack.core.layers import Flatten
from deepobs.pytorch.testproblems import testproblems_modules
def cifar10_c4d3(conv_activation=nn.ReLU, dense_activation=nn.ReLU):
    """CNN for CIFAR-10 dataset with 4 convolutional and 3 fc layers.

    Modified from:
    https://github.com/Zhenye-Na/deep-learning-uiuc/tree/master/assignments/mp3
    (remove Dropout, Dropout2d and BatchNorm2d)
    """
    def conv_block(c_in, c_out):
        # Two 3x3 same-padding convolutions followed by 2x2 max pooling.
        return [
            nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=3, padding=1),
            conv_activation(),
            nn.Conv2d(in_channels=c_out, out_channels=c_out, kernel_size=3, padding=1),
            conv_activation(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        ]

    layers = []
    layers += conv_block(3, 16)   # Conv Layer block 1
    layers += conv_block(16, 32)  # Conv Layer block 2
    # Flatten the pooled feature maps into the 2048-wide vector that the
    # first Linear layer below expects.
    layers.append(Flatten())
    # Dense layers
    layers += [
        nn.Linear(2048, 512),
        dense_activation(),
        nn.Linear(512, 64),
        dense_activation(),
        nn.Linear(64, 10),
    ]
    return nn.Sequential(*layers)
def deepobs_cifar10_c3d3(conv_activation=nn.ReLU, dense_activation=nn.ReLU):
    """3c3d network from DeepOBS.
    The weight matrices are initialized using Xavier initialization and the
    biases are initialized to zero.

    The DeepOBS module is post-processed: its activations and flatten layer
    are swapped out, then the (possibly nested) module tree is flattened
    into a single nn.Sequential.
    """
    def all_children(sequential):
        # Recursively unwrap nested nn.Sequential containers into a flat
        # list of leaf modules.
        children = []
        for child in sequential.children():
            if isinstance(child, nn.Sequential):
                children += all_children(child)
            else:
                children.append(child)
        return children
    def replace_activations(c3d3):
        "Replace ReLUs with specified activations for conv and dense."
        # conv activations (attribute names relu1..relu5 come from the
        # DeepOBS net_cifar10_3c3d module -- TODO confirm against deepobs)
        c3d3.relu1 = conv_activation()
        c3d3.relu2 = conv_activation()
        c3d3.relu3 = conv_activation()
        # dense activations
        c3d3.relu4 = dense_activation()
        c3d3.relu5 = dense_activation()
        return c3d3
    def replace_deepobs_flatten(c3d3):
        """Replace DeepOBS flatten with bpexts Flatten."""
        c3d3.flatten = Flatten()
        return c3d3
    def set_tf_same_hyperparams(c3d3):
        """Forward pass to set the hyperparams of padding and max pooling
        in tensorflow 'same' mode."""
        # A dummy CIFAR-10-shaped batch is pushed through once; presumably
        # the DeepOBS module fixes its 'same'-padding parameters lazily on
        # the first forward pass -- confirm against deepobs internals.
        CIFAR10_TEST_SHAPE = (1, 3, 32, 32)
        input = torch.rand(CIFAR10_TEST_SHAPE)
        _ = c3d3(input)
        return c3d3
    num_outputs = 10
    c3d3 = testproblems_modules.net_cifar10_3c3d(num_outputs)
    # Order matters: hyperparams are fixed by a forward pass *before* the
    # activations/flatten are swapped and the tree is flattened.
    c3d3 = set_tf_same_hyperparams(c3d3)
    c3d3 = replace_activations(c3d3)
    c3d3 = replace_deepobs_flatten(c3d3)
    modules = all_children(c3d3)
    return nn.Sequential(*modules)
| StarcoderdataPython |
1664540 | # Generated by Django 3.0.9 on 2020-08-18 10:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the 'store' field from CartProduct."""
    # Must be applied after the previous 'shopping' migration so the model
    # state it modifies is consistent.
    dependencies = [
        ('shopping', '0019_auto_20200817_2046'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='cartproduct',
            name='store',
        ),
    ]
| StarcoderdataPython |
230706 | <filename>7kyu/greatest_common_divisor.py
# http://www.codewars.com/kata/5500d54c2ebe0a8e8a0003fd/
def mygcd(x, y):
    """Return the greatest common divisor of x and y.

    Uses the iterative Euclidean algorithm instead of the original
    recursion: same results for positive inputs, no recursion overhead,
    and a zero argument no longer raises ZeroDivisionError -- by
    convention gcd(x, 0) == x.
    """
    # Invariant: gcd(x, y) is unchanged by (x, y) -> (y, x % y).
    # If x < y the first iteration simply swaps the operands.
    while y:
        x, y = y, x % y
    return x
| StarcoderdataPython |
3423831 | # Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
class FailedValidation(Exception):
    """Signals that an undercloud configuration value failed validation.

    NOTE(review): not raised anywhere in the code visible in this module;
    presumably used by callers of validate_config -- confirm before removing.
    """
    pass
def validate_config(params, error_callback):
    """Validate an undercloud configuration described by params

    :param params: A dict containing all of the undercloud.conf option
        names mapped to their proposed values.
    :param error_callback: A callback function that should be used to handle
        errors. The function must accept a single parameter, which will be
        a string describing the error.
    """
    # Work on a copy: _validate_in_cidr annotates the dict with derived
    # keys, and the caller's params should not be mutated.
    local_params = dict(params)
    # Order matters -- _validate_in_cidr derives 'inspection_start'/'_end'
    # that the range and overlap checks below consume.
    for validator in (_validate_value_formats,
                      _validate_in_cidr,
                      _validate_dhcp_range,
                      _validate_inspection_range,
                      _validate_no_overlap,
                      _validate_ips):
        validator(local_params, error_callback)
def _validate_ips(params, error_callback):
    """Report every undercloud nameserver entry that is not a valid IP."""
    def check_ip(value, param_name):
        # netaddr raises AddrFormatError for anything that is not an IP.
        try:
            netaddr.IPAddress(value)
        except netaddr.core.AddrFormatError:
            error_callback(
                '%s "%s" must be a valid IP address' % (param_name, value))

    for nameserver in params['undercloud_nameservers']:
        check_ip(nameserver, 'undercloud_nameservers')
def _validate_value_formats(params, error_callback):
    """Validate format of some values

    Certain values have a specific format that must be maintained in order to
    work properly. For example, local_ip must be in CIDR form, and the
    hostname must be a FQDN.
    """
    # local_ip must parse as CIDR and carry a real prefix -- /32 means the
    # netmask was omitted or wrong, so it is rejected the same way as a
    # parse failure.
    try:
        if netaddr.IPNetwork(params['local_ip']).prefixlen == 32:
            raise netaddr.AddrFormatError('Invalid netmask')
    except netaddr.core.AddrFormatError as e:
        error_callback('local_ip "%s" not valid: "%s" '
                       'Value must be in CIDR format.' %
                       (params['local_ip'], str(e)))

    # A fully-qualified hostname must contain a dot; None means "unset"
    # and is allowed.
    hostname = params['undercloud_hostname']
    if hostname is not None and '.' not in hostname:
        error_callback('Hostname "%s" is not fully qualified.' % hostname)
def _validate_in_cidr(params, error_callback):
    """Check that the relevant address parameters fall inside network_cidr.

    Side effects on ``params`` (relied upon by later validators):
    derives 'just_local_ip' from 'local_ip', and splits an
    'inspection_iprange' value into 'inspection_start'/'inspection_end'.
    """
    cidr = netaddr.IPNetwork(params['network_cidr'])
    def validate_addr_in_cidr(params, name, pretty_name=None, require_ip=True):
        # Report when params[name] parses as an IP but lies outside the
        # CIDR. If it does not parse at all, only complain when require_ip
        # is set (some parameters may legitimately hold hostnames).
        try:
            if netaddr.IPAddress(params[name]) not in cidr:
                message = ('%s "%s" not in defined CIDR "%s"' %
                           (pretty_name or name, params[name], cidr))
                error_callback(message)
        except netaddr.core.AddrFormatError:
            if require_ip:
                message = 'Invalid IP address: %s' % params[name]
                error_callback(message)
    # local_ip is in CIDR form; strip the prefix length for the check.
    params['just_local_ip'] = params['local_ip'].split('/')[0]
    # undercloud.conf uses inspection_iprange, the configuration wizard
    # tool passes the values separately.
    if 'inspection_iprange' in params:
        inspection_iprange = params['inspection_iprange'].split(',')
        params['inspection_start'] = inspection_iprange[0]
        params['inspection_end'] = inspection_iprange[1]
    validate_addr_in_cidr(params, 'just_local_ip', 'local_ip')
    validate_addr_in_cidr(params, 'network_gateway')
    # NOTE(bnemec): The ui needs to be externally accessible, which means in
    # many cases we can't have the public vip on the provisioning network.
    # In that case users are on their own to ensure they've picked valid
    # values for the VIP hosts.
    if ((params['undercloud_service_certificate'] or
            params['generate_service_certificate']) and
            not params['enable_ui']):
        validate_addr_in_cidr(params, 'undercloud_public_host',
                              require_ip=False)
        validate_addr_in_cidr(params, 'undercloud_admin_host',
                              require_ip=False)
    validate_addr_in_cidr(params, 'dhcp_start')
    validate_addr_in_cidr(params, 'dhcp_end')
    validate_addr_in_cidr(params, 'inspection_start', 'Inspection range start')
    validate_addr_in_cidr(params, 'inspection_end', 'Inspection range end')
def _validate_dhcp_range(params, error_callback):
    """Check that dhcp_start comes strictly before dhcp_end."""
    start = netaddr.IPAddress(params['dhcp_start'])
    end = netaddr.IPAddress(params['dhcp_end'])
    if not start < end:
        error_callback('Invalid dhcp range specified, dhcp_start "%s" does '
                       'not come before dhcp_end "%s"' % (start, end))
def _validate_inspection_range(params, error_callback):
    """Check that inspection_start comes strictly before inspection_end."""
    start = netaddr.IPAddress(params['inspection_start'])
    end = netaddr.IPAddress(params['inspection_end'])
    if not start < end:
        error_callback('Invalid inspection range specified, inspection_start '
                       '"%s" does not come before inspection_end "%s"' %
                       (start, end))
def _validate_no_overlap(params, error_callback):
    """Validate the provisioning and inspection ip ranges do not overlap"""
    dhcp_set = netaddr.IPSet(
        netaddr.IPRange(params['dhcp_start'], params['dhcp_end']))
    inspection_set = netaddr.IPSet(
        netaddr.IPRange(params['inspection_start'], params['inspection_end']))
    # Any address shared between the two sets is a configuration error.
    if dhcp_set & inspection_set:
        error_callback('Inspection DHCP range "%s-%s" overlaps provisioning '
                       'DHCP range "%s-%s".' %
                       (params['inspection_start'], params['inspection_end'],
                        params['dhcp_start'], params['dhcp_end']))
| StarcoderdataPython |
6590743 | <gh_stars>0
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 <NAME>;
#_______________________________________________________________________________
from quex.input.code.base import CodeFragment, CodeFragment_NULL
from quex.engine.analyzer.door_id_address_label import DoorID
from quex.engine.analyzer.state.core import Processor
from quex.engine.analyzer.state.entry import Entry
import quex.engine.state_machine.index as index
from quex.engine.misc.tools import typed
from quex.engine.state_machine.character_counter import SmLineColumnCountInfo
from quex.engine.operations.operation_list import Op
from quex.blackboard import Lng
from quex.constants import E_IncidenceIDs
from copy import copy
#__________________________________________________________________________
#
# TerminalState:
# .-------------------------------------------------.
# .-----. | |
# | 341 |--'accept'--> input_p = position[2]; --->---+---------. |
# '-----' | set terminating zero; | | |
# .-----. | | .---------. |
# | 412 |--'accept'--> column_n += length ------>---+ | pattern | |
# '-----' | set terminating zero; | | match |--->
# .-----. | | | actions | |
# | 765 |--'accept'--> line_n += 2; ------------>---' '---------' |
# '-----' | set terminating zero; |
# | |
# '-------------------------------------------------'
#
# A terminal state prepares the execution of the user's pattern match
# actions and the start of the next analysis step. For this, it computes
# line and column numbers, sets terminating zeroes in strings and resets
# the input pointer to the position where the next analysis step starts.
#__________________________________________________________________________
class Terminal(Processor):
    """A terminal state of the analyzer (see the diagram above).

    Wraps the user's pattern-match action code together with the incidence
    id it belongs to, the door id through which it is entered, and flags
    describing what runtime support the code requires (lexeme begin
    pointer, terminating zero, registers).
    """
    @typed(Name=(str,str), Code=CodeFragment)
    def __init__(self, Code, Name, IncidenceId=None, RequiredRegisterSet=None,
                 RequiresLexemeBeginF=False, RequireLexemeTerminatingZeroF=False,
                 dial_db=None,
                 PureCode=None):
        assert dial_db is not None
        assert isinstance(IncidenceId, int) \
               or IncidenceId is None \
               or IncidenceId in E_IncidenceIDs
        Processor.__init__(self, index.get(), Entry(dial_db))
        # The door id can only be derived once an incidence id is known;
        # otherwise both stay None until set_incidence_id() is called.
        if IncidenceId is not None:
            self.__incidence_id = IncidenceId
            self.__door_id = DoorID.incidence(IncidenceId, dial_db)
        else:
            self.__incidence_id = None
            self.__door_id = None
        self.__code = Code
        # 'pure code' defaults to the code fragment's own code lines.
        self.__pure_code = PureCode
        if self.__pure_code is None:
            self.__pure_code = Code.get_code()
        self.__name = Name
        if RequiredRegisterSet is not None:
            self.__required_register_set = RequiredRegisterSet
        else:
            self.__required_register_set = set()
        self.__requires_lexeme_terminating_zero_f = RequireLexemeTerminatingZeroF
        self.__requires_lexeme_begin_f = RequiresLexemeBeginF
    @property
    def door_id(self):
        assert self.__incidence_id is not None
        assert self.__door_id is not None
        return self.__door_id
    def clone(self, NewIncidenceId=None):
        # TODO: clone manually
        # BUG FIX: the original passed
        #   'NewIncidenceId if NewIncidenceId is None else self.__incidence_id'
        # i.e. the condition was inverted, so clone() *without* a new id
        # constructed the copy with IncidenceId=None and lost the original's
        # incidence id. Keep the original id unless a new one is given.
        if NewIncidenceId is None:
            incidence_id = self.__incidence_id
        else:
            incidence_id = NewIncidenceId
        result = Terminal(Code                          = copy(self.__code),
                          Name                          = self.__name,
                          IncidenceId                   = incidence_id,
                          RequiredRegisterSet           = self.__required_register_set,
                          RequiresLexemeBeginF          = self.__requires_lexeme_begin_f,
                          RequireLexemeTerminatingZeroF = self.__requires_lexeme_terminating_zero_f,
                          dial_db                       = self.entry.dial_db,
                          PureCode                      = self.__pure_code)
        # Preserve the exact door id of the source (the constructor derives
        # a fresh one; keep behavior identical to the original clone()).
        result.__door_id = self.__door_id
        if NewIncidenceId is not None:
            result.set_incidence_id(NewIncidenceId, ForceF=True)
        return result
    def incidence_id(self):
        return self.__incidence_id
    def set_incidence_id(self, IncidenceId, ForceF=False):
        # Without ForceF the incidence id may only be set once.
        assert ForceF or self.__incidence_id is None
        self.__incidence_id = IncidenceId
        self.__door_id = DoorID.incidence(IncidenceId, self.entry.dial_db)
    def name(self):
        return self.__name
    def code(self, dial_db):
        # NOTE(review): 'dial_db' is unused here but kept for signature
        # compatibility with subclasses that need it (TerminalCmdList).
        return self.__code.get_code()
    def pure_code(self):
        return self.__pure_code
    def requires_lexeme_terminating_zero_f(self):
        return self.__requires_lexeme_terminating_zero_f
    def requires_lexeme_begin_f(self):
        return self.__requires_lexeme_begin_f
    def required_register_set(self):
        return self.__required_register_set
class TerminalCmdList(Terminal):
    """Terminal whose action is a list of Op commands instead of a user
    code fragment; the code is rendered from the command list on demand.
    """
    def __init__(self, IncidenceId, CmdList, Name, dial_db, RequiredRegisterSet=None):
        # No user code fragment exists: CodeFragment_NULL stands in, and
        # code() below generates the output from the stored command list.
        Terminal.__init__(self, CodeFragment_NULL, Name, IncidenceId=IncidenceId,
                          dial_db=dial_db, RequiredRegisterSet=RequiredRegisterSet)
        self.__cmd_list = CmdList
    def code(self, dial_db):
        # Render the command list in the configured target language (Lng).
        return Lng.COMMAND_LIST(self.__cmd_list, dial_db)
class TerminalGotoDoorId(TerminalCmdList):
    """Terminal that optionally counts lines/columns and then jumps to DoorId.

    Command order: line/column counting commands (if LCCI is given), then
    any ExtraCmdList commands, and finally the goto to 'DoorId'.
    """
    def __init__(self, DoorId, IncidenceId, LCCI, Name, RequiredRegisterSet, terminal_factory, ExtraCmdList=None):
        if LCCI:
            run_time_counter_f, \
            cmd_list           = SmLineColumnCountInfo.get_OpList(LCCI,
                                                                  ModeName=terminal_factory.mode_name)
            # Record on the factory whether any terminal needs the run-time
            # counter -- presumably so it is emitted once per mode; confirm
            # against the terminal factory implementation.
            terminal_factory.run_time_counter_required_f |= run_time_counter_f
        else:
            cmd_list = []
        if ExtraCmdList: cmd_list.extend(ExtraCmdList)
        # The jump must always be the last command executed.
        cmd_list.append(Op.GotoDoorId(DoorId))
        TerminalCmdList.__init__(self, IncidenceId, cmd_list, Name,
                                 terminal_factory.dial_db,
                                 RequiredRegisterSet=RequiredRegisterSet)
| StarcoderdataPython |
class Sensor(object):
    """Dummy sensor object.

    Placeholder implementation: every method is a deliberate no-op that
    returns None.
    """
    def __init__(self):
        """The dummy sensor holds no state."""

    def read_data(self):
        """Placeholder for reading a sample; returns None."""

    def adjust_polling_rate(self):
        """Placeholder for tuning the polling rate; returns None."""
3225906 | """Black format your Jupyter Notebook and JupyterLab.
Usage:
------
Format one Jupyter file:
$ jblack notebook.ipynb
Format multiple Jupyter files:
$ jblack notebook_1.ipynb notebook_2.ipynb [...]
Format a directory:
$ jblack python/
Format one Jupyter file with a line length of 70:
$ jblack -l 70 notebook.ipynb
"""
import os
from argparse import ArgumentParser, Namespace, RawTextHelpFormatter
from black import TargetVersion
def parse_args(*args: str) -> Namespace:
    """Build the jblack CLI parser and parse *args* (not sys.argv)."""
    arg_parser = ArgumentParser(description=__doc__,
                                formatter_class=RawTextHelpFormatter)
    arg_parser.add_argument("--check", action="store_true")
    # parser.add_argument("--diff", action="store_true")
    arg_parser.add_argument("--pyi", action="store_true")
    arg_parser.add_argument("-l", "--line-length", type=int, default=88)
    arg_parser.add_argument("-s", "--skip-string-normalization",
                            action="store_true")
    arg_parser.add_argument("-w", "--workers", type=int, default=1,
                            help="number of worker processes")
    arg_parser.add_argument("--show-invalid-code", action="store_true")
    arg_parser.add_argument("targets", nargs="+", default=os.getcwd())
    # Valid values mirror black's TargetVersion enum (lower-cased names).
    supported_versions = [version.name.lower() for version in TargetVersion]
    arg_parser.add_argument(
        "-t",
        "--target-version",
        nargs="+",
        help="Python versions that should be supported by Black's output. "
             "[default: per-file auto-detection]",
        choices=supported_versions,
    )
    return arg_parser.parse_args(args)
| StarcoderdataPython |
8059576 | <reponame>ConsultingMD/covid-data-public
import enum
import pathlib
import pandas as pd
import datetime
import dateutil.parser
import pydantic
import structlog
from covidactnow.datapublic import common_fields
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic import common_init
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
TSA_HOSPITALIZATIONS_URL = (
"https://www.dshs.texas.gov/coronavirus/TexasCOVID-19HospitalizationsOverTimebyTSA.xlsx"
)
@enum.unique
class Fields(common_fields.GetByValueMixin, common_fields.FieldNameAndCommonField, enum.Enum):
    # Each member pairs the source-spreadsheet field name with its
    # CommonFields counterpart (None when there is no common-schema
    # equivalent) -- pairing semantics come from FieldNameAndCommonField;
    # confirm against covidactnow.datapublic.common_fields.
    TSA_REGION_ID = "TSA ID", None
    TSA_AREA = "TSA AREA", None
    DATE = "date", CommonFields.DATE
    CURRENT_HOSPITALIZED = CommonFields.CURRENT_HOSPITALIZED, CommonFields.CURRENT_HOSPITALIZED
class TexasTraumaServiceAreaHospitalizationsUpdater(pydantic.BaseModel):
    """Updates latest Trauma Service Area hospitalizations."""
    # Destination CSV for the merged hospitalization/ICU time series.
    output_csv: pathlib.Path
    class Config:
        # Allow pathlib.Path (not a pydantic-native field type here).
        arbitrary_types_allowed = True
    @staticmethod
    def make_with_data_root(
        data_root: pathlib.Path,
    ) -> "TexasTraumaServiceAreaHospitalizationsUpdater":
        """Build an updater writing under <data_root>/states/tx."""
        return TexasTraumaServiceAreaHospitalizationsUpdater(
            output_csv=data_root / "states" / "tx" / "tx_tsa_hospitalizations.csv"
        )
    @staticmethod
    def parse_data(data, field):
        """Reshape one spreadsheet tab from wide (one column per date) to
        long format with one row per (TSA region, date); values land in
        the column named by ``field``.
        """
        index = [Fields.TSA_REGION_ID, Fields.TSA_AREA]
        # Fixing strange type mismatches from excel sheet
        # (keys look like Excel serial numbers leaked in as column
        # headers -- TODO confirm the mapping against the source file).
        date_replacements = {
            "44051": "2020-08-08",
            "44059": "2020-08-16",
            "39668": "2020-08-08",
        }
        data = data.rename(date_replacements, axis="columns")
        # stack() pivots the per-date columns into rows.
        data = (
            data.set_index(index)
            .stack()
            .reset_index()
            .rename({"level_2": Fields.DATE, 0: field}, axis=1)
        )
        # Dates in the TSA excel spreadsheets have lots of small data issues. This addresses
        # some known inconsistencies and handles when columns are duplicated (for example,
        # '2020-08-17.x' and '2020-08-17.y' containing almost identical data).
        data[Fields.DATE] = data[Fields.DATE].str.lstrip("Hospitalizations ")
        data[Fields.DATE] = data[Fields.DATE].str.rstrip(".x").str.rstrip(".y")
        data = data.set_index(index + [Fields.DATE])
        # Of duplicated (region, area, date) rows, keep the last one seen.
        data = data.loc[~data.index.duplicated(keep="last")]
        data = data.reset_index()
        # Normalize every remaining date header to ISO format.
        data[Fields.DATE] = data[Fields.DATE].apply(
            lambda x: dateutil.parser.parse(x).date().isoformat()
        )
        # Drop all state level values
        data = data.loc[data[Fields.TSA_REGION_ID].notnull(), :]
        # Region ids sometimes carry a trailing period in the sheet.
        data[Fields.TSA_REGION_ID] = data[Fields.TSA_REGION_ID].apply(lambda x: x.rstrip("."))
        return data
    def update(self):
        """Download the TSA workbook; return hospitalizations merged with
        ICU counts, one row per (TSA region, area, date).
        """
        data = pd.read_excel(TSA_HOSPITALIZATIONS_URL, header=2, sheet_name=None)
        hosp_data = self.parse_data(
            data["COVID-19 Hospitalizations"], CommonFields.CURRENT_HOSPITALIZED
        )
        icu_data = self.parse_data(data["COVID-19 ICU"], CommonFields.CURRENT_ICU)
        index = [Fields.TSA_REGION_ID, Fields.TSA_AREA, CommonFields.DATE]
        hosp_data.set_index(index, inplace=True)
        icu_data.set_index(index, inplace=True)
        # Outer merge keeps rows present in only one of the two sheets.
        return hosp_data.merge(
            icu_data, left_index=True, right_index=True, how="outer"
        ).reset_index()
if __name__ == "__main__":
    # Script entry point: fetch the latest TSA workbook, merge the
    # hospitalization and ICU sheets, and write the combined CSV.
    common_init.configure_logging()
    log = structlog.get_logger()
    updater = TexasTraumaServiceAreaHospitalizationsUpdater.make_with_data_root(DATA_ROOT)
    data = updater.update()
    data.to_csv(updater.output_csv, index=False)
    log.info("Updated TSA Hospitalizations", output_csv=str(updater.output_csv))
| StarcoderdataPython |
83853 | <filename>pyexfil/Comm/DNSoTLS/constants.py
import os

# Network parameters for the DNS-over-TLS exfiltration channel.
DNS_OVER_TLS_PORT = 853
CHUNK_SIZE = 128
CHECK_CERT = True  # We recommend using valid certificates. An invalid certificate (self-signed) might trigger alerts on some systems.
LOCAL_HOST = 'localhost'
MAX_BUFFER = 4096
MAX_CLIENTS = 5

# The certificate path depends on where the tool is launched from.
# BUG FIX: os.getcwd() returns an *absolute* path, so comparing it with the
# bare directory names 'DNSoTLS'/'PyExfil' could never match; compare the
# final path component instead. The 'PyExfil' and fallback branches are
# collapsed because they assigned the identical value.
if os.path.basename(os.getcwd()) == 'DNSoTLS':
    CERT_FILE = 'cert.ccc'
else:
    CERT_FILE = 'pyexfil/Comm/DNSoTLS/cert.ccc'
| StarcoderdataPython |
3354678 | <reponame>vestial/vision-video-analyzer
from main.utils.feedback.shot.shot_recommendation import get_shot_recommendation
from main.utils.histogram_analyzer import get_exposure_histogram
from main.utils.shots_analyzer import get_background, get_contrast, get_shot_screenshot, get_shots, get_shots_length
from celery import chain
#Use celery to execute tasks sequentially
def celery_analyze_shots(video):
    """Run the shot-analysis tasks for *video* as one sequential celery
    chain and return each task's result plus derived recommendations.

    Returns [exposures, shot_lengths, contrasts, backgrounds, screenshots,
    recommendations].
    """
    result = []
    # .si() makes each signature immutable, so a task's return value is
    # NOT fed into the next task; the chain only enforces ordering.
    celery_chain = chain(get_shots.si(video), get_exposure_histogram.si(video),
                         get_shots_length.si(video), get_contrast.si(video),
                         get_background.si(video),
                         get_shot_screenshot.si(video))()
    # The chain handle points at the *last* task; each .parent steps one
    # task back, so four parents up is get_exposure_histogram's result.
    exposures = celery_chain.parent.parent.parent.parent.get()
    shot_lengths = celery_chain.parent.parent.parent.get()
    contrasts = celery_chain.parent.parent.get()
    backgrounds = celery_chain.parent.get()
    screenshots = celery_chain.get()
    recommendations = get_shot_recommendation(video, exposures, shot_lengths,
                                              contrasts, backgrounds)
    result = [
        exposures, shot_lengths, contrasts, backgrounds, screenshots,
        recommendations
    ]
    return result
3237723 | #!/usr/bin/python
"""
Retrieves and collects data from the the NetApp E-series web server
and sends the data to an influxdb server
"""
import struct
import time
import logging
import socket
import argparse
import concurrent.futures
import requests
import json
import hashlib
from datetime import datetime
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
try:
import cPickle as pickle
except ImportError:
import pickle
# Fallback credentials for the Web Services Proxy when none are supplied on
# the command line or in /collector/config.json.
DEFAULT_USERNAME = 'admin'
DEFAULT_PASSWORD = '<PASSWORD>'  # NOTE(review): value was redacted at source -- restore the real default
# Display name used when an array reports neither a name nor an id.
DEFAULT_SYSTEM_NAME = 'unnamed'
# InfluxDB backend that receives the collected metrics.
INFLUXDB_HOSTNAME = 'influxdb'
INFLUXDB_PORT = 8086
INFLUXDB_DATABASE = 'eseries'
# NOTE(bdustin): time in seconds between folder collections
FOLDER_COLLECTION_INTERVAL = 60*10
__version__ = '1.0'
#######################
# LIST OF METRICS######
#######################
# Controller statistics fields requested from the analysed-statistics API.
# BUG FIX: the original list was missing commas after
# "maxCpuUtilizationPerCore" and "cpuAvgUtilizationPerCore"; Python's
# implicit string-literal concatenation silently fused them with the
# following entries ("maxCpuUtilizationPerCorecpuAvgUtilization", ...),
# corrupting two metric names and dropping two others from the list.
# NOTE(review): "maxCpuUtilizationPerCore" and "cpuAvgUtilizationPerCore"
# also lack a comma before the *next* quoted word in the original source
# ('"maxCpuUtilizationPerCore"' directly followed by '"cpuAvgUtilization"');
# all four names are kept as separate entries here.
CONTROLLER_PARAMS = [
    "observedTime",
    "observedTimeInMS",
    "readIOps",
    "writeIOps",
    "otherIOps",
    "combinedIOps",
    "readThroughput",
    "writeThroughput",
    "combinedThroughput",
    "readResponseTime",
    "readResponseTimeStdDev",
    "writeResponseTime",
    "writeResponseTimeStdDev",
    "combinedResponseTime",
    "combinedResponseTimeStdDev",
    "averageReadOpSize",
    "averageWriteOpSize",
    "readOps",
    "writeOps",
    "readPhysicalIOps",
    "writePhysicalIOps",
    "controllerId",
    "cacheHitBytesPercent",
    "randomIosPercent",
    "mirrorBytesPercent",
    "fullStripeWritesBytesPercent",
    "maxCpuUtilization",
    "maxCpuUtilizationPerCore",
    "cpuAvgUtilization",
    "cpuAvgUtilizationPerCore",
    "cpuAvgUtilizationPerCoreStdDev",
    "raid0BytesPercent",
    "raid1BytesPercent",
    "raid5BytesPercent",
    "raid6BytesPercent",
    "ddpBytesPercent",
    "readHitResponseTime",
    "readHitResponseTimeStdDev",
    "writeHitResponseTime",
    "writeHitResponseTimeStdDev",
    "combinedHitResponseTime",
    "combinedHitResponseTimeStdDev"
]
# Per-drive statistics fields to forward to influxdb.
DRIVE_PARAMS = [
    'averageReadOpSize',
    'averageWriteOpSize',
    'combinedIOps',
    'combinedResponseTime',
    'combinedThroughput',
    'otherIOps',
    'readIOps',
    'readOps',
    'readPhysicalIOps',
    'readResponseTime',
    'readThroughput',
    'writeIOps',
    'writeOps',
    'writePhysicalIOps',
    'writeResponseTime',
    'writeThroughput'
]
# Per-interface statistics fields to forward to influxdb.
INTERFACE_PARAMS = [
    "readIOps",
    "writeIOps",
    "otherIOps",
    "combinedIOps",
    "readThroughput",
    "writeThroughput",
    "combinedThroughput",
    "readResponseTime",
    "writeResponseTime",
    "combinedResponseTime",
    "averageReadOpSize",
    "averageWriteOpSize",
    "readOps",
    "writeOps",
    "queueDepthTotal",
    "queueDepthMax",
    "channelErrorCounts"
]
# System-wide statistics fields to forward to influxdb.
SYSTEM_PARAMS = [
    "maxCpuUtilization",
    "cpuAvgUtilization"
]
# Per-volume statistics fields to forward to influxdb.
VOLUME_PARAMS = [
    'averageReadOpSize',
    'averageWriteOpSize',
    'combinedIOps',
    'combinedResponseTime',
    'combinedThroughput',
    'flashCacheHitPct',
    'flashCacheReadHitBytes',
    'flashCacheReadHitOps',
    'flashCacheReadResponseTime',
    'flashCacheReadThroughput',
    'otherIOps',
    'queueDepthMax',
    'queueDepthTotal',
    'readCacheUtilization',
    'readHitBytes',
    'readHitOps',
    'readIOps',
    'readOps',
    'readPhysicalIOps',
    'readResponseTime',
    'readThroughput',
    'writeCacheUtilization',
    'writeHitBytes',
    'writeHitOps',
    'writeIOps',
    'writeOps',
    'writePhysicalIOps',
    'writeResponseTime',
    'writeThroughput'
]
# Major Event Log (MEL) fields to forward to influxdb.
MEL_PARAMS = [
    'id',
    'description',
    'location'
]
#######################
# PARAMETERS###########
#######################
# Size of the worker pool used when collecting from multiple arrays.
NUMBER_OF_THREADS = 10
# LOGGING
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Suppress urllib3's unverified-HTTPS warnings (sessions are created with
# verify=False against the webserver's self-signed certificate).
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger("collector")
# Disables reset connection warning message if the connection time is too long
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
#######################
# ARGUMENT PARSER######
#######################
# NOTE: parsing happens at import time (CMD below), so loading this module
# with unrecognized command-line flags will abort the process.
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-u', '--username', default='',
                    help='Provide the username used to connect to the Web Services Proxy. '
                         'If not specified, will check for the \'/collector/config.json\' file. '
                         'Otherwise, it will default to \'' + DEFAULT_USERNAME + '\'')
PARSER.add_argument('-p', '--password', default='',
                    help='Provide the password for this user to connect to the Web Services Proxy. '
                         'If not specified, will check for the \'/collector/config.json\' file. '
                         'Otherwise, it will default to \'' + DEFAULT_PASSWORD + '\'')
PARSER.add_argument('-t', '--intervalTime', type=int, default=5,
                    help='Provide the time (seconds) in which the script polls and sends data '
                         'from the SANtricity webserver to the influxdb backend. '
                         'If not specified, will use the default time of 60 seconds. <time>')
PARSER.add_argument('--proxySocketAddress', default='webservices',
                    help='Provide both the IP address and the port for the SANtricity webserver. '
                         'If not specified, will default to localhost. <IPv4 Address:port>')
PARSER.add_argument('-r', '--retention', type=str, default='52w',
                    help='The default retention duration for influxdb')
# The --show* flags below are debug switches that print intermediate
# names/payloads while collecting.
PARSER.add_argument('-s', '--showStorageNames', action='store_true',
                    help='Outputs the storage array names found from the SANtricity webserver')
PARSER.add_argument('-v', '--showVolumeNames', action='store_true', default=0,
                    help='Outputs the volume names found from the SANtricity webserver')
PARSER.add_argument('-f', '--showInterfaceNames', action='store_true', default=0,
                    help='Outputs the interface names found from the SANtricity webserver')
PARSER.add_argument('-a', '--showVolumeMetrics', action='store_true', default=0,
                    help='Outputs the volume payload metrics before it is sent')
PARSER.add_argument('-d', '--showDriveNames', action='store_true', default=0,
                    help='Outputs the drive names found from the SANtricity webserver')
PARSER.add_argument('-b', '--showDriveMetrics', action='store_true', default=0,
                    help='Outputs the drive payload metrics before it is sent')
PARSER.add_argument('-c', '--showSystemMetrics', action='store_true', default=0,
                    help='Outputs the system payload metrics before it is sent')
PARSER.add_argument('-m', '--showMELMetrics', action='store_true', default=0,
                    help='Outputs the MEL payload metrics before it is sent')
PARSER.add_argument('-e', '--showStateMetrics', action='store_true', default=0,
                    help='Outputs the state payload metrics before it is sent')
PARSER.add_argument('-g', '--showInterfaceMetrics', action='store_true', default=0,
                    help='Outputs the interface payload metrics before it is sent')
PARSER.add_argument('-i', '--showIteration', action='store_true', default=0,
                    help='Outputs the current loop iteration')
PARSER.add_argument('-n', '--doNotPost', action='store_true', default=0,
                    help='Pull information, but do not post to influxdb')
CMD = PARSER.parse_args()
# Base REST endpoint for all storage-system queries.
PROXY_BASE_URL = 'http://{}/devmgr/v2/storage-systems'.format(CMD.proxySocketAddress)
RETENTION_DUR = CMD.retention
#######################
# HELPER FUNCTIONS#####
#######################
def get_configuration():
    """Return the collector settings parsed from ./config.json.

    Returns an empty dict when the file is missing, unreadable, contains
    invalid JSON, or parses to a falsy value. (The original implicitly
    returned None for a falsy parse result, forcing callers to handle
    both None and dict; it also used a bare ``except``.)
    """
    try:
        with open("config.json") as config_file:
            config_data = json.load(config_file)
    except (OSError, ValueError):
        # Missing/unreadable file or malformed JSON -- treat as "no config".
        return dict()
    return config_data if config_data else dict()
def get_session():
    """
    Returns a session with the appropriate content type and login information.
    :return: Returns a request session for the SANtricity RestAPI Webserver
    """
    request_session = requests.Session()

    # Try to use what was passed in for username/password...
    username = CMD.username
    # NOTE(review): this assignment was anonymized in the source
    # ('password = <PASSWORD>'); CMD.password is the natural counterpart
    # to CMD.username -- confirm against the original repository.
    password = CMD.password

    # ...if there was nothing passed in then try to read it from config file
    if not username and not password:
        # Try to read username and password from config file, if it exists;
        # otherwise fall back to DEFAULT_USERNAME/DEFAULT_PASSWORD.
        try:
            with open("config.json") as config_file:
                config_data = json.load(config_file)
            if config_data:
                username = config_data["username"]
                password = config_data["password"]
        except Exception:
            LOG.exception("Unable to open \"/collector/config.json\" file")
            username = DEFAULT_USERNAME
            password = DEFAULT_PASSWORD
        # Guard against a config file that parsed but was empty/falsy --
        # the original left the credentials as "" in that case.
        if not username:
            username = DEFAULT_USERNAME
        if not password:
            password = DEFAULT_PASSWORD

    request_session.auth = (username, password)
    request_session.headers = {"Accept": "application/json",
                               "Content-Type": "application/json",
                               "netapp-client-type": "grafana-" + __version__}
    # Ignore the self-signed certificate issues for https
    request_session.verify = False
    return request_session
def get_system_name(sys):
    """Return a display name for the storage-system dict *sys*.

    Falls back from the 'name' field to the 'id' field, and finally to
    DEFAULT_SYSTEM_NAME when both are empty or missing.
    """
    sys_name = sys.get("name", sys["id"])
    if not sys_name:
        # Empty or missing name -> fall back to the id.
        sys_name = sys["id"]
    if not sys_name:
        # Even the id is empty -> use the default placeholder.
        sys_name = DEFAULT_SYSTEM_NAME
    return sys_name
def get_drive_location(storage_id, session):
    """
    :param storage_id: Storage system ID on the Webserver
    :param session: the session of the thread that calls this definition
    :return: returns a dictionary mapping each drive reference to the
        [tray id, slot] pair describing where it is located
    """
    hardware_list = session.get("{}/{}/hardware-inventory".format(
        PROXY_BASE_URL, storage_id)).json()
    tray_list = hardware_list["trays"]
    drive_list = hardware_list["drives"]

    # Map each tray's internal reference to its human-facing tray id.
    tray_ids = {tray["trayRef"]: tray["trayId"] for tray in tray_list}

    drive_location = {}
    for drive in drive_list:
        tray_id = tray_ids.get(drive["physicalLocation"]["trayRef"])
        # BUG FIX: the original compared tray_id to the *string* "none",
        # but dict.get() returns the None object for a missing key, so the
        # error branch was effectively unreachable and an unmatched drive
        # got a None tray id that crashes later '{:02.0f}' formatting.
        if tray_id is not None:
            drive_location[drive["driveRef"]] = [tray_id,
                                                 drive["physicalLocation"]["slot"]]
        else:
            LOG.error("Error matching drive to a tray in the storage system")
    return drive_location
def collect_storage_metrics(sys):
    """
    Collects all defined storage metrics and posts them to influxdb.

    Pulls drive, interface, system and volume statistics from the Web
    Services proxy for one storage system, builds one list of InfluxDB
    points and writes them in a single write_points call.

    :param sys: The JSON object of a storage_system
    """
    try:
        # Each worker thread uses its own HTTP session and DB client.
        session = get_session()
        client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)
        sys_id = sys["id"]
        sys_name = get_system_name(sys)
        json_body = list()
        # Get Drive statistics
        drive_stats_list = session.get(("{}/{}/analysed-drive-statistics").format(
            PROXY_BASE_URL, sys_id)).json()
        drive_locations = get_drive_location(sys_id, session)
        if CMD.showDriveNames:
            for stats in drive_stats_list:
                location_send = drive_locations.get(stats["diskId"])
                LOG.info(("Tray{:02.0f}, Slot{:03.0f}").format(location_send[0], location_send[1]))
        # Add Drive statistics to json body
        for stats in drive_stats_list:
            # NOTE(review): disk_location_info is None when the drive was not
            # matched to a tray; the .format calls below would then raise —
            # confirm whether unmatched drives can occur in practice.
            disk_location_info = drive_locations.get(stats["diskId"])
            disk_item = dict(
                measurement = "disks",
                tags = dict(
                    sys_id = sys_id,
                    sys_name = sys_name,
                    sys_tray = ("{:02.0f}").format(disk_location_info[0]),
                    sys_tray_slot = ("{:03.0f}").format(disk_location_info[1])
                ),
                # Only the metrics named in DRIVE_PARAMS are forwarded;
                # missing keys are stored as None via stats.get().
                fields = dict(
                    (metric, stats.get(metric)) for metric in DRIVE_PARAMS
                )
            )
            if CMD.showDriveMetrics:
                LOG.info("Drive payload: %s", disk_item)
            json_body.append(disk_item)
        # Get interface statistics
        interface_stats_list = session.get(("{}/{}/analysed-interface-statistics").format(
            PROXY_BASE_URL, sys_id)).json()
        if CMD.showInterfaceNames:
            for stats in interface_stats_list:
                LOG.info(stats["interfaceId"])
        # Add interface statistics to json body
        for stats in interface_stats_list:
            if_item = dict(
                measurement = "interface",
                tags = dict(
                    sys_id = sys_id,
                    sys_name = sys_name,
                    interface_id = stats["interfaceId"],
                    channel_type = stats["channelType"]
                ),
                fields = dict(
                    (metric, stats.get(metric)) for metric in INTERFACE_PARAMS
                )
            )
            if CMD.showInterfaceMetrics:
                LOG.info("Interface payload: %s", if_item)
            json_body.append(if_item)
        # Get System statistics (a single object, not a list, despite the name)
        system_stats_list = session.get(("{}/{}/analysed-system-statistics").format(
            PROXY_BASE_URL, sys_id)).json()
        # Add System statistics to json body
        sys_item = dict(
            measurement = "systems",
            tags = dict(
                sys_id = sys_id,
                sys_name = sys_name
            ),
            fields = dict(
                (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS
            )
        )
        if CMD.showSystemMetrics:
            LOG.info("System payload: %s", sys_item)
        json_body.append(sys_item)
        # Get Volume statistics
        volume_stats_list = session.get(("{}/{}/analysed-volume-statistics").format(
            PROXY_BASE_URL, sys_id)).json()
        if CMD.showVolumeNames:
            for stats in volume_stats_list:
                LOG.info(stats["volumeName"])
        # Add Volume statistics to json body
        for stats in volume_stats_list:
            vol_item = dict(
                measurement = "volumes",
                tags = dict(
                    sys_id = sys_id,
                    sys_name = sys_name,
                    vol_name = stats["volumeName"]
                ),
                fields = dict(
                    (metric, stats.get(metric)) for metric in VOLUME_PARAMS
                )
            )
            if CMD.showVolumeMetrics:
                LOG.info("Volume payload: %s", vol_item)
            json_body.append(vol_item)
        # All points are written in one batch; --doNotPost suppresses writes.
        if not CMD.doNotPost:
            client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision="s")
    except RuntimeError:
        LOG.error(("Error when attempting to post statistics for {}/{}").format(sys["name"], sys["id"]))
def collect_major_event_log(sys):
    """
    Collects all defined MEL metrics and posts them to influxdb.

    Fetches Major Event Log entries newer than the last one already stored
    for this system and writes them as "major_event_log" points.

    :param sys: The JSON object of a storage_system
    """
    try:
        session = get_session()
        client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)
        sys_id = sys["id"]
        sys_name = get_system_name(sys)
        json_body = list()
        start_from = -1
        mel_grab_count = 8192
        # Resume from the last MEL sequence number already stored in InfluxDB
        # so repeated runs only fetch new events.
        query = client.query("SELECT id FROM major_event_log WHERE sys_id='%s' ORDER BY time DESC LIMIT 1" % sys_id)
        if query:
            start_from = int(next(query.get_points())["id"]) + 1
        # Timeout is (connect, read); the read timeout scales with the
        # configured collection interval so slow proxies don't hang a worker.
        mel_response = session.get(("{}/{}/mel-events").format(PROXY_BASE_URL, sys_id),
                                   params = {"count": mel_grab_count, "startSequenceNumber": start_from}, timeout=(6.10, CMD.intervalTime*2)).json()
        if CMD.showMELMetrics:
            LOG.info("Starting from %s", str(start_from))
            LOG.info("Grabbing %s MELs", str(len(mel_response)))
        for mel in mel_response:
            item = dict(
                measurement = "major_event_log",
                tags = dict(
                    sys_id = sys_id,
                    sys_name = sys_name,
                    event_type = mel["eventType"],
                    time_stamp = mel["timeStamp"],
                    category = mel["category"],
                    priority = mel["priority"],
                    critical = mel["critical"],
                    ascq = mel["ascq"],
                    asc = mel["asc"]
                ),
                fields = dict(
                    (metric, mel.get(metric)) for metric in MEL_PARAMS
                ),
                # Use the device's own event timestamp, not collection time.
                time = datetime.utcfromtimestamp(int(mel["timeStamp"])).isoformat()
            )
            if CMD.showMELMetrics:
                LOG.info("MEL payload: %s", item)
            json_body.append(item)
        client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision="s")
    except RuntimeError:
        LOG.error(("Error when attempting to post MEL for {}/{}").format(sys["name"], sys["id"]))
def create_failure_dict_item(sys_id, sys_name, fail_type, obj_ref, obj_type, is_active, the_time):
    """Build one InfluxDB point describing a (possibly resolved) failure.

    :param sys_id: storage-system id, used as a tag
    :param sys_name: storage-system display name
    :param fail_type: failure type string from the proxy
    :param obj_ref: reference of the object the failure applies to
    :param obj_type: type of the object the failure applies to
    :param is_active: whether the failure is currently active
    :param the_time: ISO-formatted timestamp for the point
    :return: dict in the influxdb-python point format
    """
    return {
        "measurement": "failures",
        "tags": {
            "sys_id": sys_id,
            "sys_name": sys_name,
            "failure_type": fail_type,
            "object_ref": obj_ref,
            "object_type": obj_type,
            "active": is_active,
        },
        # Tags are indexed but not selectable as values, so the name and
        # type are duplicated into fields for querying.
        "fields": {
            "name_of": sys_name,
            "type_of": fail_type,
        },
        "time": the_time,
    }
def collect_system_state(sys, checksums):
    """
    Collects failure (state) information from the storage system and posts
    state transitions to influxdb.

    :param sys: The JSON object of a storage_system
    :param checksums: dict mapping system id -> md5 of the last processed
        failure response, used to skip unchanged responses
    """
    try:
        session = get_session()
        client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)
        sys_id = sys["id"]
        sys_name = get_system_name(sys)
        # query the api and get a list of current failures for this system
        failure_response = session.get(("{}/{}/failures").format(PROXY_BASE_URL, sys_id)).json()
        # we can skip this system if this is the same response we handled last time
        old_checksum = checksums.get(str(sys_id))
        new_checksum = hashlib.md5(str(failure_response).encode("utf-8")).hexdigest()
        if old_checksum is not None and str(new_checksum) == str(old_checksum):
            return
        checksums.update({str(sys_id): str(new_checksum)})
        # pull most recent failures for this system from our database,
        # including their active status
        query_string = ("SELECT last(\"type_of\"),failure_type,object_ref,object_type,active FROM \"failures\" WHERE (\"sys_id\" = '{}') GROUP BY \"sys_name\", \"failure_type\"").format(sys_id)
        query = client.query(query_string)
        failure_points = list(query.get_points())
        json_body = list()
        # record active failures that we don't already know to be active
        for failure in failure_response:
            r_fail_type = failure.get("failureType")
            r_obj_ref = failure.get("objectRef")
            r_obj_type = failure.get("objectType")
            push = True
            for point in failure_points:
                if (r_fail_type == point["failure_type"]
                        and r_obj_ref == point["object_ref"]
                        and r_obj_type == point["object_type"]):
                    if point["active"] == "True":
                        # already known to be active, so don't push
                        push = False
                    break
            if push:
                item = create_failure_dict_item(sys_id, sys_name,
                                                r_fail_type, r_obj_ref, r_obj_type,
                                                True, datetime.utcnow().isoformat())
                # BUG FIX: the original logged the name "item" before it was
                # defined, raising NameError whenever showStateMetrics was on.
                if CMD.showStateMetrics:
                    LOG.info("Failure payload T1: %s", item)
                json_body.append(item)
        # record failures that are no longer active but were active before
        for point in failure_points:
            # BUG FIX: influx returns tag values as strings, so compare with
            # "True" (the original `if not p_active` never skipped anything,
            # because the non-empty string "False" is truthy).
            if point["active"] != "True":
                continue
            p_fail_type = point["failure_type"]
            p_obj_ref = point["object_ref"]
            p_obj_type = point["object_type"]
            push = True
            for failure in failure_response:
                if (failure.get("failureType") == p_fail_type
                        and failure.get("objectRef") == p_obj_ref
                        and failure.get("objectType") == p_obj_type):
                    push = False  # we are still active, so don't push
                    break
            if push:
                item = create_failure_dict_item(sys_id, sys_name,
                                                p_fail_type, p_obj_ref, p_obj_type,
                                                False, datetime.utcnow().isoformat())
                # BUG FIX: same undefined-name logging bug as above.
                if CMD.showStateMetrics:
                    LOG.info("Failure payload T2: %s", item)
                json_body.append(item)
        # write failures to influxdb
        if CMD.showStateMetrics:
            LOG.info("Writing {} failures".format(len(json_body)))
        client.write_points(json_body, database=INFLUXDB_DATABASE)
    except RuntimeError:
        LOG.error(("Error when attempting to post state information for {}/{}").format(sys["name"], sys["id"]))
def create_continuous_query(params_list, database):
    """Register a downsampling continuous query for every metric name.

    Relies on the module-level ``client`` InfluxDB connection and the
    ``INFLUXDB_DATABASE`` constant; failures are logged, not raised.

    :param params_list: metric names to downsample
    :param database: the measurement name the metrics live in
    """
    try:
        for metric in params_list:
            downsample_select = (
                'SELECT mean("{0}") AS "ds_{0}" INTO "{1}"."downsample_retention"."{2}"'
                ' FROM "{2}" WHERE (time < now()-1w) GROUP BY time(5m)'
            ).format(metric, INFLUXDB_DATABASE, database)
            query_name = "downsample_{}_{}".format(database, metric)
            client.create_continuous_query(query_name, downsample_select, INFLUXDB_DATABASE, "")
    except Exception as err:
        LOG.info("Creation of continuous query on '{}' failed: {}".format(database, err))
def get_storage_system_ids_folder_list():
    """Fetch the folder definitions from the Web Services proxy.

    :return: list of dicts with keys ``name``, ``systemIDs`` and
        ``systemNames`` (names are left empty here and resolved later by
        add_system_names_to_ids_list)
    """
    folder_url = ("http://{}/devmgr/v2/folders").format(CMD.proxySocketAddress)
    folder_response = SESSION.get(folder_url).json()
    folders = []
    for entry in folder_response:
        folders.append(dict(
            name=entry["name"],
            systemIDs=list(entry["storageSystemIds"]),
            systemNames=list(),
        ))
    return folders
def add_system_names_to_ids_list(folder_of_ids):
    """Fill in the ``systemNames`` lists for each folder dict in place.

    Queries the Web Services proxy for the full storage-system list and
    resolves each folder's member ids to names. The special
    "All Storage Systems" folder receives every known system.

    :param folder_of_ids: folder dicts from get_storage_system_ids_folder_list()
    :return: the same list, with names resolved (returned unchanged when the
        proxy request fails, so callers can still iterate it)
    """
    try:
        response = SESSION.get(PROXY_BASE_URL)
        if response.status_code != 200:
            LOG.warning("We were unable to retrieve the storage-system list! Status-code={}".format(response.status_code))
    # BUG FIX: "except A or B" evaluates to "except A" only, so ConnectionError
    # was never caught; a tuple catches both. The exception is also formatted
    # into the message instead of being passed as a stray logging argument.
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        LOG.warning("Unable to connect to the Web Services instance to get storage-system list! %s", e)
    except Exception as e:
        LOG.warning("Unexpected exception! %s", e)
    else:
        storageList = response.json()
        for folder in folder_of_ids:
            for storage_id in folder["systemIDs"]:
                for system in storageList:
                    if (system["id"] == storage_id):
                        folder["systemNames"].append(system["name"])
                        break
        # The "All Storage Systems" folder implicitly contains everything.
        for folder in folder_of_ids:
            if (folder["name"] == "All Storage Systems"):
                for system in storageList:
                    folder["systemIDs"].append(system["id"])
                    folder["systemNames"].append(system["name"])
    # ROBUSTNESS FIX: the original returned None on failure (the return was
    # inside the else branch), which crashed the caller's iteration.
    return folder_of_ids
def get_storage_system_folder_list():
    """Return the proxy's folder list with member system names resolved."""
    return add_system_names_to_ids_list(get_storage_system_ids_folder_list())
def collect_system_folders(systems):
    """
    Collects all folders defined in the WSP and posts them to influxdb.

    The "folders" measurement is dropped and fully rewritten on every call
    so that removed folders disappear from the database.

    :param systems: list of folder dicts (names and member system names)
    """
    try:
        client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)
        # One point per (folder, system) membership pair; the dummy field
        # exists only because InfluxDB points require at least one field.
        json_body = [
            dict(
                measurement="folders",
                tags=dict(folder_name=folder["name"], sys_name=name),
                fields=dict(dummy=0),
            )
            for folder in systems
            for name in folder["systemNames"]
        ]
        if not CMD.doNotPost:
            client.drop_measurement("folders")
            client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision="s")
    except RuntimeError:
        LOG.error("Error when attempting to post system folders")
#######################
# MAIN FUNCTIONS#######
#######################
if __name__ == "__main__":
    # Thread pool shared by all per-system collection tasks.
    executor = concurrent.futures.ThreadPoolExecutor(NUMBER_OF_THREADS)
    SESSION = get_session()
    loopIteration = 1
    client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)
    # create_database is a no-op if the database already exists.
    client.create_database(INFLUXDB_DATABASE)
    try:
        # Ensure we can connect. Wait for 2 minutes for WSP to startup.
        SESSION.get(PROXY_BASE_URL, timeout=120)
        configuration = get_configuration()
        # set up our default retention policies if we have that configured
        try:
            client.create_retention_policy("default_retention", "1w", "1", INFLUXDB_DATABASE, True)
        except InfluxDBClientError:
            # Policy already exists: update it in place instead.
            LOG.info("Updating retention policy to {}...".format("1w"))
            client.alter_retention_policy("default_retention", INFLUXDB_DATABASE,
                                          "1w", "1", True)
        try:
            client.create_retention_policy("downsample_retention", RETENTION_DUR, "1", INFLUXDB_DATABASE, False)
        except InfluxDBClientError:
            LOG.info("Updating retention policy to {}...".format(RETENTION_DUR))
            client.alter_retention_policy("downsample_retention", INFLUXDB_DATABASE,
                                          RETENTION_DUR, "1", False)
        # set up continuous queries that will downsample our metric data periodically
        create_continuous_query(DRIVE_PARAMS, "disks")
        create_continuous_query(SYSTEM_PARAMS, "system")
        create_continuous_query(VOLUME_PARAMS, "volumes")
        create_continuous_query(INTERFACE_PARAMS, "interface")
        # Register every configured storage system with the proxy.
        for system in configuration.get("storage_systems", list()):
            LOG.info("system: %s", str(system))
            body = dict(controllerAddresses=system.get("addresses"),
                        password=system.get("password") or configuration.get("array_password"),
                        acceptCertificate=True)
            response = SESSION.post(PROXY_BASE_URL, json=body)
            response.raise_for_status()
    # NOTE(review): "except A or B" evaluates to "except A" only, so
    # ConnectionError is NOT caught here; this should be a tuple of exceptions.
    except requests.exceptions.HTTPError or requests.exceptions.ConnectionError:
        LOG.exception("Failed to add configured systems!")
    except json.decoder.JSONDecodeError:
        LOG.exception("Failed to open configuration file due to invalid JSON!")
    # Time that we last collected array folder information
    last_folder_collection = -1
    # Per-system md5 of the last failure response, used by collect_system_state.
    checksums = dict()
    while True:
        time_start = time.time()
        try:
            response = SESSION.get(PROXY_BASE_URL)
            if response.status_code != 200:
                LOG.warning("We were unable to retrieve the storage-system list! Status-code={}".format(response.status_code))
        # NOTE(review): same "except A or B" pitfall as above — only
        # HTTPError is actually caught by this clause.
        except requests.exceptions.HTTPError or requests.exceptions.ConnectionError as e:
            LOG.warning("Unable to connect to the Web Services instance to get storage-system list!", e)
        except Exception as e:
            LOG.warning("Unexpected exception!", e)
        else:
            storageList = response.json()
            LOG.info("Names: %s", len(storageList))
            if CMD.showStorageNames:
                for storage in storageList:
                    storage_name = storage["name"]
                    if not storage_name or len(storage_name) <= 0:
                        storage_name = storage["id"]
                    if not storage_name or len(storage_name) <= 0:
                        storage_name = DEFAULT_STORAGE_NAME
                    LOG.info(storage_name)
            # Grab array folders and commit the data to InfluxDB
            if (last_folder_collection < 0 or time.time() - last_folder_collection >= FOLDER_COLLECTION_INTERVAL):
                LOG.info("Collecting system folder information...")
                storage_system_list = get_storage_system_folder_list()
                collect_system_folders(storage_system_list)
                last_folder_collection = time.time()
            # Iterate through all storage systems and collect metrics
            collector = [executor.submit(collect_storage_metrics, sys) for sys in storageList]
            concurrent.futures.wait(collector)
            # Iterate through all storage system and collect state information
            collector = [executor.submit(collect_system_state, sys, checksums) for sys in storageList]
            concurrent.futures.wait(collector)
            # Iterate through all storage system and collect MEL entries
            collector = [executor.submit(collect_major_event_log, sys) for sys in storageList]
            concurrent.futures.wait(collector)
        time_difference = time.time() - time_start
        if CMD.showIteration:
            LOG.info("Time interval: {:07.4f} Time to collect and send:"
                     " {:07.4f} Iteration: {:00.0f}"
                     .format(CMD.intervalTime, time_difference, loopIteration))
        loopIteration += 1
        # Dynamic wait time to get the proper interval
        wait_time = CMD.intervalTime - time_difference
        if CMD.intervalTime < time_difference:
            LOG.error("The interval specified is not long enough. Time used: {:07.4f} "
                      "Time interval specified: {:07.4f}"
                      .format(time_difference, CMD.intervalTime))
            # NOTE(review): sleeping for the full collection duration here
            # looks odd; sleeping 0 may be what was intended — confirm.
            wait_time = time_difference
        time.sleep(wait_time)
| StarcoderdataPython |
# Simple speeding-ticket check: R$7.00 of fine per km/h over the limit.
velocidade = float(input("Qual é a velocidade atual do carro? "))
limite_permitido = 80
if velocidade > limite_permitido:
    # Use the named limit instead of the duplicated magic number 80.
    multa = (velocidade - limite_permitido) * 7
    print(f"MULTADO! Você excedeu o limite permitido que é de {limite_permitido}Km/h\nVocê deve pagar uma multa de R${multa:.2f}!")
print("Tenha um bom dia! Dirija com segurança!")
| StarcoderdataPython |
4916350 | from typing import Any
from fugue.dataframe import DataFrame, DataFrames, LocalDataFrame, ArrayDataFrame
from fugue.extensions.context import ExtensionContext
from fugue.extensions.transformer.constants import OUTPUT_TRANSFORMER_DUMMY_SCHEMA
class Transformer(ExtensionContext):
    """Interface for processing one logical partition of a dataframe at a time.

    Transformers run locally on whatever worker holds a partition, so
    scalability and throughput are the execution engine's concern, not this
    interface's. Implementations must not define ``__init__``; implement the
    interface methods directly. If you do not need :meth:`on_init`, consider
    Fugue's interfaceless (plain-function) approach instead, which decouples
    your code from Fugue. See the Fugue partition and transformer tutorials
    (|PartitionTutorial|, |TransformerTutorial|).

    This class is deliberately not an ABC: pickling ABC objects is broken on
    Spark (see cloudpickle issue #305 and the workaround linked there).
    """

    def get_output_schema(self, df: DataFrame) -> Any:  # pragma: no cover
        """Produce the output schema; runs on the driver.

        This is the only method facing the entire (possibly distributed)
        dataframe; normally use only its schema and metadata and do not
        consume the data here. All properties except :meth:`cursor` are
        available.

        :param df: the entire dataframe that will be transformed
        :return: a schema-like object; must not be None or empty
        """
        raise NotImplementedError

    def on_init(self, df: DataFrame) -> None:  # pragma: no cover
        """Initialize a physical partition before its logical partitions run.

        Put expensive setup here instead of repeating it in
        :meth:`transform`. This may run on any machine; obtain context from
        this object's properties, and the physical partition number (when
        the engine provides one) from :meth:`cursor`. The input may be
        unbounded but is empty-aware: never consume or count it — only
        peeking the first row (even repeatedly) is safe. Empty physical
        partitions are skipped, so ``df`` is never empty.

        :param df: the entire dataframe of this physical partition
        """
        pass

    def transform(self, df: LocalDataFrame) -> LocalDataFrame:  # pragma: no cover
        """Transform one logical partition into a local dataframe.

        May run on any machine; obtain context from this object's
        properties. The input may be unbounded and is safe to consume
        exactly once; it is never empty.

        :param df: one logical partition to transform
        :return: the transformed dataframe
        """
        raise NotImplementedError
class OutputTransformer(Transformer):
    """A :class:`Transformer` used purely for side effects.

    Subclasses implement :meth:`process`; the transformer then emits an
    empty dataframe with the dummy output schema so the engine pipeline
    stays well-formed.
    """

    def process(self, df: LocalDataFrame) -> None:  # pragma: no cover
        """Consume one logical partition for its side effects only."""
        raise NotImplementedError

    def transform(self, df: LocalDataFrame) -> LocalDataFrame:
        self.process(df)
        return ArrayDataFrame([], OUTPUT_TRANSFORMER_DUMMY_SCHEMA)

    def get_output_schema(self, df: DataFrame) -> Any:
        # Output transformers produce no real data; advertise the dummy schema.
        return OUTPUT_TRANSFORMER_DUMMY_SCHEMA
class CoTransformer(ExtensionContext):
    """Interface for processing one logical partition of a zipped dataframe.

    The input is a collection of dataframes zipped on the same partition
    keys. CoTransformers run locally on whatever worker holds the partition,
    so scalability and throughput are the execution engine's concern.
    Implementations must not define ``__init__``; implement the interface
    methods directly. If you do not need :meth:`on_init`, consider Fugue's
    interfaceless (plain-function) approach instead. See the Fugue zip/comap
    and cotransformer tutorials (|ZipComap|, |CoTransformerTutorial|).

    This class is deliberately not an ABC: pickling ABC objects is broken on
    Spark (see cloudpickle issue #305 and the workaround linked there).
    """

    def get_output_schema(self, dfs: DataFrames) -> Any:  # pragma: no cover
        """Produce the output schema; runs on the driver.

        ``dfs`` currently holds empty dataframes that carry only the real
        structure and schemas — use the schemas/metadata, do not consume
        data here. All properties except :meth:`cursor` are available.

        :param dfs: empty dataframes mirroring the inputs' structure/schemas
        :return: a schema-like object; must not be None or empty
        """
        raise NotImplementedError

    def on_init(self, dfs: DataFrames) -> None:  # pragma: no cover
        """Initialize a physical partition before its logical partitions run.

        Put expensive setup here instead of repeating it in
        :meth:`transform`. This may run on any machine; obtain context from
        this object's properties, and the physical partition number (when
        available) from :meth:`cursor`. ``dfs`` currently holds empty
        dataframes with the real structure and schemas.

        :param dfs: empty dataframes mirroring the inputs' structure/schemas
        """
        pass

    def transform(self, dfs: DataFrames) -> LocalDataFrame:  # pragma: no cover
        """Transform one group of same-key dataframes into a local dataframe.

        May run on any machine; obtain context from this object's properties.

        :param dfs: dataframes sharing the same partition keys
        :return: the transformed dataframe
        """
        raise NotImplementedError
class OutputCoTransformer(CoTransformer):
    """A :class:`CoTransformer` used purely for side effects.

    Subclasses implement :meth:`process`; the transformer then emits an
    empty dataframe with the dummy output schema so the engine pipeline
    stays well-formed.
    """

    def process(self, dfs: DataFrames) -> None:  # pragma: no cover
        """Consume one group of same-key dataframes for its side effects."""
        raise NotImplementedError

    def transform(self, dfs: DataFrames) -> LocalDataFrame:
        self.process(dfs)
        return ArrayDataFrame([], OUTPUT_TRANSFORMER_DUMMY_SCHEMA)

    def get_output_schema(self, dfs: DataFrames) -> Any:
        # Output co-transformers produce no real data; advertise the dummy schema.
        return OUTPUT_TRANSFORMER_DUMMY_SCHEMA
| StarcoderdataPython |
# laymon/observers.py
from .interfaces import Observer, ObserverFactory
from .displays import FeatureMapDisplay
class FeatureMapObserver(Observer):
    """Observer that forwards a monitored layer's feature maps to a display."""

    def __init__(self, layer, layer_name, update_display):
        """Create an observer for the feature maps of one PyTorch layer.

        :param layer: the PyTorch layer to monitor
        :param layer_name: display name used as the observer/display title
        :param update_display: callable invoked to refresh the display
        :raises TypeError: if ``update_display`` is not callable
        """
        self._layer = layer
        self._layer_name = layer_name
        # The display callback must be invocable when activations arrive.
        if not callable(update_display):
            raise TypeError("update display method should be callable.")
        self._update_display = update_display
        # Human-readable description of what this observer watches.
        self._description = f"Observer -> {self._layer_name}"

    def update(self, parameters):
        """Push new parameters/activations to the attached display.

        :param parameters: Tensor of new activations for the layer
        :return: None
        """
        self._update_display(parameters=parameters, display_title=self._layer_name)

    def get_layer_name(self):
        """Return the name of the layer being observed."""
        return self._layer_name

    def get_layer(self):
        """Return the layer object being observed."""
        return self._layer
class FeatureMapObserverFactory(ObserverFactory):
    """Factory producing :class:`FeatureMapObserver` instances."""

    # Display class whose update_display method is wired into each observer.
    display_object = FeatureMapDisplay

    def create(self, layer, layer_name):
        """Build a FeatureMapObserver for ``layer`` wired to a fresh display.

        :param layer: the PyTorch layer to observe
        :param layer_name: display name for the layer
        :return: the configured observer
        """
        display = self.display_object()
        return FeatureMapObserver(layer=layer, layer_name=layer_name,
                                  update_display=display.update_display)
| StarcoderdataPython |
import yfinance as yf
import util
def get_historical_data(ticker_symbol: str, time_period: str, time_interval: str) -> yf.Ticker:
    """Download historical OHLCV data for a ticker.

    :param ticker_symbol: the ticker symbol, e.g. "AAPL"
    :param time_period: valid periods: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max
    :param time_interval: valid intervals: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo
    :return: the price history (NOTE(review): the annotation says yf.Ticker,
        but ``history()`` returns a pandas DataFrame — confirm and fix the
        annotation once pandas is importable here)
    """
    ticker = yf.Ticker(ticker_symbol)
    return ticker.history(period=time_period, interval=time_interval)
def get_current_stock_data(ticker_symbol: str) -> dict:
    """Return the most recent 2-minute bar for a ticker, enriched with
    SMA/EMA indicators and the previous close.

    :param ticker_symbol: the ticker symbol, e.g. "AAPL"
    :return: dict with OHLCV keys plus SMA, PREVSMA, EMA and PREVPRICE
    """
    historical_stock_data = get_historical_data(ticker_symbol, "3d", "2m")
    stock_data = historical_stock_data.iloc[-1].to_dict()
    del stock_data["Dividends"]
    del stock_data["Stock Splits"]
    # Compute the SMA pair once instead of calling util.calculate_sma twice
    # on the same data as the original did.
    # NOTE(review): upstream comment marked calculate_sma "broken because of
    # history access" — confirm util.calculate_sma against this data shape.
    sma, prev_sma = util.calculate_sma(historical_stock_data)
    stock_data["SMA"] = sma
    stock_data["PREVSMA"] = prev_sma
    stock_data["EMA"] = util.calculate_ema(historical_stock_data)
    # Previous close is taken from the second-to-last 2-minute bar.
    stock_data["PREVPRICE"] = historical_stock_data.iloc[-2].to_dict()["Close"]
    return stock_data
def _recent_slope(ticker_symbol: str, column: str, n: int = 5):
    """Linear-regression slope of the last ``n`` one-minute values of ``column``.

    Shared implementation for get_price_slope/get_volume_slope, which were
    previously duplicated (their stale comments also claimed "3 minutes"
    while n was 5).
    """
    historical_stock_data = get_historical_data(ticker_symbol, "1d", "1m")
    values = [historical_stock_data.iloc[i].to_dict()[column] for i in range(-n, 0)]
    return util.linear_regress_slope(1, values)


def get_price_slope(ticker_symbol: str):
    """Slope of the closing price over the last 5 one-minute bars."""
    return _recent_slope(ticker_symbol, "Close")


def get_volume_slope(ticker_symbol: str):
    """Slope of the traded volume over the last 5 one-minute bars."""
    return _recent_slope(ticker_symbol, "Volume")
def get_stock_company_name(ticker_symbol: str):
    """Return the company's long name for the given ticker symbol."""
    ticker = yf.Ticker(ticker_symbol)
    return ticker.info["longName"]
| StarcoderdataPython |
from app.config import gunicorn_settings
from app.logging import log_config
# Gunicorn config variables
# Every value is sourced from the application's settings objects (presumably
# pydantic-style, given .dict(exclude_none=True) — confirm) so deployment
# behaviour can be driven by environment/config rather than edits here.
accesslog = gunicorn_settings.ACCESS_LOG
# Bind address in "host:port" form.
bind = f"{gunicorn_settings.HOST}:{gunicorn_settings.PORT}"
errorlog = gunicorn_settings.ERROR_LOG
keepalive = gunicorn_settings.KEEPALIVE
# Dict-based logging configuration (gunicorn's logconfig_dict setting).
logconfig_dict = log_config.dict(exclude_none=True)
loglevel = gunicorn_settings.LOG_LEVEL
reload = gunicorn_settings.RELOAD
threads = gunicorn_settings.THREADS
timeout = gunicorn_settings.TIMEOUT
worker_class = gunicorn_settings.WORKER_CLASS
workers = gunicorn_settings.WORKERS
| StarcoderdataPython |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sample script to print a simple list of all the nodes registered with the controller.
Output format: ``app_name,tier_name,node_name,host_name``
"""
from __future__ import print_function
from appd.cmdline import parse_argv
from appd.request import AppDynamicsClient
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2013-2015 AppDynamics Inc.'
# Parse the standard appd command-line arguments (controller URL,
# credentials, account, verbosity).
args = parse_argv()
c = AppDynamicsClient(args.url, args.username, args.password, args.account, args.verbose)
# Emit one CSV row per node: app_name,tier_name,node_name,host_name
for app in c.get_applications():
    for node in c.get_nodes(app.id):
        print(','.join([app.name, node.tier_name, node.name, node.machine_name]))
| StarcoderdataPython |
# repo: duarteocarmo/CervicalCancer
from Project_Clean_data import raw
from Project_Clean_data import header
from matplotlib.pyplot import boxplot, xticks, ylabel, title, show
import numpy as np
from textwrap import wrap
# Identify the non-binary (integer-valued) attribute columns: a column is
# treated as binary when it is unchanged by a round-trip through bool.
integer = []
for column in range(raw.shape[1]):
    # np.array_equal returns a plain bool, so test it directly rather than
    # with the fragile `is True` identity comparison used originally.
    if not np.array_equal(raw[:, column], raw[:, column].astype(bool)):
        integer.append(column)

# Data and header restricted to the non-binary attributes only.
raw_nobinary = raw[:, integer]
header_nobinary = header[integer]

# Outlier detection using Tukey's fences: anything outside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] (clamped to the observed min/max) is flagged.
for column in range(raw_nobinary.shape[1]):
    vector = raw_nobinary[:, column]
    minimum = np.amin(vector)
    maximum = np.amax(vector)
    q75 = np.percentile(vector, 75)
    q25 = np.percentile(vector, 25)
    upper = min(maximum, q75 + 1.5 * (q75 - q25))
    lower = max(minimum, q25 - 1.5 * (q75 - q25))
    # Vectorized count replaces the original per-row Python loop; the unused
    # 50th-percentile computation was dropped.
    number_of_outliers = int(np.count_nonzero((vector > upper) | (vector < lower)))
    print(header_nobinary[column], round(number_of_outliers * 100 / np.size(vector)), '% are outliers.')

# Wrap long attribute names so the x-axis labels stay readable.
labels = ['\n'.join(wrap(l, 20)) for l in header_nobinary]

# Box plot of all non-binary attributes with outliers marked as red dots.
boxplot(raw_nobinary, 0, 'r.')
xticks(range(1, np.size(header_nobinary) + 1), labels, rotation=45, fontsize=6)
title('Cervical Cancer Analysis - Attributes')
show()
| StarcoderdataPython |
# (dataset artifact removed)
from typing import Iterator, List, Optional
from django.db import models
from ufdl.annotation_utils.image_segmentation import annotations_iterator
from ufdl.core_app.exceptions import BadArgumentValue
from ufdl.core_app.models import Dataset, DatasetQuerySet
from ufdl.core_app.models.files import File
from wai.annotations.domain.image.segmentation import ImageSegmentationInstance
from ._SegmentationLayerImage import SegmentationLayerImage
# Placeholder queryset: no image-segmentation-specific query methods yet.
class ImageSegmentationDatasetQuerySet(DatasetQuerySet):
    pass
class ImageSegmentationDataset(Dataset):
# The labels of the dataset (new-line separated)
labels = models.TextField()
objects = ImageSegmentationDatasetQuerySet.as_manager()
@classmethod
def domain_code(cls) -> str:
return "is"
def can_merge(self, other) -> Optional[str]:
# Test any higher-priority conditions
super_reason = super().can_merge(other)
# If they failed, report them
if super_reason is not None:
return super_reason
assert isinstance(other, ImageSegmentationDataset)
# Make sure the other data-set has the same labels as us
return (
None
if self.get_labels() == other.get_labels() else
"Source data-set has different labels to target"
)
    def merge_annotations(self, other, files):
        """Copy the segmentation layers for the given files from ``other``.

        Any existing layers on each target file are removed first, so the
        source data-set's annotations fully replace the target's.

        :param other: the source data-set the annotations come from
        :param files: iterable of (source_file, target_file) pairs
        """
        for source_file, target_file in files:
            # Delete any existing layers from the target file
            self.annotations.for_file(target_file.filename).delete()
            # Add the layers from the source file to this data-set,
            # sharing the underlying mask file objects with the source.
            for layer in other.annotations.for_file(source_file.filename).all():
                SegmentationLayerImage(
                    dataset=self,
                    filename=target_file.filename,
                    label=layer.label,
                    mask=layer.mask
                ).save()
def clear_annotations(self):
self.annotations.delete()
def delete_file(self, filename: str):
# Delete the file as usual
file = super().delete_file(filename)
# Remove the file's annotation layers as well
self.annotations.for_file(filename).delete()
return file
    def get_annotations_iterator(self) -> Iterator[ImageSegmentationInstance]:
        """Iterate this data-set's annotations as wai.annotations instances."""
        return annotations_iterator(
            self.iterate_filenames(),
            self.get_labels(),
            self.get_layer,
            self.get_file
        )
def get_layer(self, filename: str, label: str) -> Optional[bytes]:
"""
Gets the layer mask for the given filename/label.
:param filename:
The file the mask is for.
:param label:
The label of the mask.
:return:
The mask data.
"""
# Make sure the filename is valid
self.has_file(filename, throw=True)
# Must be a valid label
if not self.has_label(label):
raise BadArgumentValue("set_layer", "label", label, str(self.get_labels()))
# Get the existing layer instance
layer = self.annotations.for_file(filename).for_label(label).first()
return (
layer.mask.get_data()
if layer is not None else
None
)
def set_layer(self, filename: str, label: str, mask: bytes):
"""
Adds/updates the mask for a layer of a particular file.
:param filename:
The name of the file the mask layer is for.
:param label:
The label for the layer.
:param mask:
The binary mask data.
"""
# Make sure the filename is valid
self.has_file(filename, throw=True)
# Must be a valid label
if not self.has_label(label):
raise BadArgumentValue("set_layer", "label", label, str(self.get_labels()))
# Create a file reference for the mask
mask_file = File.create(mask)
# Get the existing layer instance
layer = self.annotations.for_file(filename).for_label(label).first()
# If the layer already exists, update it's mask
if layer is not None:
original_mask_file = layer.mask
layer.mask = mask_file
original_mask_file.delete()
# If the layer doesn't exist already, create it
else:
layer = SegmentationLayerImage(
dataset=self,
filename=filename,
label=label,
mask=mask_file
)
# Save the new/updated layer
layer.save()
def get_labels(self) -> List[str]:
"""
Gets the canonically-ordered list of labels for this data-set.
:return:
The list of labels.
"""
return (
self.labels.split("\n")
if self.labels != "" else
[]
)
def set_labels(self, labels: List[str]):
"""
Sets the labels for this data-set.
:param labels:
The labels.
"""
assert len(labels) == len(set(labels)), f"Duplicate labels in: " + ", ".join(labels)
# Set the labels text field
self.labels = (
""
if len(labels) == 0 else
"\n".join(labels)
)
self.save(update_fields=["labels"])
# Remove any annotations that aren't in the label set
self.annotations.filter(~models.Q(label__in=labels)).delete()
def has_label(self, label: str) -> bool:
"""
Whether this data-set has a given label.
:param label:
The label to check for.
:return:
True if the label is valid, False if not.
"""
return label in self.get_labels()
| StarcoderdataPython |
3593839 | <reponame>CatTiger/vnpy<filename>venv/lib/python3.7/site-packages/tigeropen/quote/response/quote_brief_response.py
# -*- coding: utf-8 -*-
"""
Created on 2018/10/31
@author: gaoan
"""
import json
import six
from tigeropen.common.consts import TradingSession
from tigeropen.common.util.string_utils import get_string
from tigeropen.quote.domain.quote_brief import QuoteBrief, HourTrading
from tigeropen.common.response import TigerResponse
# Maps camelCase field names from the Tiger API payload to the snake_case
# attribute names used on QuoteBrief / HourTrading instances below.
BRIEF_FIELD_MAPPINGS = {'latestPrice': 'latest_price', 'preClose': 'prev_close', 'secType': 'sec_type',
                        'timestamp': 'latest_time', 'askPrice': 'ask_price', 'askSize': 'ask_size',
                        'bidPrice': 'bid_price', 'bidSize': 'bid_size'}
class QuoteBriefResponse(TigerResponse):
    """Response wrapper that parses quote-brief payloads into QuoteBrief objects."""

    def __init__(self):
        super(QuoteBriefResponse, self).__init__()
        self.briefs = []  # parsed QuoteBrief instances, one per payload item
        self._is_success = None  # server-reported success flag (None until parsed)

    def parse_response_content(self, response_content):
        """Parse the raw response content and populate self.briefs.

        Field names are translated camelCase -> snake_case via
        BRIEF_FIELD_MAPPINGS; the nested 'hourTrading' object becomes an
        HourTrading instance.  Unknown fields are silently skipped
        (hasattr guard), and None values are ignored.
        """
        response = super(QuoteBriefResponse, self).parse_response_content(response_content)
        if 'is_success' in response:
            self._is_success = response['is_success']
        if self.data:
            data_json = json.loads(self.data)
            if 'items' in data_json:
                for item in data_json['items']:
                    brief = QuoteBrief()
                    for key, value in item.items():
                        if value is None:
                            continue
                        if isinstance(value, six.string_types):
                            value = get_string(value)
                        if key == 'hourTrading':
                            hour_trading = HourTrading()
                            for sub_key, sub_value in value.items():
                                if isinstance(sub_value, six.string_types):
                                    sub_value = get_string(sub_value)
                                if sub_key == 'tag':
                                    # Tag is a Chinese marker: '盘前' = pre-market,
                                    # '盘后' = after-hours; any other value leaves
                                    # trading_session unset.
                                    if sub_value == '盘前':
                                        hour_trading.trading_session = TradingSession.PreMarket
                                    elif sub_value == '盘后':
                                        hour_trading.trading_session = TradingSession.AfterHours
                                else:
                                    sub_tag = BRIEF_FIELD_MAPPINGS[
                                        sub_key] if sub_key in BRIEF_FIELD_MAPPINGS else sub_key
                                    if hasattr(hour_trading, sub_tag):
                                        setattr(hour_trading, sub_tag, sub_value)
                            brief.hour_trading = hour_trading
                        else:
                            tag = BRIEF_FIELD_MAPPINGS[key] if key in BRIEF_FIELD_MAPPINGS else key
                            if hasattr(brief, tag):
                                setattr(brief, tag, value)
                    self.briefs.append(brief)
| StarcoderdataPython |
# Find the longest non-decreasing ("alphabetical") run in the externally
# defined string ``s`` and print it.
#
# Rewritten as a single scan.  The original bookkeeping had two defects:
#   * a run's length was only recorded when a *descent* followed it, so a
#     longest run reaching the end of ``s`` was never considered;
#   * the answer was located by re-searching ``s`` for the run's prefix,
#     which can match an earlier, unrelated occurrence.
best_start = 0   # start index of the best run found so far
best_len = 1 if s else 0   # its length (any 1-char string is trivially alphabetical)
run_start = 0    # start index of the run currently being scanned
for i in range(1, len(s)):
    if ord(s[i - 1]) > ord(s[i]):
        # Descent: the current run ends at i-1; a new run starts at i.
        run_start = i
    # The current run now spans [run_start, i]; keep it only if strictly
    # longer, so ties resolve in favour of the earliest run.
    if i - run_start + 1 > best_len:
        best_start = run_start
        best_len = i - run_start + 1
print("Longest substring in alphabetical order is:", s[best_start:best_start + best_len])
| StarcoderdataPython |
9760579 | <reponame>somnus0208/tinyftp
import socket
import os
import argparse
import struct
import tls
import sys
# Raised by ThrowingArgumentParser instead of letting argparse call sys.exit().
class ArgumentParserError(Exception): pass
class ThrowingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of exiting.

    The stock parser calls sys.exit() on bad input, which would kill the
    interactive command loop; this subclass converts parse failures into
    ArgumentParserError and makes exit() a no-op.
    """
    def error(self, message):
        # Called by argparse on any parse error; surface it as an exception.
        raise ArgumentParserError(message)
    def exit(self, status=0, message=None):
        # Swallow argparse's exit requests (e.g. after printing --help).
        pass
def command_cd(client, args):
    """Ask the server to change its working directory to ``args.param``."""
    request = tls.TLV(tls.REQ_W_DIR_CHANGE, args.param)
    print(args.param)
    client.send_tlv(request)
    reply = client.recv_tlv()
    if reply.tag == tls.RES_ERROR:
        print('error: {}'.format(reply.val))
def command_lcd(client, args):
    """Change the client-side (local) working directory.

    ``args.param`` may be absolute or relative to the current local
    directory.  The target is probed with os.listdir() before being
    adopted, so a bad path leaves client.working_dir untouched.
    """
    if os.path.isabs(args.param):
        nw_dir = args.param
    else:
        nw_dir = os.path.join(client.working_dir, args.param)
    try:
        # Probe the target: raises OSError if it is missing, not a
        # directory, or unreadable.  The original caught only
        # FileNotFoundError, so e.g. `lcd some_file` (NotADirectoryError)
        # crashed the interactive loop.
        os.listdir(nw_dir)
        client.working_dir = nw_dir
    except OSError:
        print('error: no such file or directory:{0}'.format(args.param))
def command_pwd(client, args):
    """Print the server-side working directory (remote ``pwd``)."""
    tlv = tls.TLV(tls.REQ_W_DIR)
    client.send_tlv(tlv)
    tlv = client.recv_tlv()
    if tlv.tag == tls.RES_ERROR:
        print('error: {}'.format(tlv.val))
    elif tlv.tag == tls.RES_CMD_OK:
        print('remote:{0}'.format(tlv.val))
def command_lpwd(client, args):
    """Print the client-side (local) working directory."""
    print(client.working_dir)
def command_lst(client, args):
    """List the contents of the server-side working directory."""
    tlv = tls.TLV(tls.REQ_DIR)
    client.send_tlv(tlv)
    tlv = client.recv_tlv()
    if tlv.tag == tls.RES_ERROR:
        print('error:{}'.format(tlv.val))
    elif tlv.tag == tls.RES_DIR:
        print(tlv.val)
def command_put(client, args):
    """Upload a file to the server (not implemented yet).

    The added ``client`` parameter fixes the handler signature: the REPL
    dispatches every command as ``args.handler(client, args)``, so the
    original one-argument version raised TypeError whenever ``put`` was
    actually invoked.
    """
    print("upload")
def command_get(client, args):
    """Download the remote file named by ``args.param`` into the local working dir.

    Expected protocol: REQ_FILE -> RES_FILE_BEG(name) -> RES_FILE_SIZE(size)
    -> zero or more data chunks -> RES_FILE_END.
    """
    client.send_tlv(tls.TLV(tls.REQ_FILE, args.param))
    tlv = client.recv_tlv()
    if tlv.tag == tls.RES_ERROR:
        print('Error:{}'.format(tlv.val))
    elif tlv.tag == tls.RES_FILE_BEG:
        file_abs_name = os.path.join(client.working_dir, tlv.val)
        # ``with`` guarantees the handle is closed even if the transfer
        # aborts mid-stream (the original leaked it on error).
        with open(file_abs_name, 'wb') as file_object:
            tlv = client.recv_tlv()
            if tlv.tag == tls.RES_FILE_SIZE:
                # SECURITY: the original ran eval() on this network-supplied
                # value (and discarded the result), letting a malicious
                # server execute arbitrary code.  The size is informational
                # only, so parse it defensively and ignore garbage.
                try:
                    int(tlv.val)
                except (TypeError, ValueError):
                    pass
                tlv = client.recv_tlv()
            while tlv.tag != tls.RES_FILE_END:
                file_object.write(tlv.val)
                tlv = client.recv_tlv()
def command_cls(client, args):
    """Tell the server to close the connection, then terminate the client process."""
    client.send_tlv(tls.TLV(tls.REQ_CLS))
    sys.exit()
if __name__=='__main__':
    # Connect to the server given on the command line (fixed port 9000).
    mainparser = argparse.ArgumentParser()
    mainparser.add_argument('serverip')
    addr = (mainparser.parse_args().serverip,9000)
    client = tls.tlvsocket()
    # The tlvsocket doubles as session state: the local directory lives on it.
    client.working_dir = os.getcwd()
    client.connect(addr)
    print('server {} connected'.format(addr[0]))
    # Build the interactive command parser; ThrowingArgumentParser raises
    # instead of exiting, so a bad command only prints help.
    parser = ThrowingArgumentParser(description='tiny ftp client')
    subparsers = parser.add_subparsers()
    # NOTE(review): parser_pwd is reused for both 'close' and 'pwd' below —
    # harmless (set_defaults is applied immediately) but worth renaming.
    parser_pwd = subparsers.add_parser('close', help='close this connection')
    parser_pwd.set_defaults(handler=command_cls)
    parser_pwd = subparsers.add_parser('pwd', help='print name of working directory')
    parser_pwd.set_defaults(handler=command_pwd)
    parser_lpwd = subparsers.add_parser('lpwd', help='print name of local working directory')
    parser_lpwd.set_defaults(handler=command_lpwd)
    parser_cd = subparsers.add_parser('cd', help='change remote working directory')
    parser_cd.add_argument('param',metavar='directory')
    parser_cd.set_defaults(handler=command_cd)
    parser_lcd = subparsers.add_parser('lcd', help='changes local working directory')
    parser_lcd.add_argument('param',metavar='directory')
    parser_lcd.set_defaults(handler=command_lcd)
    parser_lst = subparsers.add_parser('list', help='returns information of working directory')
    parser_lst.set_defaults(handler=command_lst)
    parser_put = subparsers.add_parser('put', help='uploads file')
    parser_put.add_argument('param',metavar='file')
    parser_put.set_defaults(handler=command_put)
    parser_get = subparsers.add_parser('get', help='downloads file')
    parser_get.add_argument('param',metavar='file')
    parser_get.set_defaults(handler=command_get)
    # Interactive loop: every handler is invoked as handler(client, args).
    while True:
        sub_cmd = input('Tinyftp>>>')
        try:
            args = parser.parse_args(sub_cmd.split())
            if hasattr(args, 'handler'):
                args.handler(client, args)
        except ArgumentParserError:
            parser.print_help()
            continue
| StarcoderdataPython |
1667587 | import pytest
from lunavl.sdk.errors.errors import LunaVLError
from lunavl.sdk.errors.exceptions import LunaSDKException
from lunavl.sdk.faceengine.setting_provider import DetectorType
from lunavl.sdk.image_utils.image import VLImage
from tests.base import BaseTestClass
from tests.resources import ONE_FACE
class TestEstimateGazeDirection(BaseTestClass):
    """
    Test estimate gaze direction.
    """

    @classmethod
    def setup_class(cls):
        """Create detector/warper/estimator once and pre-compute a warped face."""
        super().setup_class()
        cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_DEFAULT)
        cls.warper = cls.faceEngine.createFaceWarper()
        cls.gazeEstimator = cls.faceEngine.createGazeEstimator()
        cls.faceDetection = cls.detector.detectOne(VLImage.load(filename=ONE_FACE))
        cls.warp = cls.warper.warp(faceDetection=cls.faceDetection)

    @staticmethod
    def validate_gaze_estimation(receivedDict: dict):
        """
        Validate gaze estimation reply
        """
        # Reply must contain exactly 'pitch' and 'yaw', both floats in degrees.
        assert sorted(["pitch", "yaw"]) == sorted(receivedDict.keys())
        for gaze in ("pitch", "yaw"):
            assert isinstance(receivedDict[gaze], float)
            assert -180 <= receivedDict[gaze] <= 180

    def test_estimate_gaze_landmarks5(self):
        """
        Test gaze estimator with landmarks 5
        """
        landMarks5Transformation = self.warper.makeWarpTransformationWithLandmarks(self.faceDetection, "L5")
        gazeEstimation = self.gazeEstimator.estimate(landMarks5Transformation, self.warp).asDict()
        self.validate_gaze_estimation(gazeEstimation)

    def test_estimate_gaze_landmarks68(self):
        """
        Test gaze estimator with landmarks 68 (not supported by estimator)
        """
        faceDetection = self.detector.detectOne(VLImage.load(filename=ONE_FACE), detect68Landmarks=True)
        landMarks68Transformation = self.warper.makeWarpTransformationWithLandmarks(faceDetection, "L68")
        # Estimator only accepts the 5-landmark transformation.
        with pytest.raises(TypeError):
            self.gazeEstimator.estimate(landMarks68Transformation, self.warp)

    def test_estimate_gaze_landmarks68_without_landmarks68_detection(self):
        """
        Test gaze estimator with landmarks 68
        """
        # Requesting an L68 transformation without having detected L68
        # landmarks must fail up-front.
        faceDetection = self.detector.detectOne(VLImage.load(filename=ONE_FACE), detect68Landmarks=False)
        with pytest.raises(ValueError):
            self.warper.makeWarpTransformationWithLandmarks(faceDetection, "L68")

    def test_estimate_gaze_landmarks_wrong(self):
        """
        Test gaze estimator with wrong landmarks
        """
        with pytest.raises(ValueError):
            self.warper.makeWarpTransformationWithLandmarks(self.faceDetection, "L10")

    def test_estimate_gaze_without_transformation(self):
        """
        Test gaze estimator without transformation
        """
        # Passing raw landmarks (no warp transformation) must yield InvalidInput.
        faceDetection = self.detector.detectOne(VLImage.load(filename=ONE_FACE), detect68Landmarks=False)
        with pytest.raises(LunaSDKException) as exceptionInfo:
            self.gazeEstimator.estimate(faceDetection.landmarks5, self.warp)
        self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidInput)
| StarcoderdataPython |
4894470 | #!/usr/bin/python
import time
def fib(n):
    """Return the n-th Fibonacci number (1-based; fib(1) == fib(2) == 1).

    Deliberately naive double recursion: the surrounding script uses this
    as a CPU benchmark, so the exponential cost is the point.
    """
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)
# Benchmark loop (Python 2 syntax): repeatedly time a naive fib(40) and
# report the wall-clock seconds per iteration.  Runs until interrupted.
cnt = 0
print 'Calculating Fib(40)...'
while 1:
    t = time.time()
    fib(40)
    cnt += 1
    print 'Iteration', cnt, ':', time.time()-t, 'Seconds'
| StarcoderdataPython |
1813417 | <filename>ocr_joplin_notes/cli.py
# -*- coding: utf-8 -*-
"""Console script for ocr_joplin_notes."""
import sys
import click
from .ocr_joplin_notes import (
set_language,
set_autorotation,
set_mode,
run_mode, set_add_previews,
)
from . import __version__
def parse_argument(arg):
    """Normalise a loose yes/no command-line value to "yes" or "no".

    Any casing of "no", "n" or "off" counts as "no"; everything else falls
    through to "yes", matching the permissive intent of the original.  The
    original compared against a hand-typed list of casings and therefore
    missed variants such as "Off" or "nO".
    """
    if str(arg).lower() in ("no", "n", "off"):
        return "no"
    return "yes"
@click.command()
@click.option(
    "--mode",
    "mode",
    default="",
    help="""Specify the mode""",
)
@click.option(
    "--tag",
    "tag",
    default=None,
    help="""Specify the Joplin tag""",
)
@click.option(
    "--exclude_tags",
    "exclude_tags",
    default=None,
    multiple=True,
    help="""Specify the Joplin tags to be excluded""",
)
@click.option(
    "-l",
    "--language",
    "language",
    default="eng",
    help="""Specify the OCR Language. Refer to Tesseract's documentation found here:
    https://github.com/tesseract-ocr/tesseract/wiki""",
)
@click.option(
    "--add-previews",
    "add_previews",
    default="yes",
    help="""Specify whether to add preview images to the note, when a PDF file is processed. """
         """Default = yes (specify 'no' to disable). """,
)
@click.option(
    "--autorotation",
    "autorotation",
    default="yes",
    help="""Specify whether to rotate images."""
         """ Default = yes (specify 'no' to disable). """,
)
@click.version_option(version=__version__)
def main(
        mode="",
        tag=None,
        exclude_tags=None,
        language="eng",
        add_previews="yes",
        autorotation="yes",
):
    """Console script for ocr_joplin_notes.

    ocr_joplin_notes --mode <mode> [options]
    """
    # BUGFIX: the original wrote the docstring as an f-string.  An f-string
    # is an expression, not a string constant, so Python never assigned it
    # to main.__doc__ and click displayed an empty help text.  (Also fixes
    # the "ocr_joplin_nodes" typo, and "pecify" in the autorotation help.)
    set_mode(mode)
    set_language(language)
    set_autorotation(parse_argument(autorotation))
    set_add_previews(parse_argument(add_previews))
    click.echo("Mode: " + mode)
    if tag is not None:
        click.echo("Tag: " + tag)
    if exclude_tags is not None:
        click.echo("Exclude Tags: " + str(exclude_tags))
    click.echo("Language: " + language)
    click.echo("Add previews: " + add_previews)
    click.echo("Autorotation: " + autorotation)
    res = run_mode(mode, tag, exclude_tags)
    if res == 0:
        click.echo("Finished")
        return 0
    else:
        click.echo("Aborted")
        return res
# Standard CLI entry point; propagate main()'s return code as the exit status.
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| StarcoderdataPython |
5173086 | <filename>experiments/testing_env/models/ppo_entropy/model/src/model_ppo.py<gh_stars>0
import torch
import torch.nn as nn
class Model(torch.nn.Module):
    """PPO actor-critic network with three value heads.

    A shared feature trunk feeds a policy head and three scalar value heads
    (extrinsic, intrinsic-curiosity, intrinsic-entropy).
    """

    def __init__(self, input_shape, outputs_count):
        """Build the network.

        :param input_shape:   observation shape; input_shape[0] is the flat
                              feature count consumed by the first Linear layer.
        :param outputs_count: number of discrete actions (policy logits).
        """
        super(Model, self).__init__()

        self.device = "cpu"
        # BUGFIX: get_activity_map() reads self.input_shape, but the original
        # __init__ never stored it, so that method always raised AttributeError.
        self.input_shape = input_shape

        hidden_size = 64

        self.layers_features = [
            nn.Linear(input_shape[0], hidden_size),
            nn.ReLU(),
        ]

        self.layers_ext_value = [
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1)
        ]

        self.layers_int_curiosity_value = [
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1)
        ]

        self.layers_int_entropy_value = [
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1)
        ]

        self.layers_policy = [
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, outputs_count)
        ]

        # Xavier-initialise every layer that has weights.
        for i in range(len(self.layers_features)):
            if hasattr(self.layers_features[i], "weight"):
                torch.nn.init.xavier_uniform_(self.layers_features[i].weight)

        for i in range(len(self.layers_ext_value)):
            if hasattr(self.layers_ext_value[i], "weight"):
                torch.nn.init.xavier_uniform_(self.layers_ext_value[i].weight)

        for i in range(len(self.layers_int_curiosity_value)):
            if hasattr(self.layers_int_curiosity_value[i], "weight"):
                torch.nn.init.xavier_uniform_(self.layers_int_curiosity_value[i].weight)

        for i in range(len(self.layers_int_entropy_value)):
            if hasattr(self.layers_int_entropy_value[i], "weight"):
                torch.nn.init.xavier_uniform_(self.layers_int_entropy_value[i].weight)

        for i in range(len(self.layers_policy)):
            if hasattr(self.layers_policy[i], "weight"):
                torch.nn.init.xavier_uniform_(self.layers_policy[i].weight)

        self.model_features = nn.Sequential(*self.layers_features)
        self.model_features.to(self.device)

        self.model_ext_value = nn.Sequential(*self.layers_ext_value)
        self.model_ext_value.to(self.device)

        self.model_int_curiosity_value = nn.Sequential(*self.layers_int_curiosity_value)
        self.model_int_curiosity_value.to(self.device)

        self.model_int_entropy_value = nn.Sequential(*self.layers_int_entropy_value)
        self.model_int_entropy_value.to(self.device)

        self.model_policy = nn.Sequential(*self.layers_policy)
        self.model_policy.to(self.device)

        print("model_ppo")
        print(self.model_features)
        print(self.model_ext_value)
        print(self.model_int_curiosity_value)
        print(self.model_int_entropy_value)
        print(self.model_policy)
        print("\n\n")

    def forward(self, state):
        """Return (policy_logits, ext_value, int_curiosity_value, int_entropy_value)."""
        features = self.model_features(state)

        policy = self.model_policy(features)
        ext_value = self.model_ext_value(features)
        int_curiosity_value = self.model_int_curiosity_value(features)
        int_entropy_value = self.model_int_entropy_value(features)

        return policy, ext_value, int_curiosity_value, int_entropy_value

    def save(self, path):
        """Save each sub-network's state_dict under the given path prefix."""
        print("saving ", path)
        torch.save(self.model_features.state_dict(), path + "model_features.pt")
        torch.save(self.model_ext_value.state_dict(), path + "model_ext_value.pt")
        torch.save(self.model_int_curiosity_value.state_dict(), path + "model_int_curiosity_value.pt")
        torch.save(self.model_int_entropy_value.state_dict(), path + "model_int_entropy_value.pt")
        torch.save(self.model_policy.state_dict(), path + "model_policy.pt")

    def load(self, path):
        """Load each sub-network's state_dict from the given path prefix and switch to eval mode."""
        print("loading ", path)
        self.model_features.load_state_dict(torch.load(path + "model_features.pt", map_location = self.device))
        self.model_ext_value.load_state_dict(torch.load(path + "model_ext_value.pt", map_location = self.device))
        self.model_int_curiosity_value.load_state_dict(torch.load(path + "model_int_curiosity_value.pt", map_location = self.device))
        self.model_int_entropy_value.load_state_dict(torch.load(path + "model_int_entropy_value.pt", map_location = self.device))
        self.model_policy.load_state_dict(torch.load(path + "model_policy.pt", map_location = self.device))

        self.model_features.eval()
        self.model_ext_value.eval()
        self.model_int_curiosity_value.eval()
        self.model_int_entropy_value.eval()
        self.model_policy.eval()

    def get_activity_map(self, state):
        """Up-sample the feature activations to the input resolution as a [0, 1] heat map.

        NOTE(review): the hard-coded reshape assumes the trunk emits
        128*6*6 features and that input_shape is (C, H, W) — only valid for
        a conv-style configuration, not the Linear trunk built above.
        """
        state_t = torch.tensor(state, dtype=torch.float32).detach().to(self.device).unsqueeze(0)
        features = self.model_features(state_t)
        features = features.reshape((1, 128, 6, 6))

        upsample = nn.Upsample(size=(self.input_shape[1], self.input_shape[2]), mode='bicubic')
        features = upsample(features).sum(dim = 1)

        result = features[0].to("cpu").detach().numpy()
        # Rescale linearly so the map spans exactly [0, 1].
        k = 1.0/(result.max() - result.min())
        q = 1.0 - k*result.max()
        result = k*result + q

        return result
| StarcoderdataPython |
3341258 | <reponame>kupc25648/RL-Structure
'''
==================================================================
Frame structure environemnt file
This file contains environment for train agent using reinforcement learning
ENV : contains Game class
Game1 : REDUCE TOTAL SURFACE for Q-Learning , Double Q-Learning and Actor-Critic - 6 Actions
Game2 : REDUCE STRAIN ENERGY for Q-Learning , Double Q-Learning and Actor-Critic - 6 Actions
Game3 : REDUCE TOTAL SURFACE for DDPG - 6 Actions
Game4 : REDUCE STRAIN ENERGY for DDPG - 6 Actions
Game5 : REDUCE TOTAL SURFACE for MADDPG - 6 Actions
Game6 : REDUCE STRAIN ENERGY for MADDPG - 6 Actions
Adjustable parameter are under '研究室'
フレーム構造環境ファイル
このファイルには、強化学習を使用したトレーニングエージェントの環境が含まれています
ENV:ゲームクラスを含む
Game1 : Q-Learning、Double Q-Learning、Actor-Criticの総表面を減らす-6行動
Game2 : Q-Learning、Double Q-Learning、Actor-Criticの歪みエネルギーの削減-6行動
Game3 : DDPGの総表面を減らす-6行動
Game4 : DDPGの歪みエネルギーの削減-6行動
Game5 : MADDPGの総表面を減らす-6行動
Game6 : MADDPGの歪みエネルギーの削減-6行動
調整可能なパラメータは「研究室」の下にあります
==================================================================
'''
'''
======================================================
IMPORT PART
======================================================
'''
import math
import os
import datetime
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
'''
======================================================
Helper functions
======================================================
'''
def state_data(i, j, self):
    """Collect the normalised per-node observation for grid node (i, j).

    ``self`` is the Game instance (kept as the third positional argument for
    backward compatibility with the existing ``state_data(i, j, self)``
    call sites inside the Game classes).

    Returns the 23-tuple
        (dtxi, dtzi, dtxj_up, dtxj_down, dtxj_left, dtxj_right,
         dtzj_up, dtzj_down, dtzj_left, dtzj_right,
         di, dj_up, dj_down, dj_left, dj_right,
         n_up, n_down, n_left, n_right, pos1, pos2, bc, geo)
    where any quantity that cannot be computed (missing neighbour node,
    degenerate normalisation range, ...) falls back to 0, exactly as the
    original per-field try/except blocks did.  The ~20 copy-pasted blocks
    of the original are folded into two guarded helpers, and the bare
    ``except:`` clauses are narrowed to ``except Exception``.
    """
    grid = self.gen_model.n_u_name_div

    def _safe_node(ii, jj):
        # Reject negative indices explicitly: Python would otherwise wrap
        # them to the far side of the grid.  Out-of-range positive indices
        # raise IndexError, which the callers convert to 0.
        if ii < 0 or jj < 0:
            raise IndexError(
                "no neighbour at ({}, {})".format(ii, jj))
        return grid[ii][jj]

    def _norm_dof(ii, jj, dof, lo, hi):
        # Normalised displacement component ``dof`` of node (ii, jj),
        # or 0 when the node is missing or hi == lo.
        try:
            return (_safe_node(ii, jj).global_d[dof][0] - lo) / (hi - lo)
        except Exception:
            return 0

    def _slope_to(ii, jj):
        # Height difference from node (i, j) to neighbour (ii, jj),
        # expressed in spans; 0 when the neighbour does not exist.
        try:
            return (grid[i][j].coord[1] - _safe_node(ii, jj).coord[1]) / self.gen_model.span
        except Exception:
            return 0

    # Boundary-condition code, scaled to [0, 1]:
    # 1 = free, 2 = roller (x, z), 3 = pin, 4 = fixed, 0 = anything else.
    res = grid[i][j].res
    if res == [0, 0, 0, 0, 0, 0]:
        bc = 1
    elif res == [0, 1, 0, 0, 0, 0]:
        bc = 2
    elif res == [1, 1, 1, 0, 0, 0]:
        bc = 3
    elif res == [1, 1, 1, 1, 1, 1]:
        bc = 4
    else:
        bc = 0
    bc = bc / 4

    # Relative heights of the four neighbours
    # (up/down walk rows, i.e. z; left/right walk columns, i.e. x).
    n_up = _slope_to(i - 1, j)
    n_down = _slope_to(i + 1, j)
    n_left = _slope_to(i, j - 1)
    n_right = _slope_to(i, j + 1)

    # Normalised rotations about x (global_d index 3) and z (index 5).
    dtxi = _norm_dof(i, j, 3, self.dtxmin, self.dtxmax)
    dtzi = _norm_dof(i, j, 5, self.dtzmin, self.dtzmax)
    dtxj_up = _norm_dof(i - 1, j, 3, self.dtxmin, self.dtxmax)
    dtxj_down = _norm_dof(i + 1, j, 3, self.dtxmin, self.dtxmax)
    dtxj_left = _norm_dof(i, j - 1, 3, self.dtxmin, self.dtxmax)
    dtxj_right = _norm_dof(i, j + 1, 3, self.dtxmin, self.dtxmax)
    dtzj_up = _norm_dof(i - 1, j, 5, self.dtzmin, self.dtzmax)
    dtzj_down = _norm_dof(i + 1, j, 5, self.dtzmin, self.dtzmax)
    dtzj_left = _norm_dof(i, j - 1, 5, self.dtzmin, self.dtzmax)
    dtzj_right = _norm_dof(i, j + 1, 5, self.dtzmin, self.dtzmax)

    # Normalised vertical deflections (global_d index 1).
    di = _norm_dof(i, j, 1, self.dymin, self.dymax)
    dj_up = _norm_dof(i - 1, j, 1, self.dymin, self.dymax)
    dj_down = _norm_dof(i + 1, j, 1, self.dymin, self.dymax)
    dj_left = _norm_dof(i, j - 1, 1, self.dymin, self.dymax)
    dj_right = _norm_dof(i, j + 1, 1, self.dymin, self.dymax)

    # Geometry / position features (unguarded in the original as well).
    geo = grid[i][j].coord[1] / self.max_y_val
    pos1 = i / self.num_x
    pos2 = j / self.num_z

    return (dtxi, dtzi, dtxj_up, dtxj_down, dtxj_left, dtxj_right,
            dtzj_up, dtzj_down, dtzj_left, dtzj_right,
            di, dj_up, dj_down, dj_left, dj_right,
            n_up, n_down, n_left, n_right, pos1, pos2, bc, geo)
'''
======================================================
CLASS PART
======================================================
'''
# Environment class, contain Game class. Do not change / 環境クラス、ゲームクラスを含みます。 変えないで
class ENV:
    """Thin RL-environment wrapper around a Game instance.

    Mirrors the game's (state, action, reward, next_state, done) buffers
    and packs them into replay-buffer entries via gen_output().
    (File policy: this class must not be modified — comments only.)
    """
    def __init__(self,game):
        self.name = 'FRAME_ENV'
        self.game = game
        self.num_agents = game.num_agents
        # Set to 1 once the wrapped game reports completion (see check_over()).
        self.over = 0
        #=======================
        #State Action Reward Next_State Done
        #=======================
        self.state = self.game.state
        self.action = self.game.action #get action from rl or data file
        self.reward = self.game.reward
        self.next_state = self.game.next_state
        self.done = self.game.done
        #=======================
        #Output Action
        #=======================
        self.output = [] #output = [St,at,rt,St+1,Done]
    def check_over(self):
        """Mirror the game's done flag into self.over."""
        if self.game.done_counter == 1:
            self.over = 1
        else:
            pass
    def reset(self):
        """Reset the wrapped game and re-bind its buffers."""
        self.over = 0
        self.game.reset()
        self.state = self.game.state
        self.action = self.game.action
        self.reward = self.game.reward
        self.next_state = self.game.next_state
        self.done = self.game.done
        self.output = []
    def gen_output(self):
        '''
        Output_list
        1. replay buffer = [St,at,rt,St+1,Done]
        2. other format(.txt)
        3. render
        '''
        # reset output to empty list
        # NOTE(review): despite the comment above, self.output is *not*
        # cleared here; one identical entry per agent is appended instead.
        for i in range(self.num_agents):
            x = []
            # output = [St,at,rt,St+1,Done] one replay buffer
            x.append(self.state[-1])
            x.append(self.action[-1])
            x.append(self.reward[-1])
            x.append(self.next_state[-1])
            x.append(self.done[-1])
            self.output.append(x)
    def save_output(self):
        # save replaybufferfile as txt, csv
        pass
#=============================================================================
# GAME 1 '研究室'
class Game1:
    def __init__(self,end_step,alpha,max_y_val,model,num_agents=1,render=0,tell_action=False):
        """Set up a 'reduce total surface' game on the given Gen structural model.

        end_step:    number of steps before the episode ends
        alpha:       node-move step size as a fraction of the model span
        max_y_val:   upper bound for node height (vertical coordinate)
        model:       Gen structural model wrapper (exposes .model, .num_x, .num_z, .span)
        num_agents:  number of cooperating agents
        render:      render after each step when truthy
        tell_action: print each agent action to the console
        """
        self.name = 'GAME 1' # Name of the game / ゲームの名前
        self.description = 'AGENT HAS 6 ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)' # Game's description / ゲームの説明
        self.objective = 'REDUCE TOTAL SURFACE' # Game's objective / ゲームの目的
        self.tell_action =tell_action # Print agent action in console /コンソールでエージェントの行動を印刷する
        self.num_agents = num_agents # Amount of agents in the game / ゲーム内のエージェントの数
        self.gen_model = model # Gen structural model used in the game / ゲームで使用されるGen構造モデル
        self.model = model.model # Structural model used in the game / ゲームで使用される構造モデル
        self.num_x = model.num_x # Amount of Structural model's span in x axis (horizontal) / X軸での構造モデルのスパンの量(水平)
        self.num_z = model.num_z # Amount of Structural model's span in z axis (horizontal) / Z軸での構造モデルのスパンの量(水平)
        self.render = render # Render after each step / 各ステップ後にレンダリング
        self.game_step = 1 # Game initial step / ゲームの最初のステップ
        self.game_type = 0 # Game's state type / ゲームの状態タイプ
        self.end_step = end_step # Game final step / ゲームの最終ステップ
        self.alpha = alpha # Magnitude for agents to adjust structure as a factor of Structural model's span / エージェントが構造モデルのスパンの要素として構造を調整するための大きさ
        self.y_step = self.alpha*self.gen_model.span # Magnitude for agents to adjust structure(m) / エージェントが構造を調整するための大きさ(m)
        self.state = [] # Game state / ゲームの状態
        self.action = [] # Game action / ゲームの行動
        self.reward = [] # Game reward for each agent / 各エージェントのゲーム報酬
        self.next_state = [] # Game next state / ゲームの次の状態
        self.done = [] # Game over counter / ゲームオーバーカウンター
        self.doing = [] # List of position(x,z) in the structure of each agent / 各エージェントの構造内のposition(x、z)のリスト
        for i in range(self.num_agents): # Initialize starting position of each structure / 各構造の開始位置を初期化
            self.doing.append([0,0])
        self.metagrid = [] # 2-D Array of data in each structural node / 各構造ノードのデータの2次元配列
        for i in range(self.num_z): # Initialize structural data array / 構造データ配列を初期化する
            self.metagrid.append([])
            for j in range(self.num_x):
                self.metagrid[-1].append([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]) # Maximum num_state in this suite is 23 / このスイートの最大num_stateは23です
        self.xmax = 0 # Maximum x coordinate value in this structural model (horizontal) / この構造モデルの最大x座標値(水平)
        self.xmin = 0 # Minimum x coordinate value in this structural model (horizontal) / この構造モデル(水平)の最小x座標値
        self.ymax = 0 # Maximum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最大値(垂直)
        self.ymin = 0 # Minimum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最小値(垂直)
        self.zmax = 0 # Maximum z coordinate value in this structural model (horizontal) / この構造モデルの最大Z座標値(水平)
        self.zmin = 0 # Minimum z coordinate value in this structural model (horizontal) / この構造モデルの最小Z座標値(水平)
        self.sval = 0.001 # small noise / 小さなノイズ
        # Displacement/rotation extrema, refreshed by _update_metagrid().
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0  # 1 once the first-step extrema have been latched
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val # Maximum y coordinate value in this structural model for this game (vertical) / このゲームのこの構造モデルの最大y座標値(垂直)
        #**********
        self.int_strain_e = 0 # Initial Total length for this game / このゲームの初期の全長
        self.strain_e = 0 # Current Total length for this game / このゲームの現在の合計の長さ
        self.next_strain_e = 0 # Total length after agents do actions. Used for calculating reward / エージェントがアクションを実行した後の全長。 報酬の計算に使用されます
        #**********
        self.reward_counter = [] # List of reward of each agent / 各エージェントの報酬一覧
        for i in range(self.num_agents): # Initialize reward for each agent / 各エージェントの報酬を初期化する
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0 # Counter for game end / ゲーム終了のカウンター
        # Print out game properties at beginning of the game / ゲームの開始時にゲームのプロパティを印刷する
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')
# Function to set Game's state type / ゲームの状態タイプを設定する関数
    def set_gametype(self,game_type):
        """Select which observation layout subsequent state queries use."""
        self.game_type = game_type
# Fuction to update structural data array / 構造化データ配列を更新する関数
    def _update_metagrid(self):
        """Re-analyse the structure and refresh the per-node observation grid.

        Recomputes coordinate/displacement extrema from all nodes (latching
        first-step values in the *0 attributes) and rebuilds self.metagrid
        from state_data().
        """
        # update structure
        self.model.restore()
        self.model.gen_all()
        xlist = []
        ylist = []
        zlist = []
        dylist = []
        dtxlist = []
        dtylist = []
        dtzlist = []
        for i in range(len(self.model.nodes)):
            xlist.append(self.model.nodes[i].coord[0])
            ylist.append(self.model.nodes[i].coord[1])
            zlist.append(self.model.nodes[i].coord[2])
            dylist.append(self.model.nodes[i].global_d[1][0])
            dtxlist.append(self.model.nodes[i].global_d[3][0])
            dtylist.append(self.model.nodes[i].global_d[4][0])
            dtzlist.append(self.model.nodes[i].global_d[5][0])
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        self.sdyval = self.sval*self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        # Latch the first-call absolute extrema once (used as fixed references).
        if self.dmaxset == 0:
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max([abs(min(dtxlist)),abs(max(dtxlist))])
            self.dtymax0 = max([abs(min(dtylist)),abs(max(dtylist))])
            self.dtzmax0 = max([abs(min(dtzlist)),abs(max(dtzlist))])
            self.dmaxset = 1
        for i in range(self.num_z):
            for j in range(self.num_x):
                dtxi,dtzi,dtxj_up,dtxj_down,dtxj_left,dtxj_right,dtzj_up,dtzj_down,dtzj_left,dtzj_right,di,dj_up,dj_down,dj_left,dj_right,n_up,n_down,n_left,n_right,pos1,pos2,bc,geo = state_data(i,j,self)
                # The full 23-feature layout below is kept (disabled) for reference.
                '''
                self.metagrid[i][j] = [[dtxi],[dtzi],[dtxj_up],[dtxj_down],[dtxj_left],[dtxj_right],[dtzj_up],[dtzj_down],[dtzj_left],[dtzj_right],[di],[dj_up],[dj_down],[dj_left],[dj_right],[n_up],[n_down],[n_left],[n_right],[pos1],[pos2],[bc],[geo]]
                '''
                self.metagrid[i][j] = [
                                    [n_up],[n_down],[n_left],[n_right],
                                    [pos1],[pos2],
                                    [geo]
                                    ]
# Function to calculate value use in the reward system / 報酬制度での価値利用を計算する機能
    def _game_gen_state_condi(self):
        """Refresh the reward quantities: current (and, on step 1, initial) total surface."""
        self.gen_model.gen_surface1() # Calculate total surface / 総表面積を計算する
        self.strain_e = self.gen_model.surface_1 # Current total surface of this structure / この構造の総表面積
        if self.game_step == 1: # Initial total length of this structure / この構造の初期の全長
            self.int_strain_e = self.gen_model.surface_1
        else:
            pass
# Function to initialize state / 状態を初期化する関数
def _game_get_1_state(self,do):
self._update_metagrid() # update structural data array / 構造データ配列を更新する
# do = [i,j]
# metagrid[z,x]
# Check game type to generate state from structural data array / 構造データ配列から状態を生成するゲームタイプをチェックしてください
x = self.metagrid[do[0]][do[1]]
state = np.array(x) # state / 状態
return state
# Function to generate next state / 次の状態を生成する関数
def _game_get_next_state(self,do,action,i=0):
num = [action[0][0],action[0][1],action[0][2],action[0][3],action[0][4],action[0][5]]
num = num.index(max(num)) # Find maximum index of the action receive from Neural Network / ニューラルネットワークから受け取るアクションの最大インデックスを見つける
# next_state = f(action)
# Interpretation of action index / 行動のインデックスの解釈
if num == 0: # Adjust this node by moving up in the magnitude of y_step / y_stepの大きさを上に移動して、このノードを調整します
if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] !=1:
if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] + self.y_step <= self.max_y_val:
self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] += self.y_step
else:
pass
else:
pass
elif num == 1: # Adjust this node by moving down in the magnitude of y_step / y_stepの大きさを下に移動して、このノードを調整します
if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] - self.y_step >= 0:
self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] -= self.y_step
else:
pass
else:
pass
elif num == 2: # Agent move to other node to the right(move right x+1) / #エージェントは他のノードに右に移動します(右にx + 1移動)
# do[z,x]
if (do[1]+1 != (len(self.gen_model.n_u_name_div[0]))):
self.doing[0][1] = do[1]+1
else:
pass
elif num == 3: # Agent move to other node to the left(move left x-1) / エージェントは他のノードに左に移動します(左に移動x-1)
# do[z,x]
if (do[1] != 0):
self.doing[0][1] = do[1]-1
else:
pass
elif num == 4: # Agent move to other node to the upper(move up z-1) / エージェントが他のノードに移動します(上に移動z-1)
# do[z,x]
if (do[0] != 0):
self.doing[0][0] = do[0]-1
else:
pass
elif num == 5: # Agent move to other node to the lower(move down z+1) / エージェントは他のノードに移動します(z + 1に移動)
# do[z,x]
if (do[0]+1 != (len(self.gen_model.n_u_name_div))):
self.doing[0][0] = do[0]+1
else:
pass
announce = ['z_up','z_down','move right','move left','move up','move down'] # list of actions / 行動のリスト
if self.tell_action == True:
print(announce[num-1]) # print out action if tell_action is Trues / tell_actionがTrueの場合、行動を出力します
self._update_metagrid() # update structural data array / 構造データ配列を更新する
# Check game type to generate state from structural data array / 構造データ配列から状態を生成するゲームタイプをチェックしてください
x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
next_state = np.array(x) # next_state / 次の状態
return next_state
# Function to calculate value use in the reward system / 報酬制度での価値利用を計算する機能
def _gen_gen_reward_condition(self):
self.gen_model.gen_surface1() # Calculate next state's total surface / 次の状態の総表面積を計算する
self.next_strain_e = self.gen_model.surface_1# Total surface of this structure in the next_state after agents do actions / エージェントが行動を実行した後のnext_state内のこの構造の総表面積
# Function to calculate reward for each agent / 各エージェントの報酬を計算する機能
def _game_get_reward(self,agent):
self.reward[agent] += 1000*(self.strain_e[0]-self.next_strain_e[0])/(self.int_strain_e[0]) # Reward rule / 報酬規定
if self.game_step == self.end_step: # Check if game is end / ゲームが終了したかどうかを確認する
self.done_counter = 1
return self.reward[agent],self.done_counter
# Function to reset every values and prepare for the next game / すべての値をリセットして次のゲームに備える機能
def reset(self):
self.state = [] # Game state / ゲームの状態
self.action = [] # Game action / ゲームの行動
self.reward = [] # Game reward for each agent / 各エージェントのゲーム報酬
for i in range(self.num_agents):
self.reward.append(0)
self.next_state = [] # Game next state / ゲームの次の状態
self.done = [] # Game over counter / ゲームオーバーカウンター
self.doing = [] # List of position(x,z) in the structure of each agent / 各エージェントの構造内のposition(x、z)のリスト
for i in range(self.num_agents): # Initialize starting position of each structure / 各構造の開始位置を初期化
self.doing.append([0,0])
self.game_step = 1 # Game initial step / ゲームの最初のステップ
self.xmax = 0 # Maximum x coordinate value in this structural model (horizontal) / この構造モデルの最大x座標値(水平)
self.xmin = 0 # Minimum x coordinate value in this structural model (horizontal) / この構造モデル(水平)の最小x座標値
self.ymax = 0 # Maximum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最大値(垂直)
self.ymin = 0 # Minimum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最小値(垂直)
self.zmax = 0 # Maximum z coordinate value in this structural model (horizontal) / この構造モデルの最大Z座標値(水平)
self.zmin = 0 # Minimum z coordinate value in this structural model (horizontal) / この構造モデルの最小Z座標値(水平)
self.sval = 0.001 # small noise / 小さなノイズ
self.dymax = 0
self.dymin = 0
self.sdyval = 0
self.dmax0 = 0
self.dmaxset = 0
self.dtxmax = 0
self.dtxmin = 0
self.dtymax = 0
self.dtymin = 0
self.dtzmax = 0
self.dtzmin = 0
self.dtxmax0 = 0
self.dtymax0 = 0
self.dtzmax0 = 0
#**********
self.int_strain_e = 0 # Initial Total length for this game / このゲームの初期の全長
self.strain_e = 0 # Current Total length for this game / このゲームの現在の合計の長さ
self.next_strain_e = 0 # Total length after agents do actions. Used for calculating reward / エージェントがアクションを実行した後の全長。 報酬の計算に使用されます
#**********
self.reward_counter = [] # List of reward of each agent / 各エージェントの報酬一覧
for i in range(self.num_agents): # Initialize reward for each agent / 各エージェントの報酬を初期化する
self.reward.append(0)
self.reward_counter.append(0)
self.done_counter = 0 # Counter for game end / ゲーム終了のカウンター
# Function change state to next_state / 関数は状態を次の状態に変更します
def step(self):
self.state = self.next_state
self.action = [] # Reset List of Action for each agent / 各エージェントのアクションリストをリセット
for i in range(len(self.reward)): # Reset List of Reward for each agent / 各エージェントの報酬リストをリセット
self.reward[i] = 0
self.next_state = [] # Reset List of next state for each agent / 各エージェントの次の状態のリストをリセット
self.done = [] # Reset List of game over counter / ゲームオーバーカウンターのリストをリセット
self.game_step += 1 # Increase game step counter / ゲームのステップカウンターを増やす
#=============================================================================
# GAME 2 '研究室'
class Game2:
    """Grid game whose objective is to reduce the structure's strain energy.

    Each step the agent either moves the node it is standing on up/down by
    ``y_step`` (bounded by ``max_y_val`` and 0) or walks to one of the four
    neighbouring grid nodes.
    """
    def __init__(self, end_step, alpha, max_y_val, model, num_agents=1,
                 render=0, tell_action=False):
        """Set up the game around a generated structural model.

        Parameters
        ----------
        end_step : final game step (game ends when game_step reaches it)
        alpha : node-adjustment magnitude as a factor of the model span
        max_y_val : upper bound on a node's y coordinate
        model : generator wrapper exposing ``model``, ``num_x``, ``num_z``,
            ``span`` and ``n_u_name_div``
        num_agents : number of agents in the game
        render : render after each step (0 = no render)
        tell_action : print each chosen action to the console
        """
        self.name = 'GAME 2'
        self.description = 'AGENT HAS 6 ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)'
        self.objective = 'REDUCE STRAIN ENERGY'
        self.tell_action = tell_action           # print actions to console
        self.num_agents = num_agents             # number of agents
        self.gen_model = model                   # generator wrapper of the structural model
        self.model = model.model                 # structural model used in the game
        self.num_x = model.num_x                 # spans along x (horizontal)
        self.num_z = model.num_z                 # spans along z (horizontal)
        self.render = render                     # render after each step
        self.game_step = 1                       # current game step
        self.game_type = 0                       # state-layout selector
        self.end_step = end_step                 # final game step
        self.alpha = alpha                       # adjustment factor of span
        self.y_step = self.alpha * self.gen_model.span  # adjustment magnitude (m)
        self.state = []                          # game state
        self.action = []                         # game action
        self.reward = []                         # per-agent reward
        self.next_state = []                     # next state
        self.done = []                           # game-over flags
        self.doing = []                          # [z, x] grid position of each agent
        for _ in range(self.num_agents):
            self.doing.append([0, 0])
        self.metagrid = []                       # 2-D per-node state array
        for _ in range(self.num_z):
            row = []
            for _ in range(self.num_x):
                row.append([0] * 23)             # maximum num_state in this suite is 23
            self.metagrid.append(row)
        # coordinate extrema of the structural model
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001                        # small noise
        # deformation extrema
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val               # upper bound on node height
        # objective bookkeeping (initial / current / post-action values)
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []                 # per-agent accumulated reward
        for _ in range(self.num_agents):
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0                    # game-over counter
        # announce the game configuration
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')

    def set_gametype(self, game_type):
        """Select the state-layout variant used when building observations."""
        self.game_type = game_type

    def _update_metagrid(self):
        """Re-analyse the structure and refresh the per-node state array."""
        self.model.restore()
        self.model.gen_all()
        nodes = self.model.nodes
        xlist = [nd.coord[0] for nd in nodes]
        ylist = [nd.coord[1] for nd in nodes]
        zlist = [nd.coord[2] for nd in nodes]
        dylist = [nd.global_d[1][0] for nd in nodes]
        dtxlist = [nd.global_d[3][0] for nd in nodes]
        dtylist = [nd.global_d[4][0] for nd in nodes]
        dtzlist = [nd.global_d[5][0] for nd in nodes]
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        # NOTE(review): sdyval is computed from the *previous* analysis'
        # dymin (dymin is refreshed a few lines below) — kept as in the
        # original; confirm this ordering is intended.
        self.sdyval = self.sval * self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        if self.dmaxset == 0:
            # capture first-analysis extrema once as normalisation baselines
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max(abs(min(dtxlist)), abs(max(dtxlist)))
            self.dtymax0 = max(abs(min(dtylist)), abs(max(dtylist)))
            self.dtzmax0 = max(abs(min(dtzlist)), abs(max(dtzlist)))
            self.dmaxset = 1
        for i in range(self.num_z):
            for j in range(self.num_x):
                (dtxi, dtzi, dtxj_up, dtxj_down, dtxj_left, dtxj_right,
                 dtzj_up, dtzj_down, dtzj_left, dtzj_right,
                 di, dj_up, dj_down, dj_left, dj_right,
                 n_up, n_down, n_left, n_right,
                 pos1, pos2, bc, geo) = state_data(i, j, self)
                self.metagrid[i][j] = [[dtxi], [dtzi], [dtxj_up], [dtxj_down],
                                       [dtxj_left], [dtxj_right], [dtzj_up],
                                       [dtzj_down], [dtzj_left], [dtzj_right],
                                       [di], [dj_up], [dj_down], [dj_left],
                                       [dj_right], [n_up], [n_down], [n_left],
                                       [n_right], [pos1], [pos2], [bc], [geo]]

    def _game_gen_state_condi(self):
        """Refresh the strain-energy values used by the reward system."""
        self.model.restore()                     # reset model values
        self.model.gen_all()                     # run the analysis
        self.strain_e = self.model.U_full        # current strain energy
        if self.game_step == 1:
            # remember the starting strain energy as the reward baseline
            self.int_strain_e = self.model.U_full

    def _game_get_1_state(self, do):
        """Return the observation for the agent at grid position ``do`` ([z, x])."""
        self._update_metagrid()                  # refresh structural data array
        return np.array(self.metagrid[do[0]][do[1]])

    def _game_get_next_state(self, do, action, i=0):
        """Apply ``action`` for agent ``i`` at grid position ``do`` and
        return the resulting observation.

        The argmax of the six action scores selects:
          0: move the node up by ``y_step`` (capped at ``max_y_val``)
          1: move the node down by ``y_step`` (floored at 0)
          2-5: move the agent right / left / up / down on the grid
        """
        scores = [action[0][k] for k in range(6)]
        num = scores.index(max(scores))
        node = self.gen_model.n_u_name_div[do[0]][do[1]]
        if num == 0:
            # raise the node unless it is restrained or at the height cap
            if node.res[1] != 1 and node.coord[1] + self.y_step <= self.max_y_val:
                node.coord[1] += self.y_step
        elif num == 1:
            # lower the node unless it is restrained or at the floor
            if node.res[1] != 1 and node.coord[1] - self.y_step >= 0:
                node.coord[1] -= self.y_step
        # BUG FIX: the move actions used to write self.doing[0] even though
        # the state lookup below reads self.doing[i]; use agent i.
        elif num == 2:
            if do[1] + 1 != len(self.gen_model.n_u_name_div[0]):  # move right (x+1)
                self.doing[i][1] = do[1] + 1
        elif num == 3:
            if do[1] != 0:                                        # move left (x-1)
                self.doing[i][1] = do[1] - 1
        elif num == 4:
            if do[0] != 0:                                        # move up (z-1)
                self.doing[i][0] = do[0] - 1
        elif num == 5:
            if do[0] + 1 != len(self.gen_model.n_u_name_div):     # move down (z+1)
                self.doing[i][0] = do[0] + 1
        announce = ['z_up', 'z_down', 'move right', 'move left', 'move up', 'move down']
        if self.tell_action == True:
            # BUG FIX: was announce[num-1], which mislabelled every action
            print(announce[num])
        self._update_metagrid()                  # refresh structural data array
        return np.array(self.metagrid[self.doing[i][0]][self.doing[i][1]])

    def _gen_gen_reward_condition(self):
        """Recompute the strain energy after the agents' actions."""
        self.model.restore()                     # reset model values
        self.model.gen_all()                     # run the analysis
        self.next_strain_e = self.model.U_full   # post-action strain energy

    def _game_get_reward(self, agent):
        """Return ``(reward, done_flag)`` for ``agent``.

        Reward is the normalised improvement of the objective over this
        step, scaled by 1000.
        """
        improvement = self.strain_e[0] - self.next_strain_e[0]
        self.reward[agent] += 1000 * improvement / self.int_strain_e[0]
        if self.game_step == self.end_step:      # final step reached
            self.done_counter = 1
        return self.reward[agent], self.done_counter

    def reset(self):
        """Reset every game value and prepare for the next game."""
        self.state = []
        self.action = []
        # BUG FIX: reward used to be filled by two loops, leaving
        # 2*num_agents entries; one zero per agent is enough.
        self.reward = [0 for _ in range(self.num_agents)]
        self.next_state = []
        self.done = []
        self.doing = [[0, 0] for _ in range(self.num_agents)]
        self.game_step = 1
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001                        # small noise
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = [0 for _ in range(self.num_agents)]
        self.done_counter = 0

    def step(self):
        """Promote next_state to state and clear the per-step buffers."""
        self.state = self.next_state
        self.action = []
        for k in range(len(self.reward)):        # zero rewards in place
            self.reward[k] = 0
        self.next_state = []
        self.done = []
        self.game_step += 1
#=============================================================================
# GAME 3
class Game3:
    """Multi-agent grid game whose objective is to reduce total surface.

    Each step every agent performs two sub-actions: a continuous up/down
    adjustment of its node (scaled by ``bound``) and a move to one of the
    four neighbouring grid nodes.
    """
    def __init__(self, end_step, alpha, max_y_val, model, num_agents=1,
                 render=0, tell_action=False):
        """Set up the game around a generated structural model.

        Parameters
        ----------
        end_step : final game step (game ends when game_step reaches it)
        alpha : node-adjustment magnitude as a factor of the model span
        max_y_val : upper bound on a node's y coordinate
        model : generator wrapper exposing ``model``, ``num_x``, ``num_z``,
            ``span``, ``n_u_name_div`` and ``gen_surface1``
        num_agents : number of agents in the game
        render : render after each step (0 = no render)
        tell_action : print each chosen action to the console
        """
        self.name = 'GAME 3'
        self.description = 'AGENT HAS 2 SUB ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)'
        self.objective = 'REDUCE TOTAL SURFACE'
        self.tell_action = tell_action           # print actions to console
        self.num_agents = num_agents             # number of agents
        self.gen_model = model                   # generator wrapper of the structural model
        self.model = model.model                 # structural model used in the game
        self.num_x = model.num_x                 # spans along x (horizontal)
        self.num_z = model.num_z                 # spans along z (horizontal)
        self.render = render                     # if render==0 no render
        self.game_step = 1                       # current game step
        self.game_type = 0                       # state-layout selector
        self.end_step = end_step                 # when the game ends
        self.alpha = alpha                       # adjustment factor of span
        self.y_step = self.alpha * self.gen_model.span  # adjustment magnitude (m)
        # state/action buffers (for MADDPG)
        self.state = []
        self.action = []                         # actions from RL or data file
        self.reward = []
        self.next_state = []
        self.done = []
        # game rules: agents start at the four interior corners of the grid
        self.doing = [[1, 1],
                      [len(self.gen_model.n_u_name_div) - 2, len(self.gen_model.n_u_name_div[0]) - 2],
                      [1, len(self.gen_model.n_u_name_div[0]) - 2],
                      [len(self.gen_model.n_u_name_div) - 2, 1]]
        self.metagrid = []                       # 2-D per-node state array
        for _ in range(self.num_z):
            row = []
            for _ in range(self.num_x):
                row.append([0] * 23)             # maximum num_state in this suite is 23
            self.metagrid.append(row)
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001                        # small noise
        # deformation extrema
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val               # upper bound on node height
        # per-action scaling: continuous Z+/Z- scaled by y_step, moves are 0/1
        self.bound = [self.y_step, self.y_step, 1, 1, 1, 1]
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []                 # per-agent accumulated reward
        for _ in range(self.num_agents):
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0                    # game-over counter
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')

    def set_gametype(self, game_type):
        """Select the state-layout variant used when building observations."""
        self.game_type = game_type

    def _update_metagrid(self):
        """Re-analyse the structure and refresh the per-node state array."""
        self.model.restore()
        self.model.gen_all()
        nodes = self.model.nodes
        xlist = [nd.coord[0] for nd in nodes]
        ylist = [nd.coord[1] for nd in nodes]
        zlist = [nd.coord[2] for nd in nodes]
        dylist = [nd.global_d[1][0] for nd in nodes]
        dtxlist = [nd.global_d[3][0] for nd in nodes]
        dtylist = [nd.global_d[4][0] for nd in nodes]
        dtzlist = [nd.global_d[5][0] for nd in nodes]
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        # NOTE(review): sdyval is computed from the *previous* analysis'
        # dymin (dymin is refreshed a few lines below) — kept as in the
        # original; confirm this ordering is intended.
        self.sdyval = self.sval * self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        if self.dmaxset == 0:
            # capture first-analysis extrema once as normalisation baselines
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max(abs(min(dtxlist)), abs(max(dtxlist)))
            self.dtymax0 = max(abs(min(dtylist)), abs(max(dtylist)))
            self.dtzmax0 = max(abs(min(dtzlist)), abs(max(dtzlist)))
            self.dmaxset = 1
        for i in range(self.num_z):
            for j in range(self.num_x):
                (dtxi, dtzi, dtxj_up, dtxj_down, dtxj_left, dtxj_right,
                 dtzj_up, dtzj_down, dtzj_left, dtzj_right,
                 di, dj_up, dj_down, dj_left, dj_right,
                 n_up, n_down, n_left, n_right,
                 pos1, pos2, bc, geo) = state_data(i, j, self)
                # compact 7-element layout (neighbour heights, position, geo)
                self.metagrid[i][j] = [[n_up], [n_down], [n_left], [n_right],
                                       [pos1], [pos2], [geo]]

    def _game_gen_state_condi(self):
        """Refresh the surface-area values used by the reward system."""
        self.gen_model.gen_surface1()            # recompute total surface
        self.strain_e = self.gen_model.surface_1
        if self.game_step == 1:
            # remember the starting surface as the reward baseline
            self.int_strain_e = self.gen_model.surface_1

    def _game_get_1_state(self, do, multi=False):
        """Return the observation at grid position ``do`` ([z, x]).

        When ``multi`` is true the caller has already refreshed the
        metagrid, so the (expensive) re-analysis is skipped.
        """
        if not multi:
            self._update_metagrid()
        return np.array(self.metagrid[do[0]][do[1]])

    def _game_get_next_state(self, do, action, i=0, multi=False):
        """Apply agent ``i``'s two sub-actions at ``do`` and return its
        next observation.

        ``action[0][0:2]`` are continuous up/down scores (the argmax is
        executed, scaled by ``bound``); ``action[0][2:6]`` select the grid
        move (right / left / up / down).
        """
        adjnum = [action[0][0], action[0][1]]
        movenum = [action[0][2], action[0][3], action[0][4], action[0][5]]
        movenum = movenum.index(max(movenum))
        adjnum = adjnum.index(max(adjnum))
        node = self.gen_model.n_u_name_div[do[0]][do[1]]
        if adjnum == 0:
            # raise the node by a continuous amount, capped at max_y_val
            step = action[0][0] * self.bound[0]
            if node.res[1] != 1 and node.coord[1] + step <= self.max_y_val:
                node.coord[1] += step
                if self.tell_action == True:
                    print('Z+:{}'.format(action[0][0]*self.bound[0]))
        elif adjnum == 1:
            # lower the node by a continuous amount, floored at 0
            step = action[0][1] * self.bound[1]
            if node.res[1] != 1 and node.coord[1] - step >= 0:
                node.coord[1] -= step
                if self.tell_action == True:
                    print('Z-:{}'.format(action[0][1]*self.bound[1]))
        if movenum == 0:
            if do[1] + 1 != len(self.gen_model.n_u_name_div[0]):  # move right (x+1)
                self.doing[i][1] = do[1] + 1
        elif movenum == 1:
            if do[1] != 0:                                        # move left (x-1)
                self.doing[i][1] = do[1] - 1
        elif movenum == 2:
            if do[0] != 0:                                        # move up (z-1)
                self.doing[i][0] = do[0] - 1
        elif movenum == 3:
            if do[0] + 1 != len(self.gen_model.n_u_name_div):     # move down (z+1)
                self.doing[i][0] = do[0] + 1
        announce2 = ['move right', 'move left', 'move up', 'move down']
        if self.tell_action == True:
            # BUG FIX: was announce2[movenum-1], which mislabelled every move
            print(announce2[movenum])
        if not multi:
            self._update_metagrid()
        # NOTE(review): these slices were written for the 23-element state
        # layout, but _update_metagrid now stores 7-element rows — the
        # game_type 1-3 slices are likely stale, and game_type 0 leaves x
        # unbound (NameError). Kept as-is; confirm intent before relying
        # on any game_type other than 4.
        if self.game_type == 0:
            print('There is no game type')
        elif self.game_type == 1:
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][5:-1]
        elif self.game_type == 2:
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][9:]
        elif self.game_type == 3:
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][:9]
        elif self.game_type == 4:
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        next_state = np.array(x)
        return next_state

    def _game_get_next_state_maddpg(self, do, i):
        """Return agent ``i``'s observation from the already-updated
        metagrid (``do`` is kept for interface compatibility; the position
        comes from ``self.doing``)."""
        return np.array(self.metagrid[self.doing[i][0]][self.doing[i][1]])

    def _gen_gen_reward_condition(self):
        """Recompute the total surface after the agents' actions."""
        self.gen_model.gen_surface1()
        self.next_strain_e = self.gen_model.surface_1

    def _game_get_reward(self, agent):
        """Return ``(reward, done_flag)`` for ``agent``.

        Reward is the normalised improvement of the objective over this
        step, scaled by 1000.
        """
        improvement = self.strain_e[0] - self.next_strain_e[0]
        self.reward[agent] += 1000 * improvement / self.int_strain_e[0]
        if self.game_step == self.end_step:      # final step reached
            self.done_counter = 1
        return self.reward[agent], self.done_counter

    def reset(self):
        """Reset every game value and prepare for the next game."""
        self.state = []
        self.action = []
        # BUG FIX: reward used to be appended in two loops, leaving
        # 2*num_agents entries; one zero per agent is enough.
        self.reward = [0 for _ in range(self.num_agents)]
        # agents restart at the four interior corners of the grid
        self.doing = [[1, 1],
                      [len(self.gen_model.n_u_name_div) - 2, len(self.gen_model.n_u_name_div[0]) - 2],
                      [1, len(self.gen_model.n_u_name_div[0]) - 2],
                      [len(self.gen_model.n_u_name_div) - 2, 1]]
        self.next_state = []
        self.done = []
        self.game_step = 1
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = [0 for _ in range(self.num_agents)]
        self.done_counter = 0

    def step(self):
        """Promote next_state to state and clear the per-step buffers."""
        self.state = self.next_state
        self.action = []
        for k in range(len(self.reward)):        # zero rewards in place
            self.reward[k] = 0
        self.next_state = []
        self.done = []
        self.game_step += 1
#=============================================================================
# GAME 4
class Game4:
    def __init__(self,end_step,alpha,max_y_val,model,num_agents=1,render=0,tell_action=False):
        """Set up GAME 4 (continuous node adjustment + discrete grid move).

        Parameters
        ----------
        end_step : final game step (game ends when game_step reaches it)
        alpha : node-adjustment magnitude as a factor of the model span
        max_y_val : upper bound on a node's y coordinate
        model : generator wrapper exposing ``model``, ``num_x``, ``num_z``,
            ``span`` and ``n_u_name_div``
        num_agents : number of agents in the game
        render : render after each step (0 = no render)
        tell_action : print each chosen action to the console
        """
        self.name = 'GAME 4'
        self.description = 'AGENT HAS 2 SUB-ACTIONS IN ONE STEP: 1. MOVE NODE (UP DOWN) IN CONTIUOUS SPACE, 2.MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)'
        self.objective = 'REDUCE STRAIN ENERGY'
        self.tell_action =tell_action
        self.num_agents = num_agents
        self.gen_model = model  # generator wrapper of the structural model
        self.model = model.model  # structural model used in the game
        self.num_x = model.num_x  # spans along x (horizontal)
        self.num_z = model.num_z  # spans along z (horizontal)
        self.render = render # if render==0 no render
        self.game_step = 1
        self.game_type = 0  # state-layout selector
        self.end_step = end_step # when will the game end
        self.alpha = alpha # magnitude of adjusting node as a factor of span
        self.y_step = self.alpha*self.gen_model.span  # adjustment magnitude (m)
        #=======================
        #State Action'
        # for MADDPG
        #=======================
        self.state = []
        self.action = [] #get action from rl or data file
        self.reward = []
        self.next_state = []
        self.done = []
        #=======================
        # Game rules
        #=======================
        # NOTE(review): self.bad is only ever reset here; the assignments
        # elsewhere in this class are commented out — confirm it is still used.
        self.bad = 0
        self.doing = [] # list of doing node of each agent
        # agents start at the four interior corners of the grid
        self.doing = [[1,1],[len(self.gen_model.n_u_name_div)-2,len(self.gen_model.n_u_name_div[0])-2],
                      [1,len(self.gen_model.n_u_name_div[0])-2],
                      [len(self.gen_model.n_u_name_div)-2,1]
                      ]
        '''
        for i in range(self.num_agents):
            self.doing.append([0,0])
        '''
        #do1:(len(self.gen_model.n_u_name_div[0])-1)
        #do0:(len(self.gen_model.n_u_name_div)-1)
        self.metagrid = []  # 2-D array of per-node state data
        for i in range(self.num_z):
            self.metagrid.append([])
            for j in range(self.num_x):
                self.metagrid[-1].append([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]) # Maximum num_state in this suite is 23
        # coordinate extrema of the structural model
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001 # small noise
        # =========================================
        # deformation
        # =========================================
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val  # upper bound on node height
        self.bound = [self.y_step,self.y_step,1,1,1,1] #Adjusting has bound of self.max_y_val,djusting has bound of self.max_y_val, Moving is 1 or 0
        # objective bookkeeping (initial / current / post-action values)
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e =0
        self.reward_counter = []  # per-agent accumulated reward
        for i in range(self.num_agents):
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0  # game-over counter
        # announce the game configuration
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')
def set_gametype(self,game_type):
self.game_type = game_type
    def _update_metagrid(self):
        """Re-run the structural analysis and refresh the per-node state
        entries stored in ``self.metagrid`` (one 23-component vector per
        [z][x] grid cell, assembled by the module-level ``state_data``)."""
        # update structure
        self.model.restore()
        self.model.gen_all()
        # Collect node coordinates and nodal displacements/rotations.
        xlist = []
        ylist = []
        zlist = []
        dylist = []
        dtxlist = []
        dtylist = []
        dtzlist = []
        for i in range(len(self.model.nodes)):
            xlist.append(self.model.nodes[i].coord[0])
            ylist.append(self.model.nodes[i].coord[1])
            zlist.append(self.model.nodes[i].coord[2])
            # global_d rows: [1] = vertical displacement, [3]/[4]/[5] = rotations.
            dylist.append(self.model.nodes[i].global_d[1][0])
            dtxlist.append(self.model.nodes[i].global_d[3][0])
            dtylist.append(self.model.nodes[i].global_d[4][0])
            dtzlist.append(self.model.nodes[i].global_d[5][0])
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        # NOTE(review): sdyval is computed from the *previous* update's dymin
        # (dymin is refreshed a few lines below) -- confirm this ordering is
        # intentional before relying on it.
        self.sdyval = self.sval*self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        if self.dmaxset == 0:
            # First analysis only: remember reference extrema used for
            # normalisation for the rest of the episode.
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max([abs(min(dtxlist)),abs(max(dtxlist))])
            self.dtymax0 = max([abs(min(dtylist)),abs(max(dtylist))])
            self.dtzmax0 = max([abs(min(dtzlist)),abs(max(dtzlist))])
            self.dmaxset = 1
        # Rebuild the full per-cell state vectors.
        for i in range(self.num_z):
            for j in range(self.num_x):
                dtxi,dtzi,dtxj_up,dtxj_down,dtxj_left,dtxj_right,dtzj_up,dtzj_down,dtzj_left,dtzj_right,di,dj_up,dj_down,dj_left,dj_right,n_up,n_down,n_left,n_right,pos1,pos2,bc,geo = state_data(i,j,self)
                self.metagrid[i][j] = [[dtxi],[dtzi],[dtxj_up],[dtxj_down],[dtxj_left],[dtxj_right],[dtzj_up],[dtzj_down],[dtzj_left],[dtzj_right],[di],[dj_up],[dj_down],[dj_left],[dj_right],[n_up],[n_down],[n_left],[n_right],[pos1],[pos2],[bc],[geo]]
                '''
                self.metagrid[i][j] = [
                                       [n_up],[n_down],[n_left],[n_right],
                                       [pos1],[pos2],
                                       [geo]
                                       ]
                '''
def _game_gen_state_condi(self):
# Calculate strain energy
self.model.restore()
self.model.gen_all()
self.strain_e = self.model.U_full
# Calculate initial strain energy
if self.game_step == 1:
self.int_strain_e = self.model.U_full
else:
pass
def _game_get_1_state(self,do):
self._update_metagrid()
# do = [i,j]
# metagrid[z,x]
'''
if self.game_type==0:
print('There is no game type')
elif self.game_type==1:
# Theta-ij Pos Bc(numstate = 7)
x = self.metagrid[do[0]][do[1]][5:-1]
elif self.game_type==2:
# Z/Zmax Pos Bc (numstate = 4)
x = self.metagrid[do[0]][do[1]][9:]
elif self.game_type==3:
# Theta-ij di dj (numstate = 9)
x = self.metagrid[do[0]][do[1]][:9]
elif self.game_type==4:
# Theta-ij di dj Z/Zmax Pos Bc (numstate = 13)
x = self.metagrid[do[0]][do[1]]
'''
x = self.metagrid[do[0]][do[1]]
state = np.array(x)
return state
    def _game_get_next_state(self,do,action,i=0):
        """Apply agent ``i``'s action at cursor ``do = [z, x]`` and return the
        state vector at the (possibly moved) cursor.

        ``action[0]`` is a 6-vector: [0]/[1] score raising/lowering the
        current node, [2]..[5] score moving the cursor right/left/up/down.
        Each sub-action is decoded winner-take-all via index(max(...)).
        """
        num = [action[0][0],action[0][1],action[0][2],action[0][3],action[0][4],action[0][5]]
        adjnum = [action[0][0],action[0][1]]
        movenum = [action[0][2],action[0][3],action[0][4],action[0][5]]
        movenum = movenum.index(max(movenum))
        adjnum = adjnum.index(max(adjnum))
        num = num.index(max(num))
        # next_state = f(action)
        # Interprete action
        # --- sub-action 1: adjust the current node's height (skipped for
        # restrained nodes, i.e. res[1] == 1, and when the bound is exceeded) ---
        if adjnum == 0:
            step = action[0][0]*self.bound[0]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] !=1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] + step <= self.max_y_val:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] += step
                    if self.tell_action == True:
                        print('Z+:{}'.format(action[0][0]*self.bound[0]))
                else:
                    pass
            else:
                pass
                #self.bad = 1
        elif adjnum == 1:
            step = action[0][1]*self.bound[1]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] - step >= 0:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] -= step
                    if self.tell_action == True:
                        print('Z-:{}'.format(action[0][1]*self.bound[1]))
                else:
                    pass
            else:
                pass
                #self.bad = 1
        # --- sub-action 2: move the cursor to a neighbouring, unrestrained
        # node; the move is dropped at grid edges or restrained targets ---
        if movenum == 0:
            # move right x+1
            # do[z,x]
            if (do[1]+1 != (len(self.gen_model.n_u_name_div[0]))):
                if self.gen_model.n_u_name_div[do[0]][do[1]+1].res[1] !=1:
                    self.doing[i][1] = do[1]+1
                else:
                    pass
            else:
                #self.doing[i][1] = do[1]-1
                pass
        elif movenum == 1:
            # move left x-1
            # do[z,x]
            if (do[1] != 0):
                if self.gen_model.n_u_name_div[do[0]][do[1]-1].res[1] !=1:
                    self.doing[i][1] = do[1]-1
                else:
                    pass
            else:
                #self.doing[i][1] = do[1]+1
                pass
        elif movenum == 2:
            # move up z-1
            # do[z,x]
            if (do[0] != 0):
                if self.gen_model.n_u_name_div[do[0]-1][do[1]].res[1] !=1:
                    self.doing[i][0] = do[0]-1
                else:
                    pass
            else:
                #self.doing[i][0] = do[0]+1
                pass
        elif movenum == 3:
            # move down z+1
            # do[z,x]
            if (do[0]+1 != (len(self.gen_model.n_u_name_div))):
                if self.gen_model.n_u_name_div[do[0]+1][do[1]].res[1] !=1:
                    self.doing[i][0] = do[0]+1
                else:
                    pass
            else:
                #self.doing[i][0] = do[0]-1
                pass
        #announce1 = ['z_up','z_down']
        announce2 = ['move right','move left','move up','move down']
        if self.tell_action == True:
            #print(announce1[adjnum-1]) # print out action
            # NOTE(review): this prints announce2[movenum-1], so for
            # movenum == 0 the label shown is 'move down' -- confirm intent.
            print(announce2[movenum-1]) # print out action
        # Re-analyse and return the state at the (possibly new) cursor.
        self._update_metagrid()
        x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        next_state = np.array(x)
        return next_state
    def _gen_gen_reward_condition(self):
        """Re-analyse the model and cache the post-action strain energy."""
        # Calculate next state strain energy
        self.model.restore()
        self.model.gen_all()
        self.next_strain_e = self.model.U_full
    def _game_get_reward(self,agent):
        """Accumulate reward for ``agent`` and return (reward, done_flag)."""
        # Reward: per-mille strain-energy improvement relative to the
        # episode's initial strain energy (values are indexed with [0]).
        self.reward[agent] += 1000*(self.strain_e[0]-self.next_strain_e[0])/(self.int_strain_e[0])
        if self.game_step == self.end_step:
            # Episode length reached -> signal done to the caller.
            self.done_counter = 1
        return self.reward[agent],self.done_counter
def reset(self):
self.state = []
self.action = []
self.reward = []
self.doing = []
self.doing = [[1,1],[len(self.gen_model.n_u_name_div)-2,len(self.gen_model.n_u_name_div[0])-2],
[1,len(self.gen_model.n_u_name_div[0])-2],
[len(self.gen_model.n_u_name_div)-2,1]
]
for i in range(self.num_agents):
self.reward.append(0)
#self.doing.append([0,0])
self.next_state = []
self.done = []
self.game_step = 1
self.xmax = 0
self.xmin = 0
self.ymax = 0
self.ymin = 0
self.zmax = 0
self.zmin = 0
self.dymax = 0
self.dymin = 0
self.sdyval = 0
self.dmax0 = 0
self.dmaxset = 0
self.dtxmax = 0
self.dtxmin = 0
self.dtymax = 0
self.dtymin = 0
self.dtzmax = 0
self.dtzmin = 0
self.dtxmax0 = 0
self.dtymax0 = 0
self.dtzmax0 = 0
self.int_strain_e = 0
self.strain_e = 0
self.next_strain_e =0
self.reward_counter = []
for i in range(self.num_agents):
self.reward.append(0)
self.reward_counter.append(0)
self.done_counter = 0
self.bad = 0
def step(self):
self.state = self.next_state
self.action = []
#self.reward = []
for i in range(len(self.reward)):
self.reward[i] = 0
self.next_state = []
self.done = []
self.game_step += 1
#=============================================================================
# GAME 5
class Game5:
    """Multi-agent surface-minimisation game.

    Each agent owns a cursor on the ``num_z x num_x`` node grid and emits a
    6-vector action per step: two "adjust height" scores (raise / lower the
    current node by ``y_step``) and four "move cursor" scores
    (right / left / up / down).  The objective is to reduce the structure's
    total surface (``gen_model.surface_1``).  Attributes and the
    reset/step/_game_* methods follow the interface shared by the other
    Game* classes so the same MADDPG driver can run any of them.
    """
    def __init__(self, end_step, alpha, max_y_val, model, num_agents=1, render=0, tell_action=False):
        self.name = 'GAME 5'
        self.description = 'AGENT HAS 2 SUB ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)'
        self.objective = 'REDUCE TOTAL SURFACE'
        self.tell_action = tell_action
        self.num_agents = num_agents
        self.gen_model = model            # grid/model generator (nodes, surface helpers)
        self.model = model.model          # underlying structural model
        self.num_x = model.num_x
        self.num_z = model.num_z
        self.render = render              # if render == 0, no rendering
        self.game_step = 1
        self.game_type = 0                # selects which state-vector slice the getters return
        self.end_step = end_step          # episode length (steps)
        self.alpha = alpha                # node-adjustment magnitude as a fraction of span
        self.y_step = self.alpha * self.gen_model.span
        # =======================
        # State/Action buffers for MADDPG
        # =======================
        self.state = []
        self.action = []                  # actions from the RL agent or a data file
        self.reward = []
        self.next_state = []
        self.done = []
        # =======================
        # Game rules
        # =======================
        # Cursor [z, x] of each agent (up to 6 agents supported); distinct lists.
        self.doing = [[0, 0] for _ in range(6)]
        # Per-node state vectors; maximum num_state in this suite is 23.
        self.metagrid = []
        for i in range(self.num_z):
            self.metagrid.append([])
            for j in range(self.num_x):
                self.metagrid[-1].append([0] * 23)
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001                 # small noise factor
        # =========================================
        # Deformation extrema (filled by _update_metagrid)
        # =========================================
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val
        # Height adjustment is bounded by y_step per move; cursor moves are 0/1.
        self.bound = [self.y_step, self.y_step, 1, 1, 1, 1]
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []
        for i in range(self.num_agents):
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')

    def set_gametype(self, game_type):
        """Select which slice of the per-node state vector the getters return."""
        self.game_type = game_type

    def _update_metagrid(self):
        """Re-analyse the model and refresh the per-node state entries."""
        self.model.restore()
        self.model.gen_all()
        xlist = []
        ylist = []
        zlist = []
        dylist = []
        dtxlist = []
        dtylist = []
        dtzlist = []
        for node in self.model.nodes:
            xlist.append(node.coord[0])
            ylist.append(node.coord[1])
            zlist.append(node.coord[2])
            dylist.append(node.global_d[1][0])
            dtxlist.append(node.global_d[3][0])
            dtylist.append(node.global_d[4][0])
            dtzlist.append(node.global_d[5][0])
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        # NOTE(review): computed from the *previous* update's dymin (dymin is
        # refreshed below); preserved as-is -- confirm the ordering is intended.
        self.sdyval = self.sval * self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        if self.dmaxset == 0:
            # First analysis only: remember reference extrema for normalisation.
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max([abs(min(dtxlist)), abs(max(dtxlist))])
            self.dtymax0 = max([abs(min(dtylist)), abs(max(dtylist))])
            self.dtzmax0 = max([abs(min(dtzlist)), abs(max(dtzlist))])
            self.dmaxset = 1
        for i in range(self.num_z):
            for j in range(self.num_x):
                dtxi,dtzi,dtxj_up,dtxj_down,dtxj_left,dtxj_right,dtzj_up,dtzj_down,dtzj_left,dtzj_right,di,dj_up,dj_down,dj_left,dj_right,n_up,n_down,n_left,n_right,pos1,pos2,bc,geo = state_data(i,j,self)
                # Game5 uses the short 7-entry state: neighbour info, position, geometry.
                self.metagrid[i][j] = [
                    [n_up], [n_down], [n_left], [n_right],
                    [pos1], [pos2],
                    [geo]
                ]

    def _game_gen_state_condi(self):
        """Cache the current total surface (and the initial one on step 1)."""
        self.gen_model.gen_surface1()
        self.strain_e = self.gen_model.surface_1
        if self.game_step == 1:
            # Baseline for the episode's reward normalisation.
            self.int_strain_e = self.gen_model.surface_1

    def _game_get_1_state(self, do, multi=False):
        """Return the state vector at cursor ``do = [z, x]``.

        With ``multi=True`` the metagrid is assumed to be fresh already.
        """
        if not multi:
            self._update_metagrid()
        return np.array(self.metagrid[do[0]][do[1]])

    def _game_get_next_state(self, do, action, i=0, multi=False):
        """Apply agent ``i``'s 6-vector ``action`` at cursor ``do`` and return
        the state at the (possibly moved) cursor.

        action[0][0]/[1]: raise/lower the current node; action[0][2]..[5]:
        move the cursor right/left/up/down (winner-take-all per sub-action).
        """
        adjnum = [action[0][0], action[0][1]]
        movenum = [action[0][2], action[0][3], action[0][4], action[0][5]]
        movenum = movenum.index(max(movenum))
        adjnum = adjnum.index(max(adjnum))
        # --- sub-action 1: adjust the current node's height (skipped for
        # restrained nodes, res[1] == 1, and when the bound is exceeded) ---
        if adjnum == 0:
            step = action[0][0] * self.bound[0]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] + step <= self.max_y_val:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] += step
                    if self.tell_action == True:
                        print('Z+:{}'.format(action[0][0] * self.bound[0]))
        elif adjnum == 1:
            step = action[0][1] * self.bound[1]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] - step >= 0:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] -= step
                    if self.tell_action == True:
                        print('Z-:{}'.format(action[0][1] * self.bound[1]))
        # --- sub-action 2: move the cursor (dropped at the grid edges) ---
        if movenum == 0 and (do[1] + 1 != len(self.gen_model.n_u_name_div[0])):
            self.doing[i][1] = do[1] + 1      # move right (x+1)
        elif movenum == 1 and (do[1] != 0):
            self.doing[i][1] = do[1] - 1      # move left (x-1)
        elif movenum == 2 and (do[0] != 0):
            self.doing[i][0] = do[0] - 1      # move up (z-1)
        elif movenum == 3 and (do[0] + 1 != len(self.gen_model.n_u_name_div)):
            self.doing[i][0] = do[0] + 1      # move down (z+1)
        announce2 = ['move right', 'move left', 'move up', 'move down']
        if self.tell_action == True:
            # NOTE(review): preserved from the original -- prints
            # announce2[movenum-1], i.e. the label is off by one for movenum 0.
            print(announce2[movenum - 1])
        if not multi:
            self._update_metagrid()
        if self.game_type == 0:
            print('There is no game type')
            # BUG FIX: the original left ``x`` unbound here (UnboundLocalError
            # on the default game_type); fall back to the full state vector.
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        elif self.game_type == 1:
            # Theta-ij Pos Bc (numstate = 7)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][5:-1]
        elif self.game_type == 2:
            # Z/Zmax Pos Bc (numstate = 4)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][9:]
        elif self.game_type == 3:
            # Theta-ij di dj (numstate = 9)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][:9]
        else:
            # Theta-ij di dj Z/Zmax Pos Bc (numstate = 13): game_type 4 and
            # any unknown type use the full vector.
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        return np.array(x)

    def _game_get_next_state_maddpg(self, do, i):
        """Return agent ``i``'s state from the already-updated metagrid."""
        return np.array(self.metagrid[self.doing[i][0]][self.doing[i][1]])

    # Value used by the reward system / 報酬制度での価値利用を計算する機能
    def _gen_gen_reward_condition(self):
        """Cache the post-action total surface used by the reward rule."""
        self.gen_model.gen_surface1()
        self.next_strain_e = self.gen_model.surface_1

    def _game_get_reward(self, agent):
        """Accumulate the per-mille surface improvement for ``agent`` and
        return (reward, done_flag)."""
        self.reward[agent] += 1000 * (self.strain_e[0] - self.next_strain_e[0]) / (self.int_strain_e[0])
        if self.game_step == self.end_step:
            self.done_counter = 1
        return self.reward[agent], self.done_counter

    def reset(self):
        """Reset episode bookkeeping; cursors restart at the four interior
        corners of the node grid."""
        self.state = []
        self.action = []
        self.reward = []
        nz = len(self.gen_model.n_u_name_div)
        nx = len(self.gen_model.n_u_name_div[0])
        self.doing = [[1, 1], [nz - 2, nx - 2], [1, nx - 2], [nz - 2, 1]]
        self.next_state = []
        self.done = []
        self.game_step = 1
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []
        for i in range(self.num_agents):
            # BUG FIX: the original appended to self.reward in two separate
            # loops, leaving 2*num_agents entries after every reset; append
            # exactly once per agent, matching __init__.
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0

    def step(self):
        """Advance bookkeeping to the next game step."""
        self.state = self.next_state
        self.action = []
        for i in range(len(self.reward)):
            self.reward[i] = 0
        self.next_state = []
        self.done = []
        self.game_step += 1
#=============================================================================
# GAME 6
class Game6:
    """Multi-agent strain-energy-minimisation game.

    Identical rules to Game5 (per-step 6-vector action: raise/lower the
    current node, move the cursor right/left/up/down) but the objective is
    to reduce the structure's strain energy (``model.U_full``) and the full
    23-entry per-node state vector is used.
    """
    def __init__(self, end_step, alpha, max_y_val, model, num_agents=1, render=0, tell_action=False):
        self.name = 'GAME 6'
        self.description = 'AGENT HAS 2 SUB ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)'
        self.objective = 'REDUCE STRAIN ENERGY'
        self.tell_action = tell_action
        self.num_agents = num_agents
        self.gen_model = model            # grid/model generator
        self.model = model.model          # underlying structural model
        self.num_x = model.num_x
        self.num_z = model.num_z
        self.render = render              # if render == 0, no rendering
        self.game_step = 1
        self.game_type = 0                # selects which state-vector slice the getters return
        self.end_step = end_step          # episode length (steps)
        self.alpha = alpha                # node-adjustment magnitude as a fraction of span
        self.y_step = self.alpha * self.gen_model.span
        # =======================
        # State/Action buffers for MADDPG
        # =======================
        self.state = []
        self.action = []                  # actions from the RL agent or a data file
        self.reward = []
        self.next_state = []
        self.done = []
        # =======================
        # Game rules
        # =======================
        # Cursor [z, x] of each agent (up to 6 agents supported); distinct lists.
        self.doing = [[0, 0] for _ in range(6)]
        # Per-node state vectors; maximum num_state in this suite is 23.
        self.metagrid = []
        for i in range(self.num_z):
            self.metagrid.append([])
            for j in range(self.num_x):
                self.metagrid[-1].append([0] * 23)
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.sval = 0.001                 # small noise factor
        # =========================================
        # Deformation extrema (filled by _update_metagrid)
        # =========================================
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.max_y_val = max_y_val
        # Height adjustment is bounded by y_step per move; cursor moves are 0/1.
        self.bound = [self.y_step, self.y_step, 1, 1, 1, 1]
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []
        for i in range(self.num_agents):
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0
        print('-------------------------------------------------------')
        print(self.description)
        print(self.objective)
        print('GAME WILL BE ENDED AFTER {} STEP'.format(self.end_step))
        print('-------------------------------------------------------')

    def set_gametype(self, game_type):
        """Select which slice of the per-node state vector the getters return."""
        self.game_type = game_type

    def _update_metagrid(self):
        """Re-analyse the model and refresh the per-node state entries."""
        self.model.restore()
        self.model.gen_all()
        xlist = []
        ylist = []
        zlist = []
        dylist = []
        dtxlist = []
        dtylist = []
        dtzlist = []
        for node in self.model.nodes:
            xlist.append(node.coord[0])
            ylist.append(node.coord[1])
            zlist.append(node.coord[2])
            dylist.append(node.global_d[1][0])
            dtxlist.append(node.global_d[3][0])
            dtylist.append(node.global_d[4][0])
            dtzlist.append(node.global_d[5][0])
        self.xmax = max(xlist)
        self.xmin = min(xlist)
        self.ymax = max(ylist)
        self.ymin = min(ylist)
        self.zmax = max(zlist)
        self.zmin = min(zlist)
        # NOTE(review): computed from the *previous* update's dymin (dymin is
        # refreshed below); preserved as-is -- confirm the ordering is intended.
        self.sdyval = self.sval * self.dymin
        self.dtxmax = max(dtxlist)
        self.dtxmin = min(dtxlist)
        self.dtymax = max(dtylist)
        self.dtymin = min(dtylist)
        self.dtzmax = max(dtzlist)
        self.dtzmin = min(dtzlist)
        self.dymax = max(dylist)
        self.dymin = min(dylist)
        if self.dmaxset == 0:
            # First analysis only: remember reference extrema for normalisation.
            self.dmax0 = abs(min(dylist))
            self.dtxmax0 = max([abs(min(dtxlist)), abs(max(dtxlist))])
            self.dtymax0 = max([abs(min(dtylist)), abs(max(dtylist))])
            self.dtzmax0 = max([abs(min(dtzlist)), abs(max(dtzlist))])
            self.dmaxset = 1
        for i in range(self.num_z):
            for j in range(self.num_x):
                dtxi,dtzi,dtxj_up,dtxj_down,dtxj_left,dtxj_right,dtzj_up,dtzj_down,dtzj_left,dtzj_right,di,dj_up,dj_down,dj_left,dj_right,n_up,n_down,n_left,n_right,pos1,pos2,bc,geo = state_data(i,j,self)
                # Game6 uses the full 23-entry state vector.
                self.metagrid[i][j] = [[dtxi],[dtzi],[dtxj_up],[dtxj_down],[dtxj_left],[dtxj_right],[dtzj_up],[dtzj_down],[dtzj_left],[dtzj_right],[di],[dj_up],[dj_down],[dj_left],[dj_right],[n_up],[n_down],[n_left],[n_right],[pos1],[pos2],[bc],[geo]]

    def _game_gen_state_condi(self):
        """Run the analysis and cache the current strain energy (and the
        initial strain energy on step 1)."""
        self.model.restore()
        self.model.gen_all()
        self.strain_e = self.model.U_full
        if self.game_step == 1:
            # Baseline for the episode's reward normalisation.
            self.int_strain_e = self.model.U_full

    def _game_get_1_state(self, do, multi=False):
        """Return the state vector at cursor ``do = [z, x]``.

        With ``multi=True`` the metagrid is assumed to be fresh already.
        """
        if not multi:
            self._update_metagrid()
        return np.array(self.metagrid[do[0]][do[1]])

    def _game_get_next_state(self, do, action, i=0, multi=False):
        """Apply agent ``i``'s 6-vector ``action`` at cursor ``do`` and return
        the state at the (possibly moved) cursor.

        action[0][0]/[1]: raise/lower the current node; action[0][2]..[5]:
        move the cursor right/left/up/down (winner-take-all per sub-action).
        """
        adjnum = [action[0][0], action[0][1]]
        movenum = [action[0][2], action[0][3], action[0][4], action[0][5]]
        movenum = movenum.index(max(movenum))
        adjnum = adjnum.index(max(adjnum))
        # --- sub-action 1: adjust the current node's height (skipped for
        # restrained nodes, res[1] == 1, and when the bound is exceeded) ---
        if adjnum == 0:
            step = action[0][0] * self.bound[0]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] + step <= self.max_y_val:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] += step
                    if self.tell_action == True:
                        print('Z+:{}'.format(action[0][0] * self.bound[0]))
        elif adjnum == 1:
            step = action[0][1] * self.bound[1]
            if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
                if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] - step >= 0:
                    self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] -= step
                    if self.tell_action == True:
                        print('Z-:{}'.format(action[0][1] * self.bound[1]))
        # --- sub-action 2: move the cursor (dropped at the grid edges) ---
        if movenum == 0 and (do[1] + 1 != len(self.gen_model.n_u_name_div[0])):
            self.doing[i][1] = do[1] + 1      # move right (x+1)
        elif movenum == 1 and (do[1] != 0):
            self.doing[i][1] = do[1] - 1      # move left (x-1)
        elif movenum == 2 and (do[0] != 0):
            self.doing[i][0] = do[0] - 1      # move up (z-1)
        elif movenum == 3 and (do[0] + 1 != len(self.gen_model.n_u_name_div)):
            self.doing[i][0] = do[0] + 1      # move down (z+1)
        announce2 = ['move right', 'move left', 'move up', 'move down']
        if self.tell_action == True:
            # NOTE(review): preserved from the original -- prints
            # announce2[movenum-1], i.e. the label is off by one for movenum 0.
            print(announce2[movenum - 1])
        if not multi:
            self._update_metagrid()
        if self.game_type == 0:
            print('There is no game type')
            # BUG FIX: the original left ``x`` unbound here (UnboundLocalError
            # on the default game_type); fall back to the full state vector.
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        elif self.game_type == 1:
            # Theta-ij Pos Bc (numstate = 7)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][5:-1]
        elif self.game_type == 2:
            # Z/Zmax Pos Bc (numstate = 4)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][9:]
        elif self.game_type == 3:
            # Theta-ij di dj (numstate = 9)
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]][:9]
        else:
            # Theta-ij di dj Z/Zmax Pos Bc (numstate = 13): game_type 4 and
            # any unknown type use the full vector.
            x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
        return np.array(x)

    def _game_get_next_state_maddpg(self, do, i):
        """Return agent ``i``'s state from the already-updated metagrid."""
        return np.array(self.metagrid[self.doing[i][0]][self.doing[i][1]])

    def _gen_gen_reward_condition(self):
        """Re-analyse the model and cache the post-action strain energy."""
        self.model.restore()
        self.model.gen_all()
        self.next_strain_e = self.model.U_full

    def _game_get_reward(self, agent):
        """Accumulate the per-mille strain-energy improvement for ``agent``
        and return (reward, done_flag)."""
        self.reward[agent] += 1000 * (self.strain_e[0] - self.next_strain_e[0]) / (self.int_strain_e[0])
        if self.game_step == self.end_step:
            self.done_counter = 1
        return self.reward[agent], self.done_counter

    def reset(self):
        """Reset episode bookkeeping; cursors restart at the four interior
        corners of the node grid."""
        self.state = []
        self.action = []
        self.reward = []
        nz = len(self.gen_model.n_u_name_div)
        nx = len(self.gen_model.n_u_name_div[0])
        self.doing = [[1, 1], [nz - 2, nx - 2], [1, nx - 2], [nz - 2, 1]]
        self.next_state = []
        self.done = []
        self.game_step = 1
        self.xmax = 0
        self.xmin = 0
        self.ymax = 0
        self.ymin = 0
        self.zmax = 0
        self.zmin = 0
        self.dymax = 0
        self.dymin = 0
        self.sdyval = 0
        self.dmax0 = 0
        self.dmaxset = 0
        self.dtxmax = 0
        self.dtxmin = 0
        self.dtymax = 0
        self.dtymin = 0
        self.dtzmax = 0
        self.dtzmin = 0
        self.dtxmax0 = 0
        self.dtymax0 = 0
        self.dtzmax0 = 0
        self.int_strain_e = 0
        self.strain_e = 0
        self.next_strain_e = 0
        self.reward_counter = []
        for i in range(self.num_agents):
            # BUG FIX: the original appended to self.reward in two separate
            # loops, leaving 2*num_agents entries after every reset; append
            # exactly once per agent, matching __init__.
            self.reward.append(0)
            self.reward_counter.append(0)
        self.done_counter = 0

    def step(self):
        """Advance bookkeeping to the next game step."""
        self.state = self.next_state
        self.action = []
        for i in range(len(self.reward)):
            self.reward[i] = 0
        self.next_state = []
        self.done = []
        self.game_step += 1
'''
#=============================================================================
# GAME TEMPLATE
class game1:
def __init__(self, num_agents =1):
self.name = 'GAME 1'
self.description = 'No description yet'
self.num_agents = num_agents
#=======================
#State Action'
# for MADDPG
#=======================
self.state = []
self.action = [] #get action from rl or data file
self.reward = []
self.next_state = []
self.done = []
def _game_get_1_state(self):
# state is make from some randomness
state = None
return state
def _game_gen_1_state(self):
x =[]
for i in range(self.num_agents):
x.append(_game_get_1_state())
self.state.append(x)
def _game_get_next_state(self,action=None):
# next_state = f(action)
next_state = None
done = None
return next_state,done
def _game_gen_next_state(self):
x = []
y = []
for i in range(self.num_agents):
x.append(_game_get_next_state()[0])
y.append(_game_get_next_state()[1])
self.next_state.append(x)
self.done.append(y)
def _game_get_reward(self):
# reward = f(state,next_state)
reward = None
return reward
def _game_gen_reward(self):
# gen reward from current state
x = []
for i in range(self.num_agents):
x.append(_game_get_reward(self.state[-1][i],self.next_state[-1][i]))
self.reward.append(x)
def reset(self):
self.state = []
self.action = []
self.reward = []
self.next_state = []
self.done = []
def step(self):
self.state = self.next_state
self.action = []
self.reward = []
self.next_state = []
self.done = []
#=============================================================================
'''
| StarcoderdataPython |
8016299 | <filename>config.py
# Device id can be obtained by calling MCP_GetDeviceId on the Wii U
# Serial number can be found on the back of the Wii U
# Console identity (see the notes above for how to obtain these values).
DEVICE_ID = 1234567890
SERIAL_NUMBER = "..."
SYSTEM_VERSION = 0x220
REGION = 4 #EUR
COUNTRY = "NL"
# Nintendo Network account credentials used by the scripts.
USERNAME = "..." #Nintendo network id
PASSWORD = "..." #Nintendo network password
FRIEND_NAME = "..." #Nintendo network id
ATTACKER_IP = [192, 168, 178, 161] # IP of the device which runs the python scripts
TCP_SERVER_PORT = 12345 # A free usable port on the attacking device
CODE_BIN_PATH = 'code.bin' # Path to the code.bin payload that will be executed
CODE_BIN_TARGET_ADDR = 0x011DE200 # Address where the payload should be copied to
CODE_BIN_ENTRYPOINT = CODE_BIN_TARGET_ADDR # Absolute address of the entrypoint of the copied payload
| StarcoderdataPython |
6558300 | """
@author: <NAME>, and Conte
"""
import numpy as np
import scipy.stats as stats
class Uniform:
    """Uniform distribution on the interval [lower, upper]."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

    def generate_rns(self, N):
        """Draw N iid samples from U(lower, upper)."""
        return (self.upper - self.lower) * np.random.rand(N) + self.lower

    def log_pdf_eval(self, x):
        """Log-density at x; -inf outside the support."""
        if (x - self.upper) * (x - self.lower) <= 0:
            # x lies between lower and upper.
            return np.log(1 / (self.upper - self.lower))
        # BUG FIX: np.Inf was removed in NumPy 2.0; use the canonical np.inf.
        return -np.inf
class Halfnormal:
    """Half-normal distribution with scale sig (support x >= 0)."""

    def __init__(self, sig):
        self.sig = sig

    def generate_rns(self, N):
        """Draw N iid samples (absolute value of N(0, sig) draws)."""
        return self.sig * np.abs(np.random.randn(N))

    def log_pdf_eval(self, x):
        """Log-density at x; -inf for x < 0."""
        if x >= 0:
            return -np.log(self.sig) + 0.5 * np.log(2 / np.pi) - ((x * x) / (2 * self.sig * self.sig))
        # BUG FIX: np.Inf was removed in NumPy 2.0; use the canonical np.inf.
        return -np.inf
class Normal:
    """Gaussian distribution with mean mu and standard deviation sig."""

    def __init__(self, mu, sig):
        self.mu = mu
        self.sig = sig

    def generate_rns(self, N):
        # Scale and shift standard-normal draws.
        return self.sig * np.random.randn(N) + self.mu

    def log_pdf_eval(self, x):
        # Standardised residual, then the usual Gaussian log-density.
        z = (x - self.mu) / self.sig
        return -0.5 * np.log(2 * np.pi) - np.log(self.sig) - 0.5 * (z ** 2)
class TrunNormal:
    """Normal(mu, sig) truncated to the interval [a, b]."""

    def __init__(self, mu, sig, a, b):
        self.mu = mu
        self.sig = sig
        self.a = a
        self.b = b

    def _dist(self):
        # scipy parameterises the truncation bounds in standard-normal units.
        alpha = (self.a - self.mu) / self.sig
        beta = (self.b - self.mu) / self.sig
        return stats.truncnorm(alpha, beta, loc=self.mu, scale=self.sig)

    def generate_rns(self, N):
        return self._dist().rvs(N)

    def log_pdf_eval(self, x):
        return self._dist().logpdf(x)
class mvNormal:
    """Multivariate normal with mean vector mu and covariance matrix E."""

    def __init__(self, mu, E):
        self.mu = mu
        self.E = E
        self.d = len(mu)
        # Cache log|E| and E^-1 so repeated density evaluations are cheap.
        self.logdetE = np.log(np.linalg.det(self.E))
        self.Einv = np.linalg.inv(E)

    def generate_rns(self, N):
        return np.random.multivariate_normal(self.mu, self.E, N)

    def log_pdf_eval(self, x):
        xc = (x - self.mu)
        # Quadratic form (x - mu)^T E^-1 (x - mu).
        quad = np.transpose(xc) @ self.Einv @ xc
        return -(0.5 * self.d * np.log(2 * np.pi)) - (0.5 * self.logdetE) - (0.5 * quad)
3300006 | from state import State
from info import Info
from math import pi
import numpy as np
class Choose(State):
    """Level-up menu state: ranks the four offered items against a fixed
    preference list and queues cursor movements to select the best one.

    The queued movements are angles (radians): pi/2 = up, 3*pi/2 = down,
    and a trailing 0 triggers the space-bar press that confirms the pick.
    """

    def __init__(self, name):
        super().__init__(name)
        # Lookup values for the four item rows of the level-up menu.
        self.itemsPos = [
            (26, 16),
            (26, 28),
            (26, 41),
            (26, 53)
        ]
        # Items in descending priority; lower index = better item.
        # NOTE(review): these are 3-tuples while itemsPos holds 2-tuples, so
        # the membership test in tick() can never match as written, and the
        # ``info`` argument is never read -- this state looks unfinished;
        # confirm how item pixels are meant to be sampled.
        self.bestItemsSorted = [
            (17, 122, 132), # duplicator
            (23, 161, 99), # fire wand
            (36, 230, 63), # garlic
            (19, 135, 120), # magic wand
            (16, 109, 143), # bible
            (13, 34, 206), # laurel
            (13, 17, 215), # cross
            (15, 0, 240), # vandalier
            (36, 227, 64), # rune tracer
            (15, 0, 237), # axe
            (14, 0, 234), # empty tome
            (16, 116, 137), # spinach
            (24, 164, 97), # spellbinder
            (13, 0, 221), # candelabrador
            (13, 9, 218), # clover
            (20, 141, 114), # whip
            (22, 151, 107), # clock lancet
            (33, 211, 68), # knife
            (15, 0, 240), # peachone
            (15, 0, 240), # ebony wings
            (18, 129, 126), # santa water
            (29, 193, 77), # armor
            (21, 147, 110), # wings
            (32, 208, 69), # bone
            (13, 38, 203), # lighting ring
            (14, 0, 231), # attractorb
            (13, 38, 179), # pentagram
            (13, 83, 166), # hollow heart
            (30, 196, 75), # pummarola
            (35, 224, 64), # bracer
            (18, 125, 129), # crown
        ]
        self.queueSteps = []

    def tick(self, info: Info):
        """Advance one frame; returns (move, movement, pressSpaceBar)."""
        self.pressSpaceBar = False
        self.move = True
        if len(self.queueSteps) == 0:
            bestItem = 100
            itemPos = 0
            it = 0
            for pixel in self.itemsPos:
                if pixel in self.bestItemsSorted:
                    index = self.bestItemsSorted.index(pixel)
                    if index < bestItem:
                        # BUG FIX: the original assigned ``index = bestItem``,
                        # so bestItem never updated and the LAST matching row
                        # (not the best-ranked one) was kept.
                        bestItem = index
                        itemPos = it
                it += 1
            # Scroll to the top of the menu (3 moves up)...
            for i in range(3):
                self.queueSteps.append(pi/2)
            # ...then down to the chosen row, then confirm (0 -> space bar).
            for i in range(itemPos):
                self.queueSteps.append(3 * pi / 2)
            self.queueSteps.append(0)
        else:
            # Replay the queued movements one per tick.
            self.movement = self.queueSteps[0]
            if self.movement == 0: self.pressSpaceBar = True
            self.queueSteps.pop(0)
        return self.move, self.movement, self.pressSpaceBar
| StarcoderdataPython |
3371420 | import argparse
import bioc
import json
import itertools
from collections import defaultdict,Counter
def getID_FromLongestTerm(text, lookupDict):
    """
    Identify the entities appearing in a span of text, preferring the
    longest matches to reduce ambiguity.

    The text is lowercased and tokenized by a naive whitespace split.
    Search windows shrink from the full token count down to 1, and tokens
    consumed by a match are blanked out so shorter overlapping terms cannot
    re-match.  Example: for "the cheese pizza is good" with lookup
    {("cheese","pizza"): {(1, "cp")}, ("pizza",): {(2, "p")}}, only
    "cheese pizza" (wordlist 1) is found; the bare "pizza" entry is not.

    :param text: The text span to be searched (whitespace-tokenized).
    :param lookupDict: Maps token tuples to an iterable of
        (wordlist id, term id) pairs.
    :type text: str
    :type lookupDict: dict with tuple keys
    :returns: (wordlist id, matched surface text) pairs for every entity
        found — note the matched text, not the term id, is recorded.
    :rtype: set of tuple
    """
    terms = set()
    # Lowercase and do very naive tokenization of the text
    np = text.lower().split()
    # The length of each search string will decrease from the full length
    # of the text down to 1
    for l in reversed(range(1, len(np)+1)):
        # We move the search window through the text
        for i in range(len(np)-l+1):
            # Extract that window of text
            s = tuple(np[i:i+l])
            # Search for it in the dictionary
            if s in lookupDict:
                sTxt = " ".join(np[i:i+l])
                for wordlistid, _tid in lookupDict[s]:
                    terms.add((wordlistid, sTxt))
                # Blank out the matched tokens so shorter sub-terms of this
                # span cannot match again.
                np[i:i+l] = ["" for _ in range(l)]
    return terms
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Count cooccurrences between different terms within passages of a BioC-format corpus file. This uses a very naive method for tokenization (a simple white-space split) which should be replaced with a CoreNLP/Spacy implementation for real-world usage.')
    parser.add_argument('--biocFile',required=True,type=str,help='BioC file to use')
    parser.add_argument('--wordlist1',required=True,type=str,help='First wordlist to use (in JSON format)')
    parser.add_argument('--wordlist2',required=True,type=str,help='Second wordlist to use (in JSON format)')
    parser.add_argument('--outFile',required=True,type=str,help='File to output cooccurrence counts to')
    args = parser.parse_args()
    print("Loading wordlist...")
    with open(args.wordlist1) as f:
        wordlist1 = json.load(f)
    with open(args.wordlist2) as f:
        wordlist2 = json.load(f)
    lookup = defaultdict(set)
    print("Tokenizing wordlists...")
    # Index every term of both wordlists by its lowercased token tuple,
    # tagging each entry with the wordlist it came from (1 or 2).
    for id,terms in wordlist1.items():
        for term in terms:
            tokenized = term.lower().split()
            lookup[tuple(tokenized)].add( (1,id) )
    for id,terms in wordlist2.items():
        for term in terms:
            tokenized = term.lower().split()
            lookup[tuple(tokenized)].add( (2,id) )
    print("Processing document...")
    cooccurrences = Counter()
    # Stream the corpus document-by-document to keep memory bounded.
    with bioc.iterparse(args.biocFile) as parser:
        collection_info = parser.get_collection_info()
        for docid,document in enumerate(parser):
            if docid != 0 and (docid % 1000) == 0:
                print(" %d documents complete" % docid)
            for passage in document.passages:
                # Identify which terms appear in the passage (and find the longest terms first in order to avoid ambiguity)
                termids = getID_FromLongestTerm(passage.text,lookup)
                term1ids = [ id for wordlistid,id in termids if wordlistid == 1 ]
                term2ids = [ id for wordlistid,id in termids if wordlistid == 2 ]
                # NOTE(review): this sort has no effect — term1ids/term2ids
                # were already derived and termids is not used again.
                termids = sorted(termids)
                # Every (wordlist1, wordlist2) pair seen in the same passage
                # counts as one cooccurrence.
                for a,b in itertools.product(term1ids,term2ids):
                    cooccurrences[(a,b)] += 1
    print("Writing cooccurrences to file")
    with open(args.outFile,'w') as outF:
        for (a,b),count in cooccurrences.items():
            outF.write("%s\t%s\t%d\n" % (a,b,count))
    print("Complete.")
| StarcoderdataPython |
6647184 | <filename>scripts/get_vocab.py<gh_stars>10-100
import argparse
import functools
import itertools
import sys
from collections import Counter
from multiprocessing import Pool
def count(lines):
    """Tally whitespace-separated token frequencies across *lines*."""
    vocabulary = Counter()
    for line in lines:
        vocabulary.update(line.split())
    return vocabulary
def group(lines, group):
    """Split *lines* into *group* consecutive chunks.

    The first ``group - 1`` chunks each get ``len(lines) // group`` items;
    the final chunk absorbs any remainder.
    """
    base, leftover = divmod(len(lines), group)
    # leftover < group, so this adjustment is always zero; kept for exact
    # parity with the original chunk-size computation.
    base += leftover // group
    cuts = [k * base for k in range(group)] + [len(lines)]
    return [lines[lo:hi] for lo, hi in zip(cuts, cuts[1:])]
def parallel(r, n, buffer_size):
    """Count token frequencies from line iterator *r* with *n* worker processes.

    Reads up to *buffer_size* lines at a time, splits each batch into *n*
    chunks via group(), counts each chunk in a worker via count(), and merges
    the partial Counters into one running total.
    """
    pool = Pool(n)
    voc = Counter()
    while True:
        # Pull the next batch of lines; a short batch signals end of input.
        buffer = list(itertools.islice(r, buffer_size))
        if buffer:
            groups = group(buffer, n)
            vocs = pool.map(count, groups)
            # Counter.update returns None, so `or voc2` yields the merged
            # Counter while mutating voc2 in place.
            voc = functools.reduce(lambda voc1, voc2: voc2.update(voc1) or voc2, [voc] + vocs)
        if len(buffer) < buffer_size:
            break
    return voc
def main(args):
    """Build a frequency-sorted vocabulary from a corpus file or stdin.

    Writes "word count" lines to stdout, stopping at *limit* words or when
    a word's count falls below *occur*; reports token coverage on stderr.
    """
    corpus = args.corpus
    limit = args.limit
    occur = args.occur
    sys.stderr.write("corpus: %s, limit: %r, occur: %r\n" % (corpus or 'stdin', limit, occur))
    if limit is None:
        limit = sys.maxsize
    if occur is None:
        occur = 0
    vocab = Counter()
    if corpus:
        r = open(corpus)
    else:
        r = sys.stdin
    if args.parallel and args.parallel > 1:
        vocab = parallel(r, args.parallel, args.buffer)
    else:
        for l in r:
            vocab.update(l.split())
    if corpus:
        r.close()
    num_tok = sum(vocab.values())
    num_in_vocab = 0
    # most_common() yields (word, count) in descending count order.
    word2cnt = vocab.most_common()
    for i, (word, cnt) in enumerate(word2cnt):
        if cnt < occur or i >= limit:
            break
        sys.stdout.write("%s %d\n" % (word, cnt))
        num_in_vocab += cnt
    # NOTE(review): an empty corpus makes num_tok == 0 and this divides by
    # zero — confirm whether empty input should be guarded.
    coverage = num_in_vocab / num_tok
    sys.stderr.write(f"coverage: {coverage}\n")
# Command-line entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--corpus", type=str, help='text format')
    parser.add_argument("--limit", type=int)
    parser.add_argument("--occur", type=int)
    parser.add_argument("--parallel", type=int)
    parser.add_argument("--buffer", type=int, default=200000)
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
1957539 | from gensim.models import word2vec
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import classification_report
from sklearn.externals import joblib
import numpy as np
import pandas as pd
np.set_printoptions(threshold=np.inf)
#读取文件
test_df = pd.read_csv("test.csv")
train_df = pd.read_csv("train.csv")
dev_df = pd.read_csv("dev.csv")
#设置数值
embed_size = 200 # how big is each word vector
max_features = 800 # how many unique words to use (i.e num rows in embedding vector)
maxlen = 128 # max number of words in a question to use
train_X = train_df["x_train"].values
val_X = dev_df["x_valid"].values
test_X = test_df["x_test"].values
## Tokenize the sentences
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(train_X))
train_X = tokenizer.texts_to_sequences(train_X)
val_X = tokenizer.texts_to_sequences(val_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Pad the sentences
train_X = pad_sequences(train_X, maxlen=maxlen)
val_X = pad_sequences(val_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
#train_y = train_df['label'].values
#val_y = dev_df['label'].values
#test_y = test_df['label'].values
word_index = tokenizer.word_index
nb_words = len(word_index) # 200593
print(nb_words)
train_y = pd.get_dummies(train_df['label']).values
val_y = pd.get_dummies(dev_df['label']).values
test_y = pd.get_dummies(test_df['label']).values
word_index = tokenizer.word_index
nb_words = len(word_index) # 200593
print(nb_words)
print(train_X.shape)
print(val_X.shape)
print(test_X.shape)
print(train_y.shape)
print(val_y.shape)
print(test_y.shape)
#BiLSTM
def BiLSTM_model(test_X, test_y):
    """Load the pickled BiLSTM model and print test-set accuracy and a
    per-class classification report (classes '1'..'5')."""
    # NOTE(review): sklearn.externals.joblib is deprecated in modern
    # scikit-learn — confirm the installed version still provides it.
    model = joblib.load("senticorp_results.pkl")
    # Evaluate the model
    result = model.predict(test_X)  # predicted probability of each class per sample
    y_pred = result.argmax(axis=1)
    Y_test = test_y.argmax(axis=1)
    print('accuracy %s' % accuracy_score(y_pred, Y_test))
    target_names = ['1', '2', '3', '4', '5']
    print(classification_report(Y_test, y_pred, target_names=target_names))
BiLSTM_model( test_X, test_y)
| StarcoderdataPython |
6531836 | from __future__ import absolute_import
from setuptools import setup
# Packaging metadata for the generic board game socket server.
setup(
    name='BoardServer',
    version='0.1dev',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['boardserver'],
    scripts=['bin/board-serve.py'],
    # Declares the (empty here) plugin group used to register game modules.
    entry_points={'jrb_board.games': []},
    install_requires=['gevent', 'six'],
    license='LICENSE',
    description="A generic board game socket server.",
)
| StarcoderdataPython |
3254156 | <filename>episodes/v1.1/src/rounding_property_test.py
from rounding import percentages
from hypothesis import given, settings, note
from rounding.test_helpers import portfolios
def noround(portfolio):
    """Raw percentage weight of each position, rounded to 4 decimal places.

    Unlike percentages(), the per-position rounding here makes no attempt
    to force the values to sum to exactly 100.
    """
    total = sum(portfolio.values())
    return {asset: round(amount / total * 100, 4)
            for asset, amount in portfolio.items()}
# Shared Hypothesis example budget for every property test below.
examples = 1_000
@settings(max_examples=examples)
@given(portfolios())
def test_rounding_should_add_up_to_100(portfolio):
    # The rounded percentages must still sum to exactly 100.
    pcts = percentages(portfolio)
    total = sum(pcts.values())
    assert total == 100
@settings(max_examples=examples)
@given(portfolios())
def test_percentages_should_be_all_positive(portfolio):
    # Rounding must never push a position's share to zero or below.
    pcts = percentages(portfolio)
    is_positive = [v > 0 for _, v in pcts.items()]
    assert all(is_positive)
@settings(max_examples=examples)
@given(portfolios())
def test_error_should_be_minimal(portfolio):
    # Each rounded share stays within 1 percentage point of the raw share.
    pcts = percentages(portfolio)
    raw = noround(portfolio)
    errors = [pcts[k] - raw[k] for k in pcts.keys()]
    is_reasonable = [abs(e) < 1 for e in errors]
    assert all(is_reasonable)
# Regression fixture: a portfolio that previously violated the invariants.
portfolio_that_fails = {
    "APPL": 625616.1693652052,
    "BAR": 617938.9028649231,
    "BAZ": 618342.162300139,
    "BTC": 619632.897639023,
    "ETH": 620706.1826511598,
    "FOO": 617512.6030206202,
    "SHIB": 6888900.851008516,
    "TSLA": 618844.7209371484,
}
| StarcoderdataPython |
5072915 | <reponame>videoflow/videoflow-contrib
import os
from collections import defaultdict
from os import path as osp
import numpy as np
import torch
from scipy.interpolate import interp1d
def bbox_overlaps(boxes, query_boxes):
    """
    Pairwise IoU between two sets of corner-format boxes.

    Parameters
    ----------
    - boxes: (N, 4) ndarray or tensor or variable
    - query_boxes: (K, 4) ndarray or tensor or variable

    Returns
    -------
    - overlaps: (N, K) IoU matrix; an ndarray when the inputs were
      ndarrays, otherwise a tensor/variable.
    """
    came_as_numpy = isinstance(boxes, np.ndarray)
    if came_as_numpy:
        boxes = torch.from_numpy(boxes)
        query_boxes = torch.from_numpy(query_boxes)

    # Pixel-inclusive areas: +1 because both corner coordinates lie inside.
    areas_n = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    areas_k = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
              (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    # Broadcast (N,1) against (1,K) for all pairwise intersection extents;
    # clamp removes negative widths/heights from disjoint pairs.
    inter_w = (torch.min(boxes[:, 2:3], query_boxes[:, 2:3].t()) -
               torch.max(boxes[:, 0:1], query_boxes[:, 0:1].t()) + 1).clamp(min=0)
    inter_h = (torch.min(boxes[:, 3:4], query_boxes[:, 3:4].t()) -
               torch.max(boxes[:, 1:2], query_boxes[:, 1:2].t()) + 1).clamp(min=0)

    union = areas_n.view(-1, 1) + areas_k.view(1, -1) - inter_w * inter_h
    iou = inter_w * inter_h / union
    return iou.numpy() if came_as_numpy else iou
def interpolate(tracks):
    """Linearly fill the frame gaps of every track.

    *tracks* maps track id -> {frame: box(4,)}. Tracks seen in a single
    frame are copied through unchanged; tracks with two or more
    observations get a box for every integer frame between their first and
    last observed frame, each coordinate interpolated independently.
    """
    filled = {}
    for track_id, frame_to_box in tracks.items():
        filled[track_id] = {}
        frames, x0s, y0s, x1s, y1s = [], [], [], [], []
        for frame, box in frame_to_box.items():
            frames.append(frame)
            x0s.append(box[0])
            y0s.append(box[1])
            x1s.append(box[2])
            y1s.append(box[3])
        if len(frames) > 1:
            # One 1-D interpolant per box coordinate.
            fits = [interp1d(frames, vals) for vals in (x0s, y0s, x1s, y1s)]
            for frame in range(min(frames), max(frames) + 1):
                filled[track_id][frame] = np.array([fit(frame) for fit in fits])
        else:
            filled[track_id][frames[0]] = np.array([x0s[0], y0s[0], x1s[0], y1s[0]])
    return filled
def bbox_transform_inv(boxes, deltas):
# Input should be both tensor or both Variable and on the same device
if len(boxes) == 0:
return deltas.detach() * 0
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths.unsqueeze(1) + ctr_x.unsqueeze(1)
pred_ctr_y = dy * heights.unsqueeze(1) + ctr_y.unsqueeze(1)
pred_w = torch.exp(dw) * widths.unsqueeze(1)
pred_h = torch.exp(dh) * heights.unsqueeze(1)
pred_boxes = torch.cat(
[_.unsqueeze(2) for _ in [pred_ctr_x - 0.5 * pred_w,
pred_ctr_y - 0.5 * pred_h,
pred_ctr_x + 0.5 * pred_w,
pred_ctr_y + 0.5 * pred_h]], 2).view(len(boxes), -1)
return pred_boxes
def clip_boxes(boxes, im_shape):
    """
    Clip boxes to image boundaries.
    boxes must be tensor or Variable, im_shape can be anything but Variable
    """
    if not hasattr(boxes, 'data'):
        # Kept from the original implementation; the converted array is
        # never used afterwards.
        boxes_ = boxes.numpy()

    max_x = im_shape[1] - 1
    max_y = im_shape[0] - 1
    # View as (N, classes, 4) so every per-class box is clamped alike.
    boxes = boxes.view(boxes.size(0), -1, 4)
    clipped = torch.stack([
        boxes[:, :, 0].clamp(0, max_x),
        boxes[:, :, 1].clamp(0, max_y),
        boxes[:, :, 2].clamp(0, max_x),
        boxes[:, :, 3].clamp(0, max_y),
    ], 2)
    return clipped.view(clipped.size(0), -1)
def get_center(pos):
    """Midpoint (cx, cy) of the first box row of *pos*, as a CUDA tensor."""
    x1, y1, x2, y2 = pos[0, 0], pos[0, 1], pos[0, 2], pos[0, 3]
    return torch.Tensor([(x1 + x2) / 2, (y1 + y2) / 2]).cuda()
def get_width(pos):
    """Width (x2 - x1) of the first box row of *pos*."""
    x1, x2 = pos[0, 0], pos[0, 2]
    return x2 - x1
def get_height(pos):
    """Height (y2 - y1) of the first box row of *pos*."""
    y1, y2 = pos[0, 1], pos[0, 3]
    return y2 - y1
def make_pos(cx, cy, width, height):
    """Build a (1, 4) corner-format box centred at (cx, cy), on the GPU."""
    half_w = width / 2
    half_h = height / 2
    return torch.Tensor([[cx - half_w, cy - half_h,
                          cx + half_w, cy + half_h]]).cuda()
def warp_pos(pos, warp_matrix):
    """Apply a 2x3 affine *warp_matrix* to both corners of the (1, 4) box
    *pos* and return the warped box as a (1, 4) CUDA tensor."""
    top_left = torch.Tensor([pos[0, 0], pos[0, 1], 1]).view(3, 1)
    bottom_right = torch.Tensor([pos[0, 2], pos[0, 3], 1]).view(3, 1)
    warped_tl = torch.mm(warp_matrix, top_left).view(1, 2)
    warped_br = torch.mm(warp_matrix, bottom_right).view(1, 2)
    return torch.cat((warped_tl, warped_br), 1).view(1, -1).cuda()
4889643 | <reponame>kmshin1397/ETSimulations
class ParticleSet:
    """A group of particles of the same kind within one TEM-Simulator run.

    Mirrors the particleset segments of a TEM-Simulator configuration file.

    Attributes:
        name: Particle name matching the TEM-Simulator configuration entry
        source: Source MRC or PDB file for the particle model
        coordinates_to_simulate: XYZ positions handed to the simulator
        coordinates_to_save: XYZ positions recorded in the run metadata
        orientations_to_simulate: ZXZ Euler rotations handed to the simulator
        orientations_to_save: ZXZ Euler rotations recorded in the metadata
        noisy_orientations: Noisy copies of simulated orientations, kept
            for downstream processing
        num_particles: Number of particles in the set
        key: True for the particles of interest (the ones that will be
            averaged), as opposed to e.g. fake gold fiducials
    """

    def __init__(self, name, key=False):
        self.name = name
        self.source = None
        self.coordinates_to_simulate = []
        self.coordinates_to_save = []
        self.orientations_to_simulate = []
        self.orientations_to_save = []
        self.noisy_orientations = []
        self.num_particles = 0
        self.key = key

    def add_coordinate_to_simulate(self, coord):
        """Append an XYZ coordinate to the list handed to the simulator."""
        self.coordinates_to_simulate.append(coord)

    def add_coordinate_to_save(self, coord):
        """Append an XYZ coordinate to the list recorded in the metadata."""
        self.coordinates_to_save.append(coord)

    def add_orientation_to_simulate(self, orientation, noisy_version=None):
        """Queue a ZXZ orientation for simulation.

        Args:
            orientation: The true orientation to pass to the TEM-Simulator
            noisy_version: Optional noisy copy to record for processing

        Returns: None
        """
        self.orientations_to_simulate.append(orientation)
        if noisy_version:
            self.noisy_orientations.append(noisy_version)

    def add_orientation_to_save(self, orientation):
        """Record a ZXZ orientation in the metadata only (not simulated).

        Args:
            orientation: The orientation to save in the metadata

        Returns: None
        """
        self.orientations_to_save.append(orientation)

    def add_source(self, source):
        """Set the particle source file for the particle set."""
        self.source = source
| StarcoderdataPython |
3243289 | import unittest
#from extracttext import ExtractText
# NOTE(review): the entire test body is parked inside a docstring (and the
# ExtractText import above is commented out), so this suite currently runs
# zero assertions — confirm whether it should be re-enabled.
class Test_getTextFromUrl(unittest.TestCase):
    """ extractText = ExtractText()
    def test_ret_type(self):
        textFromUrl = self.extractText.getTextFromUrl(
            'https://sara-sabr.github.io/ITStrategy/home.html')
        assert isinstance(
            textFromUrl, str), 'getTextFromUrl did not return a string'
    """
if __name__ == '__main__':
    unittest.main()
177063 | <reponame>musyoku/chainer-gqn-playground<filename>gqn/mathematics.py
import math
def yaw(eye, center):
    """Heading angle (radians) of the eye->center direction.

    The direction is projected onto the XZ plane and measured against the
    +Z ("frontward") axis; positive angles turn toward +X, negative toward
    -X, giving a result in [-pi, pi].

    Args:
        eye: (x, y, z) camera position.
        center: (x, y, z) look-at point.

    Returns:
        Signed yaw angle in radians.
    """
    eye_x, eye_z = eye[0], eye[2]
    center_x, center_z = center[0], center[2]
    direction = (center_x - eye_x, center_z - eye_z)
    norm = math.sqrt(direction[0] * direction[0] +
                     direction[1] * direction[1])
    # The dot product with the frontward axis (0, 1) reduces to the Z
    # component.  The epsilon guards a zero-length direction (eye == center),
    # matching pitch(); the clamp keeps floating-point rounding from pushing
    # the cosine outside acos's [-1, 1] domain.
    cos = direction[1] / (norm + 1e-16)
    rad = math.acos(max(-1.0, min(1.0, cos)))
    if direction[0] < 0:
        rad = -rad
    return rad
def pitch(eye, center):
    """Elevation angle (radians) from *eye* toward *center*.

    The angle is measured between the view direction and the horizontal
    XZ plane; the epsilon avoids division by zero when the direction is
    purely vertical.
    """
    dx, dy, dz = (c - e for c, e in zip(center, eye))
    horizontal = math.sqrt(dx ** 2 + dz ** 2)
    return math.atan(dy / (horizontal + 1e-16))
1728801 | from typing import List
class ShellSort:
    """In-place Shell sort using the Knuth gap sequence (1, 4, 13, 40, ...)."""

    def sort(self, items: List) -> None:
        """Sort *items* in ascending order, in place."""
        n = len(items)
        # Start from the largest Knuth gap below n / 3.
        gap = 1
        while gap < n / 3:
            gap = 3 * gap + 1
        while gap >= 1:
            # Gapped insertion sort: each pass leaves items gap-sorted.
            for i in range(gap, n):
                j = i
                while j >= gap and items[j] < items[j - gap]:
                    items[j - gap], items[j] = items[j], items[j - gap]
                    j -= gap
            gap //= 3
| StarcoderdataPython |
8063589 | <filename>mycfo/discussion_forum/page/discussion_forum/discussion_forum.py
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.website.render import resolve_path
from frappe import _
from frappe.website.render import clear_cache
from frappe.utils import today, cint, global_date_format, get_fullname, strip_html_tags,flt
from frappe.website.utils import find_first_image
from markdown2 import markdown
import datetime
import math
from mycfo.mycfo_utils import get_central_delivery
from frappe.utils import get_url
STANDARD_USERS = ("Guest", "Administrator")
no_cache = 1
no_sitemap = 1
@frappe.whitelist(allow_guest=True)
def get_data(category=None,user=None,assigned_to_me=None,page_no=0,limit=3):
    """Return one page of published Discussion Topics.

    Optionally filters by *category*, by author (*user*), or by topics
    assigned to the session user (*assigned_to_me*).  Returns a tuple of
    (posts, total_pages, next_page_no, paginate_flag).
    """
    conditions = []
    assign_condition = []
    todo_owner = ''
    if page_no:
        offset = (cint(page_no) * cint(limit))
    else:
        offset = 0
    #next_start = cint(limit_start) + cint(limit_page_length)
    if user:
        conditions.append('t1.post_by="%s"' % frappe.db.escape(user))
    if category:
        conditions.append('t1.blog_category="%s"' % frappe.db.escape(category))
    if assigned_to_me:
        # Join ToDo so only topics assigned to the session user survive.
        assign_condition.append('left join `tabToDo`t3 on t1.name = t3.reference_name ')
        todo_owner = ' and t3.owner="%s" '% frappe.db.escape(frappe.session.user)
    limit_query = " limit %(start)s offset %(page_len)s"%{"start": limit, "page_len":offset }
    query = """\
        select
            distinct t1.name,t1.title, t1.blog_category, t1.published_on,
                t1.published_on as creation,
                ifnull(t1.intro, t1.content) as content,
                t2.employee_name,t1.post_by,
                (select count(name) from `tabComment` where
                    comment_doctype='Discussion Topic' and comment_docname=t1.name and comment_type="Comment") as comments
        from `tabDiscussion Topic` t1 left join
            `tabEmployee` t2
            on t1.post_by = t2.name
        {assign_condition}
        where ifnull(t1.published,0)=1
        {condition} {to_do_and}
        order by published_on desc, name asc""".format(
            condition= (" and " + " and ".join(conditions)) if conditions else "",
            to_do_and = todo_owner,
            assign_condition = (" and ".join(assign_condition)) if assign_condition else "",
        )
    posts = frappe.db.sql(query+ limit_query, as_dict=1)
    for post in posts:
        post.published = global_date_format(post.creation)
        # Teaser text: first 140 chars of the body, markup stripped.
        post.content = strip_html_tags(post.content[:140])
        post.assigned = check_if_assigned(post)
        if not post.comments:
            post.comment_text = _('No comments yet')
        elif post.comments==1:
            post.comment_text = _('1 comment')
        else:
            post.comment_text = _('{0} comments').format(str(post.comments))
    total_records = get_total_topics(query)
    paginate = True if total_records > limit else False
    total_pages = math.ceil(total_records/flt(limit))
    # NOTE(review): the conditional binds only to `paginate`, so this always
    # returns a 4-tuple ({} in the paginate slot when posts is empty) —
    # presumably `(...) if posts else {}` was intended; confirm with callers.
    return posts,total_pages,int(page_no)+1,paginate if posts else {}
def check_if_assigned(post):
    """Return 1 if the session user has a ToDo referencing *post*, else 0."""
    assigned = frappe.db.get_value("ToDo",
        {"owner":frappe.session.user,"reference_type":"Discussion Topic","reference_name":post.name},"name")
    return 1 if assigned else 0
def get_total_topics(query):
    """Count the rows the (un-paginated) listing *query* would return."""
    executable_query = frappe.db.sql(query, as_list=1)
    return len([topic for topic in executable_query if topic])
# def get_total_topics(conditions):
# 	condition = (" and " + " and ".join(conditions)) if conditions else ""
# 	return frappe.db.sql("""select count(*) from `tabDiscussion Topic` as t1
# 		where ifnull(t1.published,0)=1 {0}""".format(condition),as_list=1)[0][0] or 0
@frappe.whitelist(allow_guest=True)
def get_post(topic_name):
    """Fetch one Discussion Topic as a dict, augmented with its comment
    pages and the posting employee's display name."""
    topic = frappe.get_doc("Discussion Topic",topic_name).as_dict()
    topic.update({
        "comment_list":get_comments(topic_name),
        "employee_name":frappe.db.get_value("Employee",topic.post_by,"employee_name")
    })
    return topic
@frappe.whitelist(allow_guest=True)
def get_comments(topic_name,page_no=0,is_sorted='false', limit=3):
    """Return one page of comments for *topic_name*.

    When *is_sorted* is the string "true", comments come ordered by their
    average rating; otherwise by creation time (newest first).  Each comment
    dict is enriched with rating stats, its attachments, and an HTML-ready
    body.  Returns (comments, total_pages, next_page_no, paginate_flag,
    is_sorted).
    """
    if is_sorted=="true":
        comment_list = get_sorted_comment_list("Discussion Topic",topic_name,page_no,limit)
    else:
        comment_list = get_comment_list("Discussion Topic",topic_name,page_no,limit)
    total_records = get_comment_count(topic_name)
    paginate = True if total_records > limit else False
    total_pages = math.ceil(total_records/flt(limit))
    page_no = int(page_no) + 1
    for comment in comment_list:
        ratings = get_rating_details(comment)
        comment["creation"] = comment.creation.strftime('%d-%m-%Y,%I:%M %p')
        comment.update({
            "average_rating":ratings.get("avg",0.0),
            "ratings":ratings.get("ratings",0),
            "user_rating":ratings.get("user_rating"),
            "no_of_users":ratings.get("number_of_users"),
            "get_attachments": get_attachments("Comment",comment['name']) ,
            # Newlines become <br> so the client can render the body as HTML.
            "comment": comment['comment'].replace("\n","<br>")
        })
    # Fixed: removed a leftover debug `print frappe.request.url`.
    return comment_list,total_pages,page_no,paginate,is_sorted
@frappe.whitelist(allow_guest=True)
def get_attachments(dt, dn):
    """Return File rows (name, file_name, file_url) attached to document
    *dn* of doctype *dt*."""
    # Fixed: removed a leftover debug `print "in atachment"`.
    return frappe.get_all("File", fields=["name", "file_name", "file_url"],
        filters = {"attached_to_name": dn, "attached_to_doctype": dt})
@frappe.whitelist(allow_guest=True)
def sort_comments(topic_name,page_no=0,limit=3):
    """Return one page of comments for *topic_name*, ordered by average
    rating (highest first), plus pagination info."""
    comment_list = get_sorted_comment_list("Discussion Topic",topic_name,page_no,limit)
    total_records = get_comment_count(topic_name)
    paginate = True if total_records > limit else False
    total_pages = math.ceil(total_records/flt(limit))
    page_no = int(page_no) + 1
    for comment in comment_list:
        ratings = get_rating_details(comment)
        comment["creation"] = comment.creation.strftime('%d-%m-%Y,%I:%M %p')
        comment.update({
            "average_rating":ratings.get("avg",0.0),
            "ratings":ratings.get("ratings",0),
            "user_rating":ratings.get("user_rating"),
            "no_of_users":ratings.get("number_of_users"),
            "get_attachments": get_attachments("Comment",comment['name'])
        })
    # Re-sort the page by the freshly computed averages.
    comment_list.sort(key=lambda x: x['average_rating'],reverse=True)
    return comment_list,total_pages,page_no,paginate
def get_comment_count(topic_name):
    """Total number of Comments on the given Discussion Topic."""
    return frappe.get_list("Comment",fields=["count(*)"],
        filters={"comment_type":"Comment","comment_docname":topic_name},as_list=1,ignore_permissions=1)[0][0] or 0
def get_sorted_comment_list(doctype, name,page_no,limit):
    """One page of comments for (*doctype*, *name*), ordered by average
    rating descending (average computed per-row via a subquery)."""
    if page_no:
        offset = (cint(page_no) * cint(limit))
    else:
        offset = 0
    return frappe.db.sql("""select
            name,comment, comment_by_fullname, creation, comment_by, name as cname,
            CASE WHEN 5!=6 then (select avg(ratings) from `tabTopic Ratings` where comment=cname)
            ELSE " "
            END AS ratings
        from `tabComment` where comment_doctype=%s
        and ifnull(comment_type, "Comment")="Comment"
        and comment_docname=%s order by ratings desc limit %s offset %s""",(doctype,name,limit,offset), as_dict=1) or []
def get_comment_list(doctype, name,page_no,limit):
    """One page of comments for (*doctype*, *name*), newest first."""
    if page_no:
        offset = (cint(page_no) * cint(limit))
    else:
        offset = 0
    return frappe.db.sql("""select
            name,comment, comment_by_fullname, creation, comment_by
        from `tabComment` where comment_doctype=%s
        and ifnull(comment_type, "Comment")="Comment"
        and comment_docname=%s order by creation desc limit %s offset %s""",(doctype,name,limit,offset), as_dict=1) or []
def get_rating_details(comment):
    """Aggregate rating stats for one comment.

    *comment* may be a dict carrying a "name" key or the comment name
    itself.  Returns {"avg", "ratings", "user_rating", "number_of_users"}.
    """
    ratings = {}
    if comment.get("name"):
        comment = comment.get("name")
    ratings["avg"] = round(frappe.get_list("Topic Ratings", fields=["ifnull(avg(ratings),0.0)"],
        filters={ "comment":comment}, as_list=True)[0][0],2)
    # NOTE(review): the two raw queries below interpolate the comment name
    # via str.format — injectable if names can contain quotes; prefer
    # parameterized %s placeholders.
    ratings["ratings"] = frappe.db.sql("""select count(*) from
        `tabTopic Ratings` where comment='{0}'""".format(comment),as_list=1)[0][0]
    ratings["user_rating"] = frappe.db.get_value("Topic Ratings",{"comment":comment,"user":frappe.session.user},"ratings")
    ratings['number_of_users'] = frappe.db.sql("""select count(distinct user) from `tabTopic Ratings` where comment = '{0}'""".format(comment),as_list=1)[0][0]
    return ratings
@frappe.whitelist(allow_guest=True)
def add_comment(comment,topic_name):
    """Insert a new Comment by the session user on the given Discussion
    Topic (permission checks bypassed)."""
    # Fixed: removed a leftover debug `print comment,"comment"` and an
    # unused local `import datetime`.
    frappe.get_doc({
        "doctype":"Comment",
        "comment_by": frappe.session.user,
        "comment_doctype":"Discussion Topic",
        "comment_docname": topic_name,
        "comment": comment,
        "comment_type":"Comment"
    }).insert(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def add_rating(rating,comment,topic_name):
    """Record the session user's rating for *comment* and return the
    refreshed aggregate stats (average, count, and the user's own rating).

    Raises (via frappe.throw) when a user tries to rate their own comment.
    """
    comment_doc = frappe.get_doc("Comment",comment)
    if(comment_doc.comment_by==frappe.session.user):
        frappe.throw("You can not rate your own comments")
    # NOTE(review): this local import is unused.
    import datetime
    frappe.get_doc({
        "doctype":"Topic Ratings",
        "user": frappe.session.user,
        "comment":comment,
        "ratings":flt(rating,1)
    }).insert(ignore_permissions=True)
    ratings = get_rating_details({"name":comment})
    comments = {}
    comments.update({
        "average_rating":ratings.get("avg",0.0),
        "ratings":ratings.get("ratings",0),
        "user_rating":ratings.get("user_rating")
    })
    return comments
@frappe.whitelist()
def assign_topic(args=None):
    """add in someone's to do list
        args = {
            "assign_to": ,
            "doctype": ,
            "name": ,
            "description":
        }
    """
    if not args:
        args = frappe.local.form_dict
    from frappe.utils import nowdate
    # NOTE(review): eval() on client-supplied form data is unsafe — this
    # should use json.loads (or frappe.parse_json) instead.
    emp_list = eval(args['assign_to'])
    for employee in emp_list:
        d = frappe.get_doc({
            "doctype":"ToDo",
            "owner": employee,
            "reference_type": args['doctype'],
            "reference_name": args['name'],
            "description": args.get('description'),
            "priority": args.get("priority", "Medium"),
            "status": "Open",
            "date": args.get('date', nowdate()),
            "assigned_by": frappe.session.user,
        }).insert(ignore_permissions=True)
    # set assigned_to if field exists
    if frappe.get_meta(args['doctype']).get_field("assigned_to"):
        for employee in emp_list:
            frappe.db.set_value(args['doctype'], args['name'], "assigned_to", employee)
    # notify
    if not args.get("no_notification"):
        from frappe.desk.form.assign_to import notify_assignment
        # NOTE(review): `d` is the last ToDo from the loop above, so only the
        # final assignee's ToDo drives this notification — confirm intent.
        notify_assignment(d.assigned_by, d.owner, d.reference_type, d.reference_name, action='ASSIGN', description=args.get("description"), notify=1)
    send_mail_to_mycfo_users(emp_list, args["name"])
    return
# def user_query(doctype, txt, searchfield, start, page_len, filters):
# from frappe.desk.reportview import get_match_cond
# txt = "%{}%".format(txt)
# return frappe.db.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
# from `tabUser`
# where ifnull(enabled, 0)=1
# and docstatus < 2
# and name not in ({standard_users})
# and user_type != 'Website User'
# and name in (select parent from `tabUserRole` where role='Employee')
# and ({key} like %s
# or concat_ws(' ', first_name, middle_name, last_name) like %s)
# {mcond}
# order by
# case when name like %s then 0 else 1 end,
# case when concat_ws(' ', first_name, middle_name, last_name) like %s
# then 0 else 1 end,
# name asc
# limit %s, %s""".format(standard_users=", ".join(["%s"]*len(STANDARD_USERS)),
# key=searchfield, mcond=get_match_cond(doctype)),
# tuple(list(STANDARD_USERS) + [txt, txt, txt, txt, start, page_len]))
def users_query(doctype, txt, searchfield, start, page_len, filters):
    """Autocomplete search: users matching *txt* who are not the topic
    author, not the session user, and not already assigned (open ToDo)
    to the Discussion Topic in filters['doc_name'].

    NOTE(review): *txt* and the filter values are interpolated straight
    into the SQL string — injectable; should use parameterized queries or
    frappe.db.escape.
    """
    return frappe.db.sql("""select usr.name, usr.first_name
        from `tabUser` usr
        where usr.name != '{0}'
        and usr.name != '{1}'
        and usr.name not in ( select owner from `tabToDo`
        where reference_type= "Discussion Topic" and reference_name= "{2}" and status="Open")
        and (usr.name like '{txt}'
        or usr.first_name like '{txt}' )
        limit 20
        """.format(filters['doc'],frappe.session.user,filters['doc_name'],txt= "%%%s%%" % txt),as_list=1)
@frappe.whitelist(allow_guest=True)
def get_categories():
    """All Discussion Category names and titles (permission checks bypassed)."""
    return frappe.get_list("Discussion Category", fields=["name","title"],ignore_permissions=1)
def send_mail_to_mycfo_users(email_ids, title_name):
    """Email *email_ids* that Discussion Topic *title_name* was assigned to
    them, CC-ing central delivery; the body is a rendered HTML template."""
    title, category, owner = frappe.db.get_value("Discussion Topic", title_name, ["title", "blog_category", "owner"])
    template = "/templates/discussion_forum_templates/topic_assign_notification.html"
    owner = frappe.db.get_value("User", owner, [" concat(first_name, ' ', last_name) "])
    assignee = frappe.db.get_value("User", frappe.session.user, ["concat(first_name, ' ', ifnull(last_name,'') )"])
    args = {"assignee" :assignee, "subject":title, "category":category, "host_url":get_url(), "owner":owner}
    frappe.sendmail(recipients=email_ids, sender=None, subject="Discussion Topic Assigned to You",
        message=frappe.get_template(template).render(args), cc=get_central_delivery())
| StarcoderdataPython |
5106953 | """
Test cases for the Comparisons class over the Chart elements
"""
import numpy as np
from holoviews import Dimension, Curve, Bars, Histogram, Scatter, Points, VectorField
from holoviews.element.comparison import ComparisonTestCase
class CurveComparisonTest(ComparisonTestCase):
    """Equality/inequality comparisons for Curve elements."""

    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        self.curve1 = Curve([(0.1*i, np.sin(0.1*i)) for i in range(100)])
        self.curve2 = Curve([(0.1*i, np.sin(0.1*i)) for i in range(101)])

    def test_curves_equal(self):
        self.assertEqual(self.curve1, self.curve1)

    def test_curves_unequal(self):
        # Fixed: the original try/except silently PASSED when no
        # AssertionError was raised at all; assertRaises also fails the
        # test in that case.
        with self.assertRaises(AssertionError) as cm:
            self.assertEqual(self.curve1, self.curve2)
        if not str(cm.exception).startswith("Curve not of matching length."):
            raise self.failureException("Curve mismatch error not raised.")
class BarsComparisonTest(ComparisonTestCase):
    """Equality/inequality comparisons for Bars elements.

    NOTE(review): the try/except pattern in the inequality tests passes
    silently when assertEqual raises nothing at all; consider assertRaises.
    """
    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        key_dims1=[Dimension('Car occupants')]
        key_dims2=[Dimension('Cyclists')]
        value_dims1=['Count']
        self.bars1 = Bars([('one',8),('two', 10), ('three', 16)],
                          kdims=key_dims1, vdims=value_dims1)
        self.bars2 = Bars([('one',8),('two', 10), ('three', 17)],
                          kdims=key_dims1, vdims=value_dims1)
        self.bars3 = Bars([('one',8),('two', 10), ('three', 16)],
                          kdims=key_dims2, vdims=value_dims1)
    def test_bars_equal_1(self):
        self.assertEqual(self.bars1, self.bars1)
    def test_bars_equal_2(self):
        self.assertEqual(self.bars2, self.bars2)
    def test_bars_equal_3(self):
        self.assertEqual(self.bars3, self.bars3)
    def test_bars_unequal_1(self):
        try:
            self.assertEqual(self.bars1, self.bars2)
        except AssertionError as e:
            if not 'not almost equal' in str(e):
                # NOTE(review): generic Exception here, failureException above.
                raise Exception('Bars mismatched data error not raised. %s' % e)
    def test_bars_unequal_keydims(self):
        try:
            self.assertEqual(self.bars1, self.bars3)
        except AssertionError as e:
            if not str(e) == 'Dimension names mismatched: Car occupants != Cyclists':
                raise Exception('Bars key dimension mismatch error not raised.')
class HistogramComparisonTest(ComparisonTestCase):
    """Equality/inequality comparisons for Histogram elements.

    NOTE(review): the try/except pattern in the inequality tests passes
    silently when assertEqual raises nothing at all; consider assertRaises.
    """
    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        # Two fixed seeds give two distinct but reproducible histograms.
        np.random.seed(1)
        frequencies1, edges1 = np.histogram([np.random.normal() for i in range(1000)], 20)
        self.hist1 = Histogram(frequencies1, edges1)
        np.random.seed(2)
        frequencies2, edges2 = np.histogram([np.random.normal() for i in range(1000)], 20)
        self.hist2 = Histogram(frequencies2, edges2)
        # Mixed combinations: same values with the other run's edges.
        self.hist3 = Histogram(frequencies1, edges2)
        self.hist4 = Histogram(frequencies2, edges1)
    def test_histograms_equal_1(self):
        self.assertEqual(self.hist1, self.hist1)
    def test_histograms_equal_2(self):
        self.assertEqual(self.hist2, self.hist2)
    def test_histograms_unequal_1(self):
        try:
            self.assertEqual(self.hist1, self.hist2)
        except AssertionError as e:
            if not str(e).startswith("Histogram edges not almost equal to 6 decimals"):
                raise self.failureException("Histogram edge data mismatch error not raised.")
    def test_histograms_unequal_2(self):
        try:
            self.assertEqual(self.hist1, self.hist3)
        except AssertionError as e:
            if not str(e).startswith("Histogram edges not almost equal to 6 decimals"):
                raise self.failureException("Histogram edge data mismatch error not raised.")
    def test_histograms_unequal_3(self):
        try:
            self.assertEqual(self.hist1, self.hist4)
        except AssertionError as e:
            if not str(e).startswith("Histogram values not almost equal to 6 decimals"):
                raise self.failureException("Histogram value data mismatch error not raised.")
class ScatterComparisonTest(ComparisonTestCase):
    # Equality semantics of Scatter: length mismatches and value mismatches
    # must raise distinct, descriptive AssertionError messages.

    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        def make(count, step):
            return Scatter([(1, idx * step) for idx in range(count)])
        self.scatter1 = make(20, 1)
        self.scatter2 = make(21, 1)
        self.scatter3 = make(20, 2)

    def test_scatter_equal_1(self):
        self.assertEqual(self.scatter1, self.scatter1)

    def test_scatter_equal_2(self):
        self.assertEqual(self.scatter2, self.scatter2)

    def test_scatter_equal_3(self):
        self.assertEqual(self.scatter3, self.scatter3)

    def test_scatter_unequal_data_shape(self):
        try:
            self.assertEqual(self.scatter1, self.scatter2)
        except AssertionError as err:
            message = str(err)
            if not message.startswith("Scatter not of matching length."):
                raise self.failureException("Scatter data mismatch error not raised.")

    def test_scatter_unequal_data_values(self):
        try:
            self.assertEqual(self.scatter1, self.scatter3)
        except AssertionError as err:
            message = str(err)
            if not message.startswith("Scatter not almost equal to 6 decimals"):
                raise self.failureException("Scatter data mismatch error not raised.")
class PointsComparisonTest(ComparisonTestCase):
    # Equality semantics of Points: mismatched lengths and mismatched
    # values must raise distinct, descriptive AssertionError messages.

    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        self.points1 = Points([(1, i) for i in range(20)])
        self.points2 = Points([(1, i) for i in range(21)])    # one extra point
        self.points3 = Points([(1, i*2) for i in range(20)])  # same length, different values

    def test_points_equal_1(self):
        self.assertEqual(self.points1, self.points1)

    def test_points_equal_2(self):
        self.assertEqual(self.points2, self.points2)

    def test_points_equal_3(self):
        self.assertEqual(self.points3, self.points3)

    def test_points_unequal_data_shape(self):
        # NOTE(review): if no AssertionError is raised at all the test
        # passes silently; the except-only pattern is kept as found.
        try:
            self.assertEqual(self.points1, self.points2)
        except AssertionError as e:
            if not str(e).startswith("Points not of matching length."):
                raise self.failureException("Points count mismatch error not raised.")

    def test_points_unequal_data_values(self):
        try:
            self.assertEqual(self.points1, self.points3)
        except AssertionError as e:
            if not str(e).startswith("Points not almost equal to 6 decimals"):
                raise self.failureException("Points data mismatch error not raised.")
class VectorFieldComparisonTest(ComparisonTestCase):
    # Equality semantics of VectorField: identical fields compare equal;
    # a differing component raises a "not almost equal" AssertionError.

    def setUp(self):
        "Variations on the constructors in the Elements notebook"
        x, y = np.mgrid[-10:10, -10:10] * 0.25
        sine_rings = np.sin(x**2+y**2)*np.pi+np.pi
        # The two fields differ only in the falloff denominator (8 vs 9).
        exp_falloff1 = 1/np.exp((x**2+y**2)/8)
        exp_falloff2 = 1/np.exp((x**2+y**2)/9)
        # presumably columns are (x, y, angle, magnitude) -- confirm
        # against the VectorField element documentation.
        self.vfield1 = VectorField([x, y, sine_rings, exp_falloff1])
        self.vfield2 = VectorField([x, y, sine_rings, exp_falloff2])

    def test_vfield_equal_1(self):
        self.assertEqual(self.vfield1, self.vfield1)

    def test_vfield_equal_2(self):
        self.assertEqual(self.vfield2, self.vfield2)

    def test_vfield_unequal_1(self):
        try:
            self.assertEqual(self.vfield1, self.vfield2)
        except AssertionError as e:
            if not str(e).startswith("VectorField not almost equal to 6 decimals"):
                raise self.failureException("VectorField data mismatch error not raised.")
| StarcoderdataPython |
9610651 | '''
This file is part of Semantic SLAM
License: MIT
Author: <NAME>
Email: <EMAIL>
Web: https://1989Ryan.github.io/
'''
from pspnet import *
from std_msgs.msg import String
from sensor_msgs.msg import Image
from map_generator.msg import frame
from cv_bridge import CvBridge
import numpy as np
import tensorflow as tf
import rospy
import os
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
'''
Real time image segmentation, using PSPNet101. Dummy publisher which is Really Really Slow.
'''
class Semantic_Imformation_Publisher():
    '''
    Publishes per-pixel semantic information (the category of each pixel)
    for every incoming image.  (Class name spelling kept for compatibility
    with existing imports/launch files.)
    '''
    def __init__(self):
        '''
        Node initialization: build the PSPNet model once, capture the
        TensorFlow graph, and wire up the image subscriber and result
        publisher.
        '''
        self._cv_bridge = CvBridge()
        self.pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
                                weights='pspnet101_cityscapes')
        # Keep a handle on the default graph so the subscriber callback
        # (which may run on a different thread) can reuse the model.
        self.graph = tf.get_default_graph()
        self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1000)
        self._pub = rospy.Publisher('/result', frame, queue_size=1)

    def callback(self, image_msg):
        '''Segment one frame and publish the image plus per-pixel category.'''
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "rgb8")
        h_ori, w_ori = cv_image.shape[:2]
        with self.graph.as_default():
            probs = self.pspnet.model.predict(self.img_proc(cv_image))[0]
        # BUG FIX: the original tested `cv_image.shape[0:1] != (713,713)`,
        # which compares a 1-tuple against a 2-tuple and is therefore always
        # true, so the zoom ran even for 713x713 inputs.
        if cv_image.shape[:2] != (713, 713):  # upscale prediction if necessary
            h, w = probs.shape[:2]
            probs = ndimage.zoom(probs, (1. * h_ori / h, 1. * w_ori / w, 1.),
                                 order=1, prefilter=False)
        rospy.loginfo("running")
        # Per-pixel class index: argmax over the class dimension.
        cm = np.argmax(probs, axis=2).astype(np.uint8)
        category = self._cv_bridge.cv2_to_imgmsg(cm)
        # NOTE(review): converted but never attached to the outgoing
        # message; kept for parity with the original behavior.
        probs = self._cv_bridge.cv2_to_imgmsg(probs)
        f = frame()
        f.image = image_msg
        f.category = category
        self._pub.publish(f)

    def img_proc(self, img):
        '''Resize to the network input size and apply mean-centering + BGR.'''
        img = misc.imresize(img, (713, 713))
        img = img - DATA_MEAN
        img = img[:, :, ::-1]  # RGB => BGR
        img = img.astype('float32')
        data = np.expand_dims(img, 0)
        return data
if __name__ == '__main__':
    # Stand up the node and hand control to the ROS event loop.
    rospy.init_node('Semantic_Information_Publisher')
    publisher_node = Semantic_Imformation_Publisher()
    publisher_node.main()
| StarcoderdataPython |
11388837 | <filename>skforecast/ForecasterAutoregCustom/tests/test_predict_interval.py
import numpy as np
import pandas as pd
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from sklearn.linear_model import LinearRegression
def create_predictors(y):
    '''
    Return the first 5 lags of time series `y`, most recent value first.
    '''
    lags = y[-5:][::-1]
    return lags
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_True():
    '''
    Test output when regressor is LinearRegression and one step ahead is predicted
    using in sample residuals.
    '''
    forecaster = ForecasterAutoregCustom(
        regressor=LinearRegression(),
        fun_predictors=create_predictors,
        window_size=5
    )
    forecaster.fit(y=pd.Series(np.arange(10)))
    # Constant residuals (all 10) make every bootstrap draw identical, so
    # the interval bounds are deterministic.
    forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
    expected = pd.DataFrame(
        np.array([[10., 20., 20.]]),
        columns=['pred', 'lower_bound', 'upper_bound'],
        index=pd.RangeIndex(start=10, stop=11, step=1)
    )
    results = forecaster.predict_interval(steps=1, in_sample_residuals=True, n_boot=2)
    pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_True():
    '''
    Test output when regressor is LinearRegression and two step ahead is predicted
    using in sample residuals.
    '''
    forecaster = ForecasterAutoregCustom(
        regressor=LinearRegression(),
        fun_predictors=create_predictors,
        window_size=5
    )
    forecaster.fit(y=pd.Series(np.arange(10)))
    # Constant residuals (all 10) make the bootstrap deterministic.
    forecaster.in_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
    expected = pd.DataFrame(
        np.array([[10., 20., 20.],
                  [11., 23., 23.]]),
        columns=['pred', 'lower_bound', 'upper_bound'],
        index=pd.RangeIndex(start=10, stop=12, step=1)
    )
    results = forecaster.predict_interval(steps=2, in_sample_residuals=True, n_boot=2)
    pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_1_in_sample_residuals_is_False():
    '''
    Test output when regressor is LinearRegression and one step ahead is predicted
    using out sample residuals.
    '''
    forecaster = ForecasterAutoregCustom(
        regressor=LinearRegression(),
        fun_predictors=create_predictors,
        window_size=5
    )
    forecaster.fit(y=pd.Series(np.arange(10)))
    # Out-of-sample residuals are forced to a constant 10 (sized after the
    # in-sample residuals) so the bootstrap intervals are deterministic.
    forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
    expected = pd.DataFrame(
        np.array([[10., 20., 20.]]),
        columns=['pred', 'lower_bound', 'upper_bound'],
        index=pd.RangeIndex(start=10, stop=11, step=1)
    )
    results = forecaster.predict_interval(steps=1, in_sample_residuals=False, n_boot=2)
    pd.testing.assert_frame_equal(results, expected)
def test_predict_interval_output_when_forecaster_is_LinearRegression_steps_is_2_in_sample_residuals_is_False():
    '''
    Test output when regressor is LinearRegression and two step ahead is predicted
    using out sample residuals.
    '''
    forecaster = ForecasterAutoregCustom(
        regressor=LinearRegression(),
        fun_predictors=create_predictors,
        window_size=5
    )
    forecaster.fit(y=pd.Series(np.arange(10)))
    forecaster.out_sample_residuals = np.full_like(forecaster.in_sample_residuals, fill_value=10)
    expected = pd.DataFrame(
        np.array([[10., 20., 20.],
                  [11., 23., 23.]]),
        columns=['pred', 'lower_bound', 'upper_bound'],
        index=pd.RangeIndex(start=10, stop=12, step=1)
    )
    # NOTE(review): unlike the sibling tests this call omits n_boot=2 --
    # presumably harmless because the constant residuals make every
    # bootstrap draw identical; confirm.
    results = forecaster.predict_interval(steps=2, in_sample_residuals=False)
    pd.testing.assert_frame_equal(results, expected)
5193536 | from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/echo', methods=['POST'])
def echo():
    # Echo the posted JSON document straight back to the caller.
    payload = request.get_json()
    return jsonify(payload), 200
@app.route('/', methods=['GET'])
def get():
    # Simple liveness endpoint (response text kept verbatim).
    response_body = 'Hellow get method'
    return response_body, 200
| StarcoderdataPython |
6552326 | import random as r
from random import randint
import numpy as np
class Structures:
    """Generate random, approximately row-stochastic matrices for model.py.

    All work happens at class-definition time (mirroring the original
    design); consumers read the class attributes:

    - ``pi``: 1 x N initial state distribution
    - ``A``:  N x N transition matrix
    - ``B``:  N x M observation matrix
    """

    N = 2  # number of hidden states
    M = 3  # number of observation symbols

    # Target element value 1/N (or 1/M) with a +/-0.01 jitter band.
    scale1 = 1.0 / N
    scale2 = 1.0 / M
    min1 = scale1 - 0.01
    max1 = scale1 + 0.01
    min2 = scale2 - 0.01
    max2 = scale2 + 0.01

    # Random seed uniform distribution range (kept for compatibility).
    begin = 0.0
    end = 1.0

    # BUG FIX: the original used [[0] * N] * N, which aliases one row
    # object N times, so every element write updated all rows at once.
    # Rows are now created independently.
    # NOTE(review): np.random.normal() draws from a standard normal, not
    # uniform [0, 1], so raw values can fall outside [min, max]; kept
    # as-is to match the original generator.
    A = []
    for _i in range(N):
        _row = []
        for _j in range(N):
            _row.append(min1 + (max1 - min1) * np.random.normal())
        A.append(_row)

    B = []
    for _i in range(N):
        _row = []
        for _j in range(M):
            _row.append(min2 + (max2 - min2) * np.random.normal())
        B.append(_row)

    # BUG FIX: pi is now a flat list of N floats.  The original seeded it
    # with [[0] * N] (a nested list) and then both appended to and
    # overwrote it, leaving a malformed mixture of lists and floats.
    pi = []
    for _i in range(N):
        pi.append(min1 + (max1 - min1) * np.random.normal())

    aRowSums = []
    for _row in A:
        aRowSums.append(sum(_row))
    bRowSums = []
    for _row in B:
        bRowSums.append(sum(_row))
    piRowSum = sum(pi)

    # Make each row stochastic: spread the deficit/excess (1 - rowSum)
    # uniformly over the row's elements.  This matches the original
    # adjustment, which added (1 - sum)/len to every element (equivalently
    # subtracted (sum - 1)/len when the sum exceeded 1).
    for _i in range(N):
        _adjust = (1.0 - aRowSums[_i]) / N
        for _j in range(N):
            A[_i][_j] += _adjust
    for _i in range(N):
        _adjust = (1.0 - bRowSums[_i]) / M
        for _j in range(M):
            B[_i][_j] += _adjust
    _adjust = (1.0 - piRowSum) / N
    for _j in range(N):
        pi[_j] += _adjust

    # Remove loop temporaries from the class namespace.
    del _i, _j, _row, _adjust
98243 | import minstrel.db
import minstrel.tracks
class TrackStorage():
    """A named storage location holding track locations, keyed by uuid."""

    def __init__(self, name, track_locations, uuid, created=None, deleted=None, updated=None):
        self.created = created
        # BUG FIX: `deleted` was accepted but never stored on the instance.
        self.deleted = deleted
        self.name = name
        self.track_locations = track_locations
        self.updated = updated
        self.uuid = uuid

    def get_tracks(self):
        """Return all tracks stored under this storage's uuid (db lookup)."""
        return minstrel.db.all_tracks_for_storage(self.uuid)
def get(name):
    # Look up a single storage row by name.  track_locations are not
    # populated here; callers that need them use get_all() instead.
    storage = minstrel.db.get_track_storage(name=name)
    return TrackStorage(track_locations=[], **storage)
def get_all():
    # Fetch every track location once, then build one TrackStorage per
    # storage row, attaching the tracks whose `store` matches its uuid.
    # NOTE(review): O(storages x tracks) scan; fine for small libraries.
    tracks = minstrel.tracks.get_all_locations()
    results = [TrackStorage(
        name=row['name'],
        track_locations=[track for track in tracks if track.store == row['uuid']],
        uuid=row['uuid'],
    ) for row in minstrel.db.all_track_storages()]
    return results
| StarcoderdataPython |
8123211 | <reponame>ParikhKadam/pyrwr<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from utils import iterator
from .pyrwr import PyRWR
class PageRank(PyRWR):
    # Global PageRank built on the PyRWR base class; the node count
    # (self.n) and the matrix used by the iteration (self.nAT) are
    # prepared by PyRWR / normalize().

    def __init__(self):
        pass

    def compute(self, c=0.15, epsilon=1e-6, max_iters=100,
                handles_deadend=True):
        '''
        Compute the PageRank score vector (global ranking)

        inputs
            c : float
                restart probability
            epsilon : float
                error tolerance for power iteration
            max_iters : int
                maximum number of iterations for power iteration
            handles_deadend : bool
                if true, it will handle the deadend issue in power iteration
                otherwise, it won't, i.e., no guarantee for sum of RWR scores
                to be 1 in directed graphs

        outputs
            r : ndarray
                PageRank score vector
        '''
        # Normalize the adjacency matrix (implemented in PyRWR).
        self.normalize()
        # Uniform restart vector q = (1/n, ..., 1/n): PageRank is RWR
        # with an all-nodes seed.
        # q = np.ones((self.n, 1))
        q = np.ones(self.n)
        q = q / self.n
        # Power iteration; per-iteration residuals are discarded here.
        r, residuals = iterator.iterate(self.nAT, q, c, epsilon,
                                        max_iters, handles_deadend)
        return r
| StarcoderdataPython |
9628190 | <reponame>shouxian92/sqlite-diff-tool
#! Python 3.7
import sqlite3
import os
from datetime import datetime
from sqlite3 import Error
DEBUG = False
def create_connection(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :param db_file: path of the database file
    :return: an open ``sqlite3.Connection``, or ``None`` if opening fails
    """
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
    return None
def get_common_tables(old_conn, new_conn):
    """ a comparison function which checks for tables with the same name
    :param
        old_conn: the connection to the old db
        new_conn: the connection to the new db
    :return: A list of table names (ordered as in the new database)
    """
    list_table_query = "select name from sqlite_master where type = 'table'"

    # NOTE: this mutates the row_factory of both connections so fetches
    # return bare names instead of 1-tuples (kept: later code expects it).
    old_conn.row_factory = lambda cursor, row: row[0]
    new_conn.row_factory = lambda cursor, row: row[0]

    old_cursor = old_conn.cursor()
    new_cursor = new_conn.cursor()

    old_tables = old_cursor.execute(list_table_query).fetchall()
    new_tables = new_cursor.execute(list_table_query).fetchall()

    # Set membership makes the intersection O(n + m) instead of the
    # previous O(n * m) list scan, while preserving the new-db order.
    # Table names are unique, so no duplicate handling is needed.
    old_table_set = set(old_tables)
    return [name for name in new_tables if name in old_table_set]
def format_sqlite_value(in_value):
    """Render a Python value as an SQLite literal.

    Numbers and booleans are rendered bare, ``None`` becomes ``NULL``, and
    anything else is treated as text: single quotes are doubled for
    escaping and the result is wrapped in single quotes.
    """
    if in_value is None:
        return "NULL"
    if type(in_value) in (int, float, bool):
        return str(in_value)
    escaped = in_value.replace("'", "''")
    return "'{}'".format(escaped)
def append_eql_condition(in_value, update=False):
    """Build the comparison suffix for a column value.

    WHERE context (``update=False``) renders ``None`` as `` is NULL`` (SQL
    equality never matches NULL); UPDATE SET context renders ``=NULL``.
    """
    formatted = format_sqlite_value(in_value)
    if in_value is None and not update:
        return " is " + formatted
    return "=" + formatted
def get_primary_key(conn, table, columns):
    """ attempts to reverse lookup the primary key by querying the table using the first column
    and iteratively adding the columns that comes after it until the query returns a
    unique row in the table.
    :param
        conn: an SQLite connection object
        table: a string denoting the table name to query
        columns: a list containing column names of the table
    :return: the list of columns which makes up the primary key
    """
    # NOTE(review): `conn` only needs .execute(); callers actually pass a
    # cursor here (see get_table_data_diff), which works the same way.
    select_row_query = "SELECT * FROM `{}`".format(table)
    count_row_query = "SELECT COUNT(*) FROM `{}` WHERE `{}`"
    primary_key = []
    # The heuristic is based on the table's first row only: keep growing
    # the column prefix until it uniquely identifies that row.
    row = conn.execute(select_row_query).fetchone()
    if row is not None:
        for i, column in enumerate(columns):
            if i == 0:
                # First column fills both placeholders of the template.
                count_row_query = count_row_query.format(table, column)
            else:
                # Subsequent columns extend the WHERE clause.
                count_row_query += " AND `{}`".format(column)
            count_row_query += append_eql_condition(row[i])
            primary_key.append(column)
            count = conn.execute(count_row_query).fetchone()
            if count[0] == 1:
                # Prefix uniquely identifies the sample row -> treat it as
                # the key.  NOTE(review): this can return a superset (or a
                # non-key unique prefix) of the real PRIMARY KEY.
                return primary_key
    # if no primary key was found then the primary key is made up of all columns
    return columns
def equal_stmt_list_generator(columns, data, update=False):
    """Pair each value with its column as a backtick-quoted fragment,
    e.g. ``\`col\`=1`` or ``\`col\` is NULL`` (WHERE context).
    """
    # Index into `columns` (rather than zip) so a data/column length
    # mismatch still raises IndexError, as before.
    return ['`{}`'.format(columns[idx]) + append_eql_condition(value, update)
            for idx, value in enumerate(data)]
def generate_insert_query(table, pk, data, column=''):
    """Build an INSERT statement for one row.

    :params
        table: the name of the table
        pk: the primary-key values for the row
        data: the non-key values, or ``None`` when the key is the whole row
        column: optional column-list clause, e.g. ``(\`a\`, \`b\`)``
    :return: an INSERT statement string
    """
    values = [format_sqlite_value(key) for key in pk]
    if data is not None:
        values += [format_sqlite_value(item) for item in data]
    return "INSERT INTO `{}` {} VALUES ({});".format(table, column, ', '.join(values))
def generate_update_query(table, where_cols, where_data, update_cols, update_data):
    """Build an UPDATE ... SET ... WHERE ... statement for one row.

    :params
        table: the name of the table
        where_cols / where_data: columns and values for the WHERE clause
        update_cols / update_data: columns and values for the SET clause
    :return: an UPDATE statement string
    """
    set_clause = ', '.join(equal_stmt_list_generator(update_cols, update_data, True))
    where_clause = ' AND '.join(equal_stmt_list_generator(where_cols, where_data))
    return "UPDATE `{}` SET {} WHERE {};".format(table, set_clause, where_clause)
def generate_del_query(table, where_cols, row_data):
    """Build a DELETE ... WHERE ... statement for one row.

    :params
        table: the name of the table
        where_cols: column names for the WHERE clause
        row_data: values matching `where_cols`
    :raises Exception: when the column and data lists differ in length
    :return: a DELETE statement string
    """
    if len(where_cols) != len(row_data):
        raise Exception("columns and row data are of different lengths")
    where_clause = ' AND '.join(equal_stmt_list_generator(where_cols, row_data))
    return "DELETE FROM `{}` WHERE {};".format(table, where_clause)
def get_table_data_diff(old_conn, new_conn, old_db_filename, new_db_filename):
    """ compares tables which exist in both DBs and checks to see
    if there are any differences in rows between the two.
    :param
        old_conn: the connection to the old db
        new_conn: the connection to the new db
    """
    # Accumulates the generated SQL statements (one string per line).
    diff_statements = []
    # Cursors created before get_common_tables() mutates the connections'
    # row_factory, so schema rows below come back as plain tuples.
    old_cursor = old_conn.cursor()
    new_cursor = new_conn.cursor()
    common_tables = get_common_tables(old_conn, new_conn)
    pragma_table_info_query = "PRAGMA table_info(`{}`)"
    select_column_rows_query = "SELECT {} FROM `{}`"
    select_all_rows_query = "SELECT * FROM `{}`"
    #attach_query = "ATTACH DATABASE `{}` AS `{}`".format(new_db_filename, "new")
    #old_cursor.execute(attach_query)
    # Debug counters only.  NOTE(review): notequal starts at 1, so the
    # debug total over-counts by one -- presumably should start at 0.
    equal = 0
    notequal = 1
    for table in common_tables:
        old_schema = old_cursor.execute(pragma_table_info_query.format(table)).fetchall()
        new_schema = new_cursor.execute(pragma_table_info_query.format(table)).fetchall()
        if DEBUG:
            print("### %s ###" % table)
        # get difference in rows
        # Only tables whose schemas match exactly are diffed.
        if (old_schema == new_schema):
            # PRAGMA table_info row layout: column 1 is the column name.
            columns = [col[1] for col in old_schema]
            orig_pk = get_primary_key(old_cursor, table, columns)
            data_cols_array = list(set(columns) - set(orig_pk))
            all_cols_are_pk = len(data_cols_array) == 0
            # Column-list clause for generated INSERTs: key columns first,
            # then the remaining data columns.
            new_insert_order = orig_pk.copy()
            new_insert_order.extend(data_cols_array)
            new_insert_order = "(" + ", ".join("`{}`".format(c) for c in new_insert_order) + ")"
            if DEBUG:
                print("primary: {} ({}), others: {} ({})".format(orig_pk, len(orig_pk), data_cols_array, len(data_cols_array)))
            pk = ", ".join('`{0}`'.format(k) for k in orig_pk)
            # this will generate the statement to select "primary key"
            select_by_pk = select_column_rows_query.format(pk, table)
            if all_cols_are_pk:
                # get everything if the primary key are all the columns
                select_rows_stmt = select_all_rows_query.format(table)
                old_row_data = old_cursor.execute(select_rows_stmt).fetchall()
                new_row_data = new_cursor.execute(select_rows_stmt).fetchall()
            else:
                data_cols = ", ".join('`{}`'.format(k) for k in data_cols_array)
                select_rows_stmt = select_column_rows_query.format(data_cols, table)
                old_row_data = old_cursor.execute(select_rows_stmt).fetchall()
                new_row_data = new_cursor.execute(select_rows_stmt).fetchall()
            old_rows_ids = old_cursor.execute(select_by_pk).fetchall()
            new_rows_ids = new_cursor.execute(select_by_pk).fetchall()
            if (old_rows_ids != new_rows_ids):
                '''
                1. old rows that do not exist in new table should be removed (NOT IN new_rows)
                2. new rows that do not exist in old table should be added (NOT IN old_rows)
                3. find rows with difference in data (TBD)
                4. generate SQL statement for these
                '''
                notequal += 1
                # hash row ids to make comparison
                old_row_ids_hashed = [hash(rowid) for rowid in old_rows_ids]
                new_row_ids_hashed = [hash(rowid) for rowid in new_rows_ids]
                # hash data rows as well (why not)
                old_row_data_hashed = [hash(old_row) for old_row in old_row_data]
                new_row_data_hashed = [hash(new_row) for new_row in new_row_data]
                # hashed pk -> hashed data, and hashed pk -> original values.
                old_hashmap = dict(zip(old_row_ids_hashed, old_row_data_hashed))
                new_hashmap = dict(zip(new_row_ids_hashed, new_row_data_hashed))
                new_hashmap_pk_unhashed = dict(zip(new_row_ids_hashed, new_rows_ids))
                new_hashmap_data_unhashed = dict(zip(new_row_ids_hashed, new_row_data))
                not_inside_count, inside_count, new_row_count, data_changed = (0, 0, 0, 0)
                for index, old_hashed_pk in enumerate(old_row_ids_hashed):
                    # Attempts to get the row information from the hashed primary key
                    # returns false if not present in the dictionary
                    # NOTE(review): a data hash that is 0/False is
                    # indistinguishable from "missing" under this scheme.
                    in_new_table = new_hashmap.get(old_hashed_pk, False)
                    where_cols = (data_cols_array, orig_pk) [all_cols_are_pk] #if all columns are pk, then data column is empty, switch to orig_pk
                    old_row = old_row_data[index]
                    if not in_new_table:
                        # generate delete statement
                        # NOTE(review): the WHERE uses the non-key data
                        # columns (not the pk), which may match several
                        # rows -- confirm this is intended.
                        delete_where_string = generate_del_query(table, where_cols, old_row) + '\n'
                        if DEBUG:
                            print(delete_where_string)
                        diff_statements.append(delete_where_string)
                        not_inside_count += 1
                    else:
                        if in_new_table != old_hashmap[old_hashed_pk]:
                            if DEBUG:
                                print("Comparing hash in new table ({}) to old ({})".format(in_new_table, old_hashmap[old_hashed_pk]))
                            # go look for the data which belongs to the new data and then do an UPDATE SET statement for starters
                            #update_string = generate_update_query(table, where_cols, old_row, where_cols, new_hashmap_data_unhashed[old_hashed_pk]) + '\n'
                            #diff_statements.append(update_string)
                            data_changed += 1
                        inside_count += 1
                for new_hash_pk in new_row_ids_hashed:
                    in_old_table = old_hashmap.get(new_hash_pk, False)
                    if not in_old_table:
                        # generate insert statement
                        new_pk = new_hashmap_pk_unhashed[new_hash_pk]
                        new_data = (new_hashmap_data_unhashed[new_hash_pk], None) [all_cols_are_pk] # pass nothing so we don't generate double insert values
                        diff_statements.append(generate_insert_query(table, new_pk, new_data, new_insert_order) + '\n')
                        new_row_count += 1
                if DEBUG:
                    print("{} rows identical to new table".format(inside_count))
                    print("{} rows not in new table".format(not_inside_count))
                    print("{} new rows from new table".format(new_row_count))
                    print("{} rows were changed in new table".format(data_changed))
            else:
                equal += 1
                if DEBUG:
                    print("Tables are identical")
    if DEBUG:
        print("Total - not equal: {}. equal: {}".format(notequal, equal))
    return diff_statements
def write_to_file(old_name, new_name, diff_statements, ext="sql"):
    """Write *diff_statements* to a timestamped diff file.

    The file is named ``<old>-<new>-<timestamp>-diff.<ext>`` (extensions of
    the input names are stripped) and its name is returned.
    """
    stem_old = os.path.splitext(old_name)[0]
    stem_new = os.path.splitext(new_name)[0]
    stamp = datetime.timestamp(datetime.now())
    filename = "{}-{}-{}-diff.{}".format(stem_old, stem_new, stamp, ext)
    with open(filename, "w+", encoding="utf-8") as handle:
        handle.writelines(diff_statements)
    return filename
def remove_dupes(seq):
    """Return *seq* with duplicates removed, keeping first occurrences."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
if __name__ == '__main__':
    # Prompt until a valid (openable) old database file is supplied.
    old = input("Please enter the name of the older database file: ")
    old_conn = create_connection(old)
    while old_conn is None:
        old = input("Invalid database file, please enter again: ")
        old_conn = create_connection(old)

    new = input("Please enter the name of the newer database file: ")
    new_conn = create_connection(new)
    # BUG FIX: this loop previously re-checked (and reassigned) old_conn,
    # so an invalid *new* database file was never re-prompted.
    while new_conn is None:
        new = input("Invalid database file, please enter again: ")
        new_conn = create_connection(new)

    print('Both files are found and valid SQLite files.. making comparisons..')
    diff_statements = get_table_data_diff(old_conn, new_conn, old, new)
    diff_statements = remove_dupes(diff_statements)
    print('Comparison complete.')
    if len(diff_statements) > 0:
        filename = write_to_file(old, new, diff_statements)
        print('Diff file generated - {}'.format(filename))
    else:
        print('No difference found for the two files.')
| StarcoderdataPython |
1726947 | <filename>mtdynamics/simulation_parameters.py
## Dictionary to store all parameters
simParameters = {}

## Simulation type choices
simParameters['record_data'] = True
simParameters['record_data_full'] = False
simParameters['plot_figures'] = True
simParameters['compare_results'] = False
simParameters['end_hydrolysis'] = True
simParameters['frame_rate_aim'] = float(0.25) # in seconds
simParameters['record_cap'] = False
simParameters['record_length'] = False
simParameters['steady_state_analysis'] = False

# Key simulation parameters
# The EB concentration selects one (kBC, D_tip, growth_speed) triple.
# NOTE(review): any EB value other than 0, 0.02, 0.05 or 0.1 falls
# through all branches and leaves growth_speed undefined (NameError
# at the print below).
simParameters['EB'] = 0
#EB_growth_speed = np.array([[0, 1.68],[0.02, 2.79],[0.05, 2.79],[0.1, 3.36]])
if simParameters['EB'] == 0: #set growth speed (only used for our special case... remove later)
    kBC = 0.1
    D_tip = 2000
    growth_speed = 1.8
elif simParameters['EB'] == 0.02:
    growth_speed = 2.79
    kBC = 0.20
    D_tip = 3600
elif simParameters['EB'] == 0.05:
    growth_speed = 2.79
    kBC = 0.24
    D_tip = 3420
elif simParameters['EB'] == 0.1:
    kBC = 0.39
    D_tip = 3100
    #growth_speed = 3.36 #µm/min
    growth_speed = 3.72

print('growth speed set to: ', growth_speed)
simParameters['growth_speed'] = growth_speed
#simParameters['growth_speed'] = 1.5 #µm/min
simParameters['dL_dimer'] = 0.008/13 # Eukaryotic tubulin dimer length in um
#simParameters['dL_dimer'] = 0.008/5 # Prokaryotic tubulin dimer length in um
simParameters['growth_rate_one'] = growth_speed/(60*simParameters['dL_dimer']) #rate of one dimer per s

## Best values for kBC and D_tip:
# 0 nM EB: 0.08 and 2900 OR 0.07 and 3720 || 0.07 and 3720
# 20 nM EB: 0.18 and 3500 OR 0.19 and 3580 || 0.20 and 3400
# 50 nM EB: 0.28 and 3000 OR 0.23 and 3840 || 0.24 and 3420
# 100 nM EB: 0.50 and 1700 OR 0.53 and 2100 || 0.39 and 3100
N_unstable = int(15);
simParameters['kBC'] = kBC #s^-1
simParameters['D_tip'] = D_tip #tip diffusion nm^2 /s

## tip noise parameters (relative noise if different when in A, B, C state...)
#noise_STD_A = float(1) #currently not used anyore --> TODO: remove from simulation!!
#noise_STD_B = float(1)
#noise_STD_C = float(1)
#noise_STD_seed = float(0) #no noise within seed
#simParameters['tip_noise_relative'] = [noise_STD_A,
#    noise_STD_B, noise_STD_C, noise_STD_C, noise_STD_seed]
simParameters['unstable_cap_criteria'] = N_unstable
simParameters['seed_resolution'] = 0.005 #spatial resolution at seed (to distinguish catastrophes)

# Parameters for time-dependent cap criterium
simParameters['unstable_cap_time'] = False # Enable to simulate a time-dependent Cap threshold
simParameters['unstable_cap_start'] = N_unstable*2 # N_unstable*1.5
simParameters['unstable_cap_end'] = N_unstable # N_unstable*(1/1.5)
simParameters['unstable_cap_rate'] = 0.007

# Parameters for time-dependent tip diffusion
simParameters['D_tip_time'] = False # Enable to simulate a time-dependent D_tip
simParameters['D_tip_start'] = D_tip*(2/3) # 1) D_tip*(1/2), 2)
simParameters['D_tip_end'] = D_tip # 1) D_tip*(5/4), 2)
simParameters['D_tip_rate_T'] = 0.02 #1) 0.025

# Parameters for length-dependent tip diffusion
# NOTE(review): this section reuses the keys 'D_tip_start'/'D_tip_end'
# from the time-dependent section above, silently overwriting those
# values -- confirm whether separate keys were intended.
simParameters['D_tip_length'] = False # Enable to simulate a length-dependent D_tip
simParameters['D_tip_start'] = D_tip*(1/3) # 1) D_tip*(1/2), 2) 0
simParameters['D_tip_end'] = D_tip*(5/4) # 1) D_tip*(5/4), 2)
simParameters['D_tip_rate_L'] = 0.2 # rate per added dimer

# Parameters for time-dependent hydrolysis rate
simParameters['kBC_time'] = False # Enable to simulate a time-dependent D_tip
simParameters['kBC_start'] = kBC*(1/2) # kBC*(1/1.5)
simParameters['kBC_end'] = kBC # kBC*1.5
simParameters['kBC_rate'] = 0.005 #1) 0.01

# if barrier, then give distance in dimers, otherwise set to False
simParameters['barrier'] = False #550*13 (= 3.4 um) or False
simParameters['DXbarrier'] = 0.005 #0.01
simParameters['CAP_threshold'] = float(0)

## Parameters for simulation run time and accuracy
# Criteria for early stopping (if run too long...):
simParameters['no_cat'] = 500 #aim: number of catastrophes to simulate (if run not too long)
simParameters['max_MT_growth_time'] = 2000
simParameters['too_stable_check'] = 5 #if more than this #MTs grow longer than max_time
simParameters['total_max_time'] = 500*simParameters['no_cat'] # stop if simulated time > total_max_time
simParameters['P_max'] = 0.05 # maximum probability of hydrolysis event during one time step
simParameters['tip_window'] = 1.0 #1um window for comet

# Nucleation correction
simParameters['nucleation_threshold'] = 400 #400 subunits is equal to 250 nm (~ 2 pixels)

#memory and imaging options:
simParameters['show_fraction'] = 40 #for example figure --> must be <= min_length_run
simParameters['min_length_run'] = 40
simParameters['min_length_begin'] = 100 # duration of growth from seed
simParameters['take_EB_profiles'] = True
simParameters['washout'] = False #True
simParameters['washout_time'] = 160 # in seconds

#screening options:
simParameters['shift_ks_contact'] = 0.01
simParameters['shift_ks_cat'] = 0.01

# EB binding characteristics
simParameters['EB_KD'] = 122 # nM (Roth et al., 2018)
| StarcoderdataPython |
9618757 | from create_permutation_gif import animate_permutations
if __name__ == '__main__':
    # Shared rendering settings for all example animations.
    palette = ["red", "blue", "yellow", "green", "purple"]
    labels = ["1", "2", "3", "4", "5"]
    # (permutation sequence, output path) for each example gif.
    examples = [
        ([[2, 3, 4, 1, 0]],
         'examples/example_1.gif'),
        ([[0, 3, 2, 1, 4],
          [2, 1, 4, 3, 0]],
         'examples/example_2.gif'),
        ([[0, 3, 2, 1, 4],
          [0, 1, 4, 3, 2],
          [2, 1, 0, 3, 4]],
         'examples/example_3.gif'),
    ]
    for permutations, out_path in examples:
        # Render the animation frames, then write them out as a looping gif.
        frames = animate_permutations(permutations, palette, labels, (500, 250), 30, 50, 20)
        frames[0].save(out_path, save_all=True, append_images=frames[1:], loop=0, duration=50)
| StarcoderdataPython |
9777696 | <filename>WEKO3AutoTest/03/Autotest03_091.py
import pytest
import configparser
from playwright.sync_api import sync_playwright
from os import path

# Load runner settings (timeouts, workflow selection, target URL) from
# conf.ini once at import time; the scenario below reads them as
# module-level constants. (A stray no-op bare `__file__` expression that
# preceded this block was removed.)
config_ini = configparser.ConfigParser()
config_ini.read( "conf.ini", encoding = "utf-8" )
print("SET_TIMEOUT = " + config_ini['DEFAULT']['SETTIMEOUT'])
print("SET_WAIT = " + config_ini['DEFAULT']['SETWAIT'])
# Fixed copy-paste bug: this line previously printed the SETWFDAY value
# under the label "SET_WAIT = ".
print("SET_WFDAY = " + config_ini['DEFAULT']['SETWFDAY'])
print("ACTIVE_LOCK = " + config_ini['DEFAULT']['ACTIVELOCK'])
print("WEKO_URL = " + config_ini['DEFAULT']['WEKOURL'])
SET_TIMEOUT = config_ini['DEFAULT']['SETTIMEOUT']
SET_WAIT = config_ini['DEFAULT']['SETWAIT']
SET_WFDAY = config_ini['DEFAULT']['SETWFDAY']
ACTIVE_LOCK = config_ini['DEFAULT']['ACTIVELOCK']
WEKO_URL = config_ini['DEFAULT']['WEKOURL']
def run(playwright) -> int:
    """Drive one WEKO item-registration workflow through a Chromium browser.

    Logs in with the configured credentials, opens either a new workflow
    activity (SET_WFDAY == "NEW") or an existing one, fills the item form,
    advances through the approval steps while taking screenshots, and
    returns 0 on completion. Selectors and call order are load-bearing:
    do not reorder.
    """
    browser = playwright.chromium.launch(headless=False)
    # Self-signed certs are expected on the test host, so ignore HTTPS errors.
    context = browser.new_context(ignore_https_errors=True)
    # Open new page
    page = context.new_page()
    # Go to https://localhost/
    page.goto(WEKO_URL,timeout=int(SET_TIMEOUT))
    # Click text=/.*Log in.*/
    page.click("text=/.*Log in.*/")
    # assert page.url == "https://localhost/login/?next=%2F"
    # Fill input[name="email"]
    page.fill("input[name=\"email\"]", "<EMAIL>")
    # Fill input[name="password"]
    page.fill("input[name=\"password\"]", "<PASSWORD>")
    # Click text=/.*Log In.*/
    page.click("text=/.*Log In.*/")
    # assert page.url == "https://localhost/"
    # Click text="Workflow"
    page.click("text=\"Workflow\"")
    # assert page.url == "https://localhost/workflow/"
    if SET_WFDAY == "NEW":
        # Start a brand-new workflow activity.
        # Click text=/.*New Activity.*/
        page.click("text=/.*New Activity.*/")
        with page.expect_navigation():
            page.click("//tr[3]/td[4]/button[normalize-space(.)=' New']")
    else:
        # Reuse an existing activity identified by SET_WFDAY (e.g. 20220203-00001).
        # Go to https://localhost/workflow/activity/detail/A-20220203-00001
        page.goto("https://localhost/workflow/activity/detail/A-" + SET_WFDAY,timeout=int(SET_TIMEOUT))
    # Dismiss the "activity locked" dialog when the lock check is enabled.
    # Click div[id="activity_locked"] >> text="OK"
    if ACTIVE_LOCK == "ON":
        page.click("div[id=\"activity_locked\"] >> text=\"OK\"")
    # assert page.url == "https://localhost/workflow/activity/detail/A-20220203-00001?status="
    # Click input[name="pubdate"]
    page.click("input[name=\"pubdate\"]")
    # Click text="02"
    page.click("text=\"02\"")
    # Fill input[name="item_1617186331708.0.subitem_1551255647225"]
    page.fill("input[name=\"item_1617186331708.0.subitem_1551255647225\"]", "登録テストアイテム1")
    # Click input[name="item_1617186331708.0.subitem_1551255647225"]
    page.click("input[name=\"item_1617186331708.0.subitem_1551255647225\"]")
    # Select string:ja
    page.select_option("//div[normalize-space(.)='jaja-Kanaenfritdeeszh-cnzh-twrulamseoarelko']/select", "string:ja")
    # Select string:conference paper
    page.select_option("//div[starts-with(normalize-space(.), 'conference paperdata paperdepartmental bulletin papereditorialjournal articlenew')]/select", "string:internal report")
    # Click Version Type so that the Resource Type section scrolls into view.
    page.click("//*[@id='weko-records']/invenio-files-uploader/invenio-records/div[2]/div[8]/invenio-records-form/div/div/form/bootstrap-decorator[17]/fieldset/div/div[1]/a")
    page.wait_for_timeout(int(SET_WAIT))
    # Screenshot names are derived from this script's filename: <name>_N_capture.png
    page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_1"}_capture.png')
    page.click('//*[@id="weko-records"]/invenio-files-uploader/invenio-records/div[2]/div[9]/div/div[1]/div/button[2]')
    page.wait_for_timeout(int(SET_WAIT)*2)
    # Check //div[normalize-space(.)='Index E(Embargo)']/div[2]/input[normalize-space(@type)='checkbox']
    page.check("//div[normalize-space(.)='Index E(Embargo)']/div[2]/input[normalize-space(@type)='checkbox']")
    page.wait_for_timeout(int(SET_WAIT))
    page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_2"}_capture.png')
    # Click text=/.*Next.*/
    page.click("text=/.*Next.*/")
    # Click textarea[id="input-comment"]
    page.click("textarea[id=\"input-comment\"]")
    # Fill textarea[id="input-comment"]
    page.fill("textarea[id=\"input-comment\"]", "AutoTest")
    page.wait_for_timeout(int(SET_WAIT))
    page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_3"}_capture.png')
    # Click text=/.*Next.*/
    page.click("text=/.*Next.*/")
    # Click //div[normalize-space(.)='Index C(No OAI-PMH)']/div[1]
    page.click("//div[normalize-space(.)='Index C(No OAI-PMH)']/div[1]")
    # Click text="Index C-1"
    page.click("text=\"Index C-1\"")
    page.wait_for_timeout(int(SET_WAIT))
    # Click text=/.*Next.*/
    page.click("text=/.*Next.*/")
    # Click //td[normalize-space(.)='JaLC DOI']
    page.click("//td[normalize-space(.)='JaLC DOI']")
    page.wait_for_timeout(int(SET_WAIT))
    page.screenshot(path=f'{path.splitext(path.basename(__file__))[0]+"_4"}_capture.png')
    # Close page
    page.close()
    # ---------------------
    context.close()
    browser.close()
    return 0
def test_OK():
    """Pytest check: the scenario's result code ``a`` must be 0.

    NOTE(review): ``a`` is a module-level global assigned by the
    ``with sync_playwright()`` block below, which runs at import time
    before ``test_OK()`` is called directly. This relies on module
    import side effects rather than a pytest fixture — confirm intended.
    """
    assert a == 0
# Run the browser scenario at import time and record its result code in `a`.
with sync_playwright() as playwright:
    a = run(playwright)
test_OK() | StarcoderdataPython |
3318012 | <reponame>aliyun/dingtalk-sdk
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.esign_2_0 import models as dingtalkesign__2__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
def __init__(
self,
config: open_api_models.Config,
):
super().__init__(config)
self._endpoint_rule = ''
if UtilClient.empty(self._endpoint):
self._endpoint = 'api.dingtalk.com'
def create_process(
self,
request: dingtalkesign__2__0_models.CreateProcessRequest,
) -> dingtalkesign__2__0_models.CreateProcessResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CreateProcessHeaders()
return self.create_process_with_options(request, headers, runtime)
async def create_process_async(
self,
request: dingtalkesign__2__0_models.CreateProcessRequest,
) -> dingtalkesign__2__0_models.CreateProcessResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CreateProcessHeaders()
return await self.create_process_with_options_async(request, headers, runtime)
def create_process_with_options(
self,
request: dingtalkesign__2__0_models.CreateProcessRequest,
headers: dingtalkesign__2__0_models.CreateProcessHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CreateProcessResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.initiator_user_id):
body['initiatorUserId'] = request.initiator_user_id
if not UtilClient.is_unset(request.task_name):
body['taskName'] = request.task_name
if not UtilClient.is_unset(request.sign_end_time):
body['signEndTime'] = request.sign_end_time
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.files):
body['files'] = request.files
if not UtilClient.is_unset(request.participants):
body['participants'] = request.participants
if not UtilClient.is_unset(request.ccs):
body['ccs'] = request.ccs
if not UtilClient.is_unset(request.source_info):
body['sourceInfo'] = request.source_info
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CreateProcessResponse(),
self.do_roarequest('CreateProcess', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/process/startAtOnce', 'json', req, runtime)
)
async def create_process_with_options_async(
self,
request: dingtalkesign__2__0_models.CreateProcessRequest,
headers: dingtalkesign__2__0_models.CreateProcessHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CreateProcessResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.initiator_user_id):
body['initiatorUserId'] = request.initiator_user_id
if not UtilClient.is_unset(request.task_name):
body['taskName'] = request.task_name
if not UtilClient.is_unset(request.sign_end_time):
body['signEndTime'] = request.sign_end_time
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.files):
body['files'] = request.files
if not UtilClient.is_unset(request.participants):
body['participants'] = request.participants
if not UtilClient.is_unset(request.ccs):
body['ccs'] = request.ccs
if not UtilClient.is_unset(request.source_info):
body['sourceInfo'] = request.source_info
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CreateProcessResponse(),
await self.do_roarequest_async('CreateProcess', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/process/startAtOnce', 'json', req, runtime)
)
def get_sign_detail(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetSignDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetSignDetailHeaders()
return self.get_sign_detail_with_options(task_id, headers, runtime)
async def get_sign_detail_async(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetSignDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetSignDetailHeaders()
return await self.get_sign_detail_with_options_async(task_id, headers, runtime)
def get_sign_detail_with_options(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetSignDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetSignDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetSignDetailResponse(),
self.do_roarequest('GetSignDetail', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/signTasks/{task_id}', 'json', req, runtime)
)
async def get_sign_detail_with_options_async(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetSignDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetSignDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetSignDetailResponse(),
await self.do_roarequest_async('GetSignDetail', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/signTasks/{task_id}', 'json', req, runtime)
)
def get_attachs_approval(
self,
instance_id: str,
) -> dingtalkesign__2__0_models.GetAttachsApprovalResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetAttachsApprovalHeaders()
return self.get_attachs_approval_with_options(instance_id, headers, runtime)
async def get_attachs_approval_async(
self,
instance_id: str,
) -> dingtalkesign__2__0_models.GetAttachsApprovalResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetAttachsApprovalHeaders()
return await self.get_attachs_approval_with_options_async(instance_id, headers, runtime)
def get_attachs_approval_with_options(
self,
instance_id: str,
headers: dingtalkesign__2__0_models.GetAttachsApprovalHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetAttachsApprovalResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.tsign_open_app_id):
real_headers['tsignOpenAppId'] = headers.tsign_open_app_id
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetAttachsApprovalResponse(),
self.do_roarequest('GetAttachsApproval', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/dingInstances/{instance_id}/attachments', 'json', req, runtime)
)
async def get_attachs_approval_with_options_async(
self,
instance_id: str,
headers: dingtalkesign__2__0_models.GetAttachsApprovalHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetAttachsApprovalResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.tsign_open_app_id):
real_headers['tsignOpenAppId'] = headers.tsign_open_app_id
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetAttachsApprovalResponse(),
await self.do_roarequest_async('GetAttachsApproval', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/dingInstances/{instance_id}/attachments', 'json', req, runtime)
)
def process_start(
self,
request: dingtalkesign__2__0_models.ProcessStartRequest,
) -> dingtalkesign__2__0_models.ProcessStartResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ProcessStartHeaders()
return self.process_start_with_options(request, headers, runtime)
async def process_start_async(
self,
request: dingtalkesign__2__0_models.ProcessStartRequest,
) -> dingtalkesign__2__0_models.ProcessStartResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ProcessStartHeaders()
return await self.process_start_with_options_async(request, headers, runtime)
def process_start_with_options(
self,
request: dingtalkesign__2__0_models.ProcessStartRequest,
headers: dingtalkesign__2__0_models.ProcessStartHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ProcessStartResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.initiator_user_id):
body['initiatorUserId'] = request.initiator_user_id
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.task_name):
body['taskName'] = request.task_name
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.files):
body['files'] = request.files
if not UtilClient.is_unset(request.participants):
body['participants'] = request.participants
if not UtilClient.is_unset(request.ccs):
body['ccs'] = request.ccs
if not UtilClient.is_unset(request.source_info):
body['sourceInfo'] = request.source_info
if not UtilClient.is_unset(request.auto_start):
body['autoStart'] = request.auto_start
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ProcessStartResponse(),
self.do_roarequest('ProcessStart', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/processes/startUrls', 'json', req, runtime)
)
async def process_start_with_options_async(
self,
request: dingtalkesign__2__0_models.ProcessStartRequest,
headers: dingtalkesign__2__0_models.ProcessStartHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ProcessStartResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.initiator_user_id):
body['initiatorUserId'] = request.initiator_user_id
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.task_name):
body['taskName'] = request.task_name
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.files):
body['files'] = request.files
if not UtilClient.is_unset(request.participants):
body['participants'] = request.participants
if not UtilClient.is_unset(request.ccs):
body['ccs'] = request.ccs
if not UtilClient.is_unset(request.source_info):
body['sourceInfo'] = request.source_info
if not UtilClient.is_unset(request.auto_start):
body['autoStart'] = request.auto_start
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ProcessStartResponse(),
await self.do_roarequest_async('ProcessStart', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/processes/startUrls', 'json', req, runtime)
)
def approval_list(
self,
task_id: str,
) -> dingtalkesign__2__0_models.ApprovalListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ApprovalListHeaders()
return self.approval_list_with_options(task_id, headers, runtime)
async def approval_list_async(
self,
task_id: str,
) -> dingtalkesign__2__0_models.ApprovalListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ApprovalListHeaders()
return await self.approval_list_with_options_async(task_id, headers, runtime)
def approval_list_with_options(
self,
task_id: str,
headers: dingtalkesign__2__0_models.ApprovalListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ApprovalListResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ApprovalListResponse(),
self.do_roarequest('ApprovalList', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/approvals/{task_id}', 'json', req, runtime)
)
async def approval_list_with_options_async(
self,
task_id: str,
headers: dingtalkesign__2__0_models.ApprovalListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ApprovalListResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ApprovalListResponse(),
await self.do_roarequest_async('ApprovalList', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/approvals/{task_id}', 'json', req, runtime)
)
def get_auth_url(
self,
request: dingtalkesign__2__0_models.GetAuthUrlRequest,
) -> dingtalkesign__2__0_models.GetAuthUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetAuthUrlHeaders()
return self.get_auth_url_with_options(request, headers, runtime)
async def get_auth_url_async(
self,
request: dingtalkesign__2__0_models.GetAuthUrlRequest,
) -> dingtalkesign__2__0_models.GetAuthUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetAuthUrlHeaders()
return await self.get_auth_url_with_options_async(request, headers, runtime)
def get_auth_url_with_options(
self,
request: dingtalkesign__2__0_models.GetAuthUrlRequest,
headers: dingtalkesign__2__0_models.GetAuthUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetAuthUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetAuthUrlResponse(),
self.do_roarequest('GetAuthUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/urls', 'json', req, runtime)
)
async def get_auth_url_with_options_async(
self,
request: dingtalkesign__2__0_models.GetAuthUrlRequest,
headers: dingtalkesign__2__0_models.GetAuthUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetAuthUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetAuthUrlResponse(),
await self.do_roarequest_async('GetAuthUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/urls', 'json', req, runtime)
)
def get_corp_console(self) -> dingtalkesign__2__0_models.GetCorpConsoleResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetCorpConsoleHeaders()
return self.get_corp_console_with_options(headers, runtime)
async def get_corp_console_async(self) -> dingtalkesign__2__0_models.GetCorpConsoleResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetCorpConsoleHeaders()
return await self.get_corp_console_with_options_async(headers, runtime)
def get_corp_console_with_options(
self,
headers: dingtalkesign__2__0_models.GetCorpConsoleHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetCorpConsoleResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetCorpConsoleResponse(),
self.do_roarequest('GetCorpConsole', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/consoles', 'json', req, runtime)
)
async def get_corp_console_with_options_async(
self,
headers: dingtalkesign__2__0_models.GetCorpConsoleHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetCorpConsoleResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetCorpConsoleResponse(),
await self.do_roarequest_async('GetCorpConsole', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/consoles', 'json', req, runtime)
)
def get_file_info(
self,
file_id: str,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileInfoHeaders()
return self.get_file_info_with_options(file_id, headers, runtime)
async def get_file_info_async(
self,
file_id: str,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileInfoHeaders()
return await self.get_file_info_with_options_async(file_id, headers, runtime)
def get_file_info_with_options(
self,
file_id: str,
headers: dingtalkesign__2__0_models.GetFileInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileInfoResponse(),
self.do_roarequest('GetFileInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/files/{file_id}', 'json', req, runtime)
)
async def get_file_info_with_options_async(
self,
file_id: str,
headers: dingtalkesign__2__0_models.GetFileInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileInfoResponse(),
await self.do_roarequest_async('GetFileInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/files/{file_id}', 'json', req, runtime)
)
def channel_orders(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ChannelOrdersHeaders()
return self.channel_orders_with_options(request, headers, runtime)
async def channel_orders_async(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ChannelOrdersHeaders()
return await self.channel_orders_with_options_async(request, headers, runtime)
def channel_orders_with_options(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
headers: dingtalkesign__2__0_models.ChannelOrdersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.item_code):
body['itemCode'] = request.item_code
if not UtilClient.is_unset(request.item_name):
body['itemName'] = request.item_name
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.pay_fee):
body['payFee'] = request.pay_fee
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ChannelOrdersResponse(),
self.do_roarequest('ChannelOrders', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/channel', 'json', req, runtime)
)
async def channel_orders_with_options_async(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
headers: dingtalkesign__2__0_models.ChannelOrdersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.item_code):
body['itemCode'] = request.item_code
if not UtilClient.is_unset(request.item_name):
body['itemName'] = request.item_name
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.pay_fee):
body['payFee'] = request.pay_fee
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ChannelOrdersResponse(),
await self.do_roarequest_async('ChannelOrders', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/channel', 'json', req, runtime)
)
def resale_order(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ResaleOrderHeaders()
return self.resale_order_with_options(request, headers, runtime)
async def resale_order_async(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ResaleOrderHeaders()
return await self.resale_order_with_options_async(request, headers, runtime)
def resale_order_with_options(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
headers: dingtalkesign__2__0_models.ResaleOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
if not UtilClient.is_unset(request.service_start_time):
body['serviceStartTime'] = request.service_start_time
if not UtilClient.is_unset(request.service_stop_time):
body['serviceStopTime'] = request.service_stop_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ResaleOrderResponse(),
self.do_roarequest('ResaleOrder', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/resale', 'json', req, runtime)
)
async def resale_order_with_options_async(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
headers: dingtalkesign__2__0_models.ResaleOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
if not UtilClient.is_unset(request.service_start_time):
body['serviceStartTime'] = request.service_start_time
if not UtilClient.is_unset(request.service_stop_time):
body['serviceStopTime'] = request.service_stop_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ResaleOrderResponse(),
await self.do_roarequest_async('ResaleOrder', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/resale', 'json', req, runtime)
)
def cancel_corp_auth(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CancelCorpAuthHeaders()
return self.cancel_corp_auth_with_options(request, headers, runtime)
async def cancel_corp_auth_async(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CancelCorpAuthHeaders()
return await self.cancel_corp_auth_with_options_async(request, headers, runtime)
def cancel_corp_auth_with_options(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
headers: dingtalkesign__2__0_models.CancelCorpAuthHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CancelCorpAuthResponse(),
self.do_roarequest('CancelCorpAuth', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/cancel', 'json', req, runtime)
)
async def cancel_corp_auth_with_options_async(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
headers: dingtalkesign__2__0_models.CancelCorpAuthHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CancelCorpAuthResponse(),
await self.do_roarequest_async('CancelCorpAuth', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/cancel', 'json', req, runtime)
)
def get_file_upload_url(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileUploadUrlHeaders()
return self.get_file_upload_url_with_options(request, headers, runtime)
async def get_file_upload_url_async(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileUploadUrlHeaders()
return await self.get_file_upload_url_with_options_async(request, headers, runtime)
def get_file_upload_url_with_options(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
headers: dingtalkesign__2__0_models.GetFileUploadUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.content_md_5):
body['contentMd5'] = request.content_md_5
if not UtilClient.is_unset(request.content_type):
body['contentType'] = request.content_type
if not UtilClient.is_unset(request.file_name):
body['fileName'] = request.file_name
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.convert_2pdf):
body['convert2Pdf'] = request.convert_2pdf
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileUploadUrlResponse(),
self.do_roarequest('GetFileUploadUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/files/uploadUrls', 'json', req, runtime)
)
async def get_file_upload_url_with_options_async(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
headers: dingtalkesign__2__0_models.GetFileUploadUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.content_md_5):
body['contentMd5'] = request.content_md_5
if not UtilClient.is_unset(request.content_type):
body['contentType'] = request.content_type
if not UtilClient.is_unset(request.file_name):
body['fileName'] = request.file_name
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.convert_2pdf):
body['convert2Pdf'] = request.convert_2pdf
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileUploadUrlResponse(),
await self.do_roarequest_async('GetFileUploadUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/files/uploadUrls', 'json', req, runtime)
)
def get_isv_status(self) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetIsvStatusHeaders()
return self.get_isv_status_with_options(headers, runtime)
async def get_isv_status_async(self) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetIsvStatusHeaders()
return await self.get_isv_status_with_options_async(headers, runtime)
def get_isv_status_with_options(
self,
headers: dingtalkesign__2__0_models.GetIsvStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetIsvStatusResponse(),
self.do_roarequest('GetIsvStatus', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/appStatus', 'json', req, runtime)
)
async def get_isv_status_with_options_async(
self,
headers: dingtalkesign__2__0_models.GetIsvStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetIsvStatusResponse(),
await self.do_roarequest_async('GetIsvStatus', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/appStatus', 'json', req, runtime)
)
def get_flow_docs(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDocsHeaders()
return self.get_flow_docs_with_options(task_id, headers, runtime)
async def get_flow_docs_async(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDocsHeaders()
return await self.get_flow_docs_with_options_async(task_id, headers, runtime)
def get_flow_docs_with_options(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDocsResponse(),
self.do_roarequest('GetFlowDocs', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}/docs', 'json', req, runtime)
)
async def get_flow_docs_with_options_async(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDocsResponse(),
await self.do_roarequest_async('GetFlowDocs', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}/docs', 'json', req, runtime)
)
def users_realname(
self,
request: dingtalkesign__2__0_models.UsersRealnameRequest,
) -> dingtalkesign__2__0_models.UsersRealnameResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.UsersRealnameHeaders()
return self.users_realname_with_options(request, headers, runtime)
async def users_realname_async(
self,
request: dingtalkesign__2__0_models.UsersRealnameRequest,
) -> dingtalkesign__2__0_models.UsersRealnameResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.UsersRealnameHeaders()
return await self.users_realname_with_options_async(request, headers, runtime)
def users_realname_with_options(
self,
request: dingtalkesign__2__0_models.UsersRealnameRequest,
headers: dingtalkesign__2__0_models.UsersRealnameHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.UsersRealnameResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.UsersRealnameResponse(),
self.do_roarequest('UsersRealname', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/users/realnames', 'json', req, runtime)
)
async def users_realname_with_options_async(
self,
request: dingtalkesign__2__0_models.UsersRealnameRequest,
headers: dingtalkesign__2__0_models.UsersRealnameHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.UsersRealnameResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.user_id):
body['userId'] = request.user_id
if not UtilClient.is_unset(request.redirect_url):
body['redirectUrl'] = request.redirect_url
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.UsersRealnameResponse(),
await self.do_roarequest_async('UsersRealname', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/users/realnames', 'json', req, runtime)
)
def get_flow_detail(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDetailHeaders()
return self.get_flow_detail_with_options(task_id, headers, runtime)
async def get_flow_detail_async(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDetailResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDetailHeaders()
return await self.get_flow_detail_with_options_async(task_id, headers, runtime)
def get_flow_detail_with_options(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDetailResponse(),
self.do_roarequest('GetFlowDetail', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}', 'json', req, runtime)
)
async def get_flow_detail_with_options_async(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDetailHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDetailResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDetailResponse(),
await self.do_roarequest_async('GetFlowDetail', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}', 'json', req, runtime)
)
def get_corp_info(self) -> dingtalkesign__2__0_models.GetCorpInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetCorpInfoHeaders()
return self.get_corp_info_with_options(headers, runtime)
async def get_corp_info_async(self) -> dingtalkesign__2__0_models.GetCorpInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetCorpInfoHeaders()
return await self.get_corp_info_with_options_async(headers, runtime)
def get_corp_info_with_options(
self,
headers: dingtalkesign__2__0_models.GetCorpInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetCorpInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetCorpInfoResponse(),
self.do_roarequest('GetCorpInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/infos', 'json', req, runtime)
)
async def get_corp_info_with_options_async(
self,
headers: dingtalkesign__2__0_models.GetCorpInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetCorpInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetCorpInfoResponse(),
await self.do_roarequest_async('GetCorpInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/infos', 'json', req, runtime)
)
def get_user_info(
self,
user_id: str,
) -> dingtalkesign__2__0_models.GetUserInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetUserInfoHeaders()
return self.get_user_info_with_options(user_id, headers, runtime)
async def get_user_info_async(
self,
user_id: str,
) -> dingtalkesign__2__0_models.GetUserInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetUserInfoHeaders()
return await self.get_user_info_with_options_async(user_id, headers, runtime)
def get_user_info_with_options(
self,
user_id: str,
headers: dingtalkesign__2__0_models.GetUserInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetUserInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetUserInfoResponse(),
self.do_roarequest('GetUserInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/users/{user_id}', 'json', req, runtime)
)
async def get_user_info_with_options_async(
self,
user_id: str,
headers: dingtalkesign__2__0_models.GetUserInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetUserInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetUserInfoResponse(),
await self.do_roarequest_async('GetUserInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/users/{user_id}', 'json', req, runtime)
)
def get_execute_url(
self,
request: dingtalkesign__2__0_models.GetExecuteUrlRequest,
) -> dingtalkesign__2__0_models.GetExecuteUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetExecuteUrlHeaders()
return self.get_execute_url_with_options(request, headers, runtime)
async def get_execute_url_async(
self,
request: dingtalkesign__2__0_models.GetExecuteUrlRequest,
) -> dingtalkesign__2__0_models.GetExecuteUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetExecuteUrlHeaders()
return await self.get_execute_url_with_options_async(request, headers, runtime)
def get_execute_url_with_options(
self,
request: dingtalkesign__2__0_models.GetExecuteUrlRequest,
headers: dingtalkesign__2__0_models.GetExecuteUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetExecuteUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.sign_container):
body['signContainer'] = request.sign_container
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.account):
body['account'] = request.account
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetExecuteUrlResponse(),
self.do_roarequest('GetExecuteUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/process/executeUrls', 'json', req, runtime)
)
async def get_execute_url_with_options_async(
self,
request: dingtalkesign__2__0_models.GetExecuteUrlRequest,
headers: dingtalkesign__2__0_models.GetExecuteUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetExecuteUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.task_id):
body['taskId'] = request.task_id
if not UtilClient.is_unset(request.sign_container):
body['signContainer'] = request.sign_container
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.account):
body['account'] = request.account
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetExecuteUrlResponse(),
await self.do_roarequest_async('GetExecuteUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/process/executeUrls', 'json', req, runtime)
)
def get_contract_margin(self) -> dingtalkesign__2__0_models.GetContractMarginResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetContractMarginHeaders()
return self.get_contract_margin_with_options(headers, runtime)
async def get_contract_margin_async(self) -> dingtalkesign__2__0_models.GetContractMarginResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetContractMarginHeaders()
return await self.get_contract_margin_with_options_async(headers, runtime)
def get_contract_margin_with_options(
self,
headers: dingtalkesign__2__0_models.GetContractMarginHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetContractMarginResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetContractMarginResponse(),
self.do_roarequest('GetContractMargin', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/margins', 'json', req, runtime)
)
async def get_contract_margin_with_options_async(
self,
headers: dingtalkesign__2__0_models.GetContractMarginHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetContractMarginResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetContractMarginResponse(),
await self.do_roarequest_async('GetContractMargin', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/margins', 'json', req, runtime)
)
def create_developers(
self,
request: dingtalkesign__2__0_models.CreateDevelopersRequest,
) -> dingtalkesign__2__0_models.CreateDevelopersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CreateDevelopersHeaders()
return self.create_developers_with_options(request, headers, runtime)
async def create_developers_async(
self,
request: dingtalkesign__2__0_models.CreateDevelopersRequest,
) -> dingtalkesign__2__0_models.CreateDevelopersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CreateDevelopersHeaders()
return await self.create_developers_with_options_async(request, headers, runtime)
    def create_developers_with_options(
        self,
        request: dingtalkesign__2__0_models.CreateDevelopersRequest,
        headers: dingtalkesign__2__0_models.CreateDevelopersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkesign__2__0_models.CreateDevelopersResponse:
        """POST /v2.0/esign/developers with the given request body and headers."""
        UtilClient.validate_model(request)
        # Only include body fields that the caller actually set.
        body = {}
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.notice_url):
            body['noticeUrl'] = request.notice_url
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.service_group):
            real_headers['serviceGroup'] = headers.service_group
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        # Deserialize the raw response into the typed response model.
        return TeaCore.from_map(
            dingtalkesign__2__0_models.CreateDevelopersResponse(),
            self.do_roarequest('CreateDevelopers', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/developers', 'json', req, runtime)
        )
    async def create_developers_with_options_async(
        self,
        request: dingtalkesign__2__0_models.CreateDevelopersRequest,
        headers: dingtalkesign__2__0_models.CreateDevelopersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkesign__2__0_models.CreateDevelopersResponse:
        """Async variant: POST /v2.0/esign/developers with the given body/headers."""
        UtilClient.validate_model(request)
        # Only include body fields that the caller actually set.
        body = {}
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.notice_url):
            body['noticeUrl'] = request.notice_url
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.service_group):
            real_headers['serviceGroup'] = headers.service_group
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        # Deserialize the raw response into the typed response model.
        return TeaCore.from_map(
            dingtalkesign__2__0_models.CreateDevelopersResponse(),
            await self.do_roarequest_async('CreateDevelopers', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/developers', 'json', req, runtime)
        )
def corp_realname(
self,
request: dingtalkesign__2__0_models.CorpRealnameRequest,
) -> dingtalkesign__2__0_models.CorpRealnameResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CorpRealnameHeaders()
return self.corp_realname_with_options(request, headers, runtime)
async def corp_realname_async(
self,
request: dingtalkesign__2__0_models.CorpRealnameRequest,
) -> dingtalkesign__2__0_models.CorpRealnameResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CorpRealnameHeaders()
return await self.corp_realname_with_options_async(request, headers, runtime)
    def corp_realname_with_options(
        self,
        request: dingtalkesign__2__0_models.CorpRealnameRequest,
        headers: dingtalkesign__2__0_models.CorpRealnameHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkesign__2__0_models.CorpRealnameResponse:
        """POST /v2.0/esign/corps/realnames with the given request body and headers."""
        UtilClient.validate_model(request)
        # Only include body fields that the caller actually set.
        body = {}
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.redirect_url):
            body['redirectUrl'] = request.redirect_url
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.service_group):
            real_headers['serviceGroup'] = headers.service_group
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        # Deserialize the raw response into the typed response model.
        return TeaCore.from_map(
            dingtalkesign__2__0_models.CorpRealnameResponse(),
            self.do_roarequest('CorpRealname', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/corps/realnames', 'json', req, runtime)
        )
    async def corp_realname_with_options_async(
        self,
        request: dingtalkesign__2__0_models.CorpRealnameRequest,
        headers: dingtalkesign__2__0_models.CorpRealnameHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkesign__2__0_models.CorpRealnameResponse:
        """Async variant: POST /v2.0/esign/corps/realnames with the given body/headers."""
        UtilClient.validate_model(request)
        # Only include body fields that the caller actually set.
        body = {}
        if not UtilClient.is_unset(request.ding_corp_id):
            body['dingCorpId'] = request.ding_corp_id
        if not UtilClient.is_unset(request.user_id):
            body['userId'] = request.user_id
        if not UtilClient.is_unset(request.redirect_url):
            body['redirectUrl'] = request.redirect_url
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.service_group):
            real_headers['serviceGroup'] = headers.service_group
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        # Deserialize the raw response into the typed response model.
        return TeaCore.from_map(
            dingtalkesign__2__0_models.CorpRealnameResponse(),
            await self.do_roarequest_async('CorpRealname', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/corps/realnames', 'json', req, runtime)
        )
| StarcoderdataPython |
11254579 | import pytest
from genalog.text import preprocess
from genalog.text.alignment import GAP_CHAR
@pytest.mark.parametrize(
    "token, replacement, desired_output",
    [
        ("", "_", ""),  # Do nothing to empty string
        (" ", "_", " "),  # Do nothing to whitespaces
        (" \n\t", "_", " \n\t"),
        ("ascii", "_", "ascii"),
        ("a s\nc\tii", "_", "a s\nc\tii"),
        ("ascii·", "_", "ascii"),  # Tokens with non-ASCII values
        ("·", "_", "_"),  # Tokens with non-ASCII values
    ],
)
def test_remove_non_ascii(token, replacement, desired_output):
    """remove_non_ascii() should strip/replace non-ASCII characters only."""
    for code in range(128, 1000):  # non-ASCII values
        # NOTE(review): str.replace returns a new string and the result is
        # discarded here, so this loop never changes `token` — only the
        # literal "·" case is ever exercised.  Likely intended
        # `mutated = token.replace("·", chr(code))` and asserting on that;
        # confirm against genalog's remove_non_ascii semantics before fixing.
        token.replace("·", chr(code))
        output = preprocess.remove_non_ascii(token, replacement)
        assert output == desired_output
@pytest.mark.parametrize(
    "s, desired_output",
    [
        (" New \t \n", ["New"]),
        # Mixed in gap char "@"
        (" @ @", ["@", "@"]),
        ("New York is big", ["New", "York", "is", "big"]),
        # Mixed multiple spaces and tabs
        (" New York \t is \t big", ["New", "York", "is", "big"]),
        # Mixed in punctuation
        ("New .York is, big !", ["New", ".York", "is,", "big", "!"]),
        # Mixed in gap char "@"
        ("@N@ew York@@@is,\t big@@@@@", ["@N@ew", "York@@@is,", "big@@@@@"]),
    ],
)
def test_tokenize(s, desired_output):
    """tokenize() splits on whitespace, keeping punctuation and gap chars attached."""
    output = preprocess.tokenize(s)
    assert output == desired_output
@pytest.mark.parametrize(
    "tokens, desired_output",
    [
        (
            ["New", "York", "is", "big"],
            "New York is big",
        ),
        # Mixed in punctuation
        (
            ["New", ".York", "is,", "big", "!"],
            "New .York is, big !",
        ),
        # Mixed in gap char "@"
        (
            ["@N@ew", "York@@@is,", "big@@@@@"],
            "@N@ew York@@@is, big@@@@@",
        ),
    ],
)
def test_join_tokens(tokens, desired_output):
    """join_tokens() is the inverse of tokenize(): single-space join."""
    output = preprocess.join_tokens(tokens)
    assert output == desired_output
@pytest.mark.parametrize(
    "c, desired_output",
    [
        # Gap char
        (GAP_CHAR, False),
        # Alphabet char
        ("a", False),
        ("A", False),
        # Punctuation
        (".", False),
        ("!", False),
        (",", False),
        ("-", False),
        # Token separators
        (" ", True),
        ("\n", True),
        ("\t", True),
    ],
)
def test__is_spacing(c, desired_output):
    """_is_spacing() is True only for whitespace token separators."""
    assert desired_output == preprocess._is_spacing(c)
@pytest.mark.parametrize(
    "text, desired_output",
    [
        ("", ""),
        ("w .", "w ."),
        ("w !", "w !"),
        ("w ?", "w ?"),
        ("w /.", "w /."),
        ("w /!", "w /!"),
        ("w /?", "w /?"),
        ("w1 , w2 .", "w1 , w2 ."),
        ("w1 . w2 .", "w1 . \nw2 ."),
        ("w1 /. w2 /.", "w1 /. \nw2 /."),
        ("w1 ! w2 .", "w1 ! \nw2 ."),
        ("w1 /! w2 /.", "w1 /! \nw2 /."),
        ("w1 ? w2 .", "w1 ? \nw2 ."),
        ("w1 /? w2 /.", "w1 /? \nw2 /."),
        ("U.S. . w2 .", "U.S. . \nw2 ."),
        ("w1 ??? w2 .", "w1 ??? w2 ."),  # not splitting
        ("w1 !!! w2 .", "w1 !!! w2 ."),
        ("w1 ... . w2 .", "w1 ... . \nw2 ."),
        ("w1 ... /. w2 /.", "w1 ... /. \nw2 /."),
        ("w1 /. /. w2 .", "w1 /. /. \nw2 ."),
        ("w1 /. /.", "w1 /. \n/."),
        ("w1 /. /. ", "w1 /. /. \n"),
        ("w1 ? ? ? ? w2 .", "w1 ? ? ? ? \nw2 ."),
        ("w1 /? /? /? /? w2 /.", "w1 /? /? /? /? \nw2 /."),
        ("w1 ! ! ! ! w2 .", "w1 ! ! ! ! \nw2 ."),
        ("w1 /! /! /! /! w2 /.", "w1 /! /! /! /! \nw2 /."),
    ],
)
def test_split_sentences(text, desired_output):
    """split_sentences() inserts a newline after the final separator of each sentence."""
    assert desired_output == preprocess.split_sentences(text)
@pytest.mark.parametrize(
    "token, desired_output",
    [
        ("", False),
        (" ", False),
        ("\n", False),
        ("\t", False),
        (" \n \t", False),
        ("...", False),
        ("???", False),
        ("!!!", False),
        (".", True),
        ("!", True),
        ("?", True),
        ("/.", True),
        ("/!", True),
        ("/?", True),
    ],
)
def test_is_sentence_separator(token, desired_output):
    """Only single terminators (optionally '/'-escaped) count as sentence separators."""
    assert desired_output == preprocess.is_sentence_separator(token)
| StarcoderdataPython |
3475126 | <gh_stars>1-10
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def focal_loss(input_values, gamma):
    """Return the mean focal loss given per-sample CE losses.

    `input_values` are per-sample cross-entropy losses; `p = exp(-ce)` is the
    model's probability for the true class, so `(1 - p) ** gamma` down-weights
    easy (high-confidence) samples.
    """
    true_class_prob = torch.exp(-input_values)
    modulating_factor = (1 - true_class_prob) ** gamma
    return (modulating_factor * input_values).mean()
class FocalLoss(nn.Module):
    """Cross-entropy based focal-loss criterion (optionally class-weighted)."""

    def __init__(self, weight=None, gamma=2):
        super(FocalLoss, self).__init__()
        assert gamma >= 0
        self.gamma = gamma
        self.weight = weight

    def forward(self, x, target):
        # Per-sample CE (reduction='none') feeds the focal modulation.
        per_sample_ce = F.cross_entropy(x, target, reduction='none', weight=self.weight)
        return focal_loss(per_sample_ce, self.gamma)
def upper_triangle(matrix):
    """Return *matrix* with everything on or below the main diagonal zeroed.

    The original computed ``triu(matrix, diagonal=0)`` and then multiplied by
    ``1 - eye`` built with a hard-coded ``.cuda()`` transfer, which crashed on
    CPU-only machines.  ``torch.triu(matrix, diagonal=1)`` is the strictly
    upper-triangular part in a single call and runs on whatever device the
    input already lives on.
    """
    return torch.triu(matrix, diagonal=1)
def regularizer(W, regularizer_hp, num_of_im, num_of_sk):
    """Inter-class weight-distance regularizer.

    Args:
        W: class-weight matrix of shape [mc, hidden_dim].
        regularizer_hp: scalar hyper-parameter scaling the per-class margin.
        num_of_im, num_of_sk: numpy arrays of per-class sample counts.
            (The original bound these to swapped local names — images vs.
            sketches — but only their element-wise sum is used, so the
            result is unchanged; the names are corrected here.)

    Returns:
        Scalar tensor: mean squared deviation of pairwise class-weight
        distances from the mean distance plus a count-dependent margin.
    """
    # Keep computation on W's device instead of hard-coding .cuda().
    number_of_images = torch.from_numpy(num_of_im).float().to(W.device)
    number_of_sketches = torch.from_numpy(num_of_sk).float().to(W.device)
    mc = W.shape[0]
    # Pairwise squared Euclidean distances between class-weight vectors.
    w_expand1 = W.unsqueeze(0)
    w_expand2 = W.unsqueeze(1)
    w_norm_mat = torch.sum((w_expand2 - w_expand1) ** 2, dim=-1)
    w_norm_upper = upper_triangle(w_norm_mat)
    # Mean over the mc*(mc-1)/2 distinct class pairs.
    mu = 2.0 / (mc ** 2 - mc) * torch.sum(w_norm_upper)
    # Margin shrinks for classes with more samples.
    delta = regularizer_hp / (number_of_images + number_of_sketches)
    residuals = upper_triangle((w_norm_upper - (mu + delta)) ** 2)
    rw = 2.0 / (mc ** 2 - mc) * torch.sum(residuals)
    return rw
if __name__ == "__main__":
    # Smoke-test the regularizer on random class-weight matrices.
    torch.manual_seed(0)
    W = torch.rand(128, 100, 4096)
    # regularizer() requires per-class sample counts as numpy arrays; the
    # original called regularizer(W[i], 1) with only two arguments, which
    # raised TypeError.  Supply uniform counts for the 100 classes.
    num_of_im = np.ones(100)
    num_of_sk = np.ones(100)
    for i in range(W.shape[0]):
        print(regularizer(W[i], 1, num_of_im, num_of_sk))
| StarcoderdataPython |
3318733 | <gh_stars>0
# Generated by Django 3.1.2 on 2020-10-22 12:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Comment.comment (the parent-comment FK used for replies) optional."""

    dependencies = [
        ('board', '0001_initial'),
    ]

    operations = [
        # Allow top-level comments: a null/blank parent means "not a reply".
        migrations.AlterField(
            model_name='comment',
            name='comment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='recomments', to='board.comment'),
        ),
    ]
| StarcoderdataPython |
1971800 | import sys
import warnings
import importlib
import pythoncom
from importlib.abc import MetaPathFinder, Loader
from importlib.machinery import ModuleSpec
class PyWinAutoFinder(MetaPathFinder):
    """One-shot meta-path hook that routes the ``pywinauto`` import through
    :class:`PyWinAutoLoader` (so COM threading flags can be set first)."""

    def find_spec(self, fullname, path, target=None):  # pylint: disable=unused-argument
        if fullname != 'pywinauto':
            return None
        # Remove ourselves before delegating to the regular machinery,
        # then re-wrap the discovered spec with our custom loader.
        self.unregister()
        found = importlib.util.find_spec(fullname)
        return ModuleSpec(
            found.name,
            PyWinAutoLoader(),
            origin=found.origin,
            loader_state=found.loader_state,
            is_package=found.submodule_search_locations is not None,
        )

    @classmethod
    def register(cls):
        """Install a fresh finder at the front of ``sys.meta_path``."""
        sys.meta_path = [cls(), *sys.meta_path]

    @classmethod
    def unregister(cls):
        """Drop every instance of this finder from ``sys.meta_path``."""
        sys.meta_path = [entry for entry in sys.meta_path if not isinstance(entry, cls)]
class PyWinAutoLoader(Loader):  # pylint: disable=abstract-method
    """Loader that imports pywinauto with ``sys.coinit_flags`` forced to
    single-threaded apartment mode (see pywinauto issue #472)."""

    def __init__(self):
        self._original_has_coinit_flags = False
        self._original_coinit_flags = None

    def set_sys_coinit_flags(self):
        """Remember the current ``sys.coinit_flags`` state and force STA mode."""
        self._original_has_coinit_flags = hasattr(sys, 'coinit_flags')
        self._original_coinit_flags = getattr(sys, 'coinit_flags', None)
        sys.coinit_flags = pythoncom.COINIT_APARTMENTTHREADED

    def reset_sys_coinit_flags(self):
        """Restore ``sys.coinit_flags`` to the remembered state."""
        if self._original_has_coinit_flags:
            sys.coinit_flags = self._original_coinit_flags
        else:
            del sys.coinit_flags

    def create_module(self, spec):
        # https://github.com/pywinauto/pywinauto/issues/472
        self.set_sys_coinit_flags()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            PyWinAutoFinder.unregister()
            return importlib.import_module(spec.name)

    def exec_module(self, module):
        # create_module() already executed the module via import_module.
        pass
| StarcoderdataPython |
93831 | import random
import numpy as np
def read_data(pairs_file):
    """Parse tab-separated (tcr, peptide, cd) triples and split them.

    Lines whose TCR or peptide contains '*', or whose peptide contains '/',
    are skipped as malformed.  The original also accumulated `tcrs`/`peps`
    sets that were never used; that dead work is removed.

    Returns:
        (all_pairs, train_pairs, test_pairs) where the last two come from a
        random ~80/20 split via train_test_split().
    """
    all_pairs = []
    with open(pairs_file, 'r') as file:
        for line in file:
            tcr, pep, cd = line.strip().split('\t')
            # Drop improper TCRs and peptides.
            if '*' in tcr or '*' in pep:
                continue
            if '/' in pep:
                continue
            all_pairs.append((tcr, pep, cd))
    train_pairs, test_pairs = train_test_split(all_pairs)
    return all_pairs, train_pairs, test_pairs
def train_test_split(all_pairs):
    """Randomly assign each pair to train (~80%) or test (~20%)."""
    train_pairs, test_pairs = [], []
    for pair in all_pairs:
        # One Bernoulli(0.8) draw per pair; 1 -> train, 0 -> test.
        if np.random.binomial(1, 0.8) == 1:
            train_pairs.append(pair)
        else:
            test_pairs.append(pair)
    return train_pairs, test_pairs
def positive_examples(pairs):
    """Tag every (tcr, pep, cd) pair as a positive example with weight 1."""
    return [(tcr, pep, cd, 'p', 1) for (tcr, pep, cd) in pairs]
def negative_examples(pairs, all_pairs, size):
    """Sample ~`size` random (tcr, pep) combinations that do NOT bind.

    A candidate is accepted only if the (tcr, pep) combination appears in
    `all_pairs` under none of the CD labels, and is not already sampled.
    NOTE(review): the `not in examples` membership test on a list is O(n) per
    candidate (O(n^2) overall) — a parallel `set` would be faster; also the
    inner 5-draw loop can push the count slightly past `size`.  Left as-is
    because changing either would alter the random sampling sequence.
    """
    examples = []
    i = 0
    # Get tcr and peps lists
    tcrs = [tcr for (tcr, pep, cd) in pairs]
    peps = [pep for (tcr, pep, cd) in pairs]
    while i < size:
        pep = random.choice(peps)
        for j in range(5):
            tcr = random.choice(tcrs)
            # Binding under any CD label disqualifies the candidate.
            attach = (tcr, pep, 'CD4') in all_pairs\
                or (tcr, pep, 'CD8') in all_pairs\
                or (tcr, pep, 'NA') in all_pairs
            if attach is False:
                weight = 1
                if (tcr, pep, 'NEG', 'n', weight) not in examples:
                    examples.append((tcr, pep, 'NEG', 'n', weight))
                i += 1
    return examples
def get_examples(pairs_file):
    """Build positive and matched-size negative example sets for train and test."""
    all_pairs, train_pairs, test_pairs = read_data(pairs_file)
    train_pos = positive_examples(train_pairs)
    # Negatives are sampled to match the positive counts (train first, then
    # test, preserving the original RNG consumption order).
    train_neg = negative_examples(train_pairs, all_pairs, len(train_pos))
    test_pos = positive_examples(test_pairs)
    test_neg = negative_examples(test_pairs, all_pairs, len(test_pos))
    return train_pos, train_neg, test_pos, test_neg
def load_data(pairs_file):
    """Return shuffled (train, test) example lists built from *pairs_file*."""
    train_pos, train_neg, test_pos, test_neg = get_examples(pairs_file)
    datasets = []
    # Merge positives and negatives, shuffling train first then test so the
    # random stream matches the original implementation.
    for pos, neg in ((train_pos, train_neg), (test_pos, test_neg)):
        merged = pos + neg
        random.shuffle(merged)
        datasets.append(merged)
    train, test = datasets
    return train, test
def check():
    """Ad-hoc smoke test: load the hard-coded McPAS dataset and print the splits."""
    pairs_file = 'McPAS-with_CD'
    train, test = load_data(pairs_file)
    print(train)
    print(test)
    print(len(train))
    print(len(test))
# check()
| StarcoderdataPython |
336769 | """
Test only that the wrapper behaves nicely in all cases.
Injection itself is tested through inject.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any
import pytest
from antidote import world
from antidote._internal.utils import Default
from antidote._internal.wrapper import build_wrapper, Injection, InjectionBlueprint
from antidote.exceptions import DependencyNotFoundError
A = object()
B = object()
C = object()
@pytest.fixture(autouse=True, scope="module")
def empty_world():
with world.test.empty():
world.test.singleton("x", A)
yield
class Arg:
dependency: object
@dataclass
class Required(Arg):
dependency: object = field(default=None)
@dataclass
class Opt(Arg):
dependency: object = field(default=None)
def wrap(__func=None, **kwargs: Arg):
def wrapper(func):
return build_wrapper(
blueprint=InjectionBlueprint(
tuple(
[
Injection(
arg_name=arg_name,
required=isinstance(dependency, Required),
dependency=dependency.dependency,
default=Default.sentinel,
)
for arg_name, dependency in kwargs.items()
]
)
),
wrapped=func,
)
return __func and wrapper(__func) or wrapper
class Dummy:
@wrap(self=Required(None), x=Required("x"))
def method(self, x):
return self, x
@classmethod
@wrap(cls=Required(None), x=Required("x"))
def class_after(cls, x):
return cls, x
@staticmethod
@wrap(x=Required("x"))
def static_after(x):
return x
class Dummy2:
def method(self, x):
return self, x
Dummy2.method = wrap(Dummy2.__dict__["method"], self=Required(None), x=Required("x"))
@wrap(x=Required("x"))
def f(x):
return x
d = Dummy()
d2 = Dummy2()
@pytest.mark.parametrize(
"expected, func",
[
pytest.param(A, f, id="func"),
pytest.param((B, A), Dummy.method, id="method"),
pytest.param((Dummy, A), Dummy.class_after, id="classmethod after"),
pytest.param(A, Dummy.static_after, id="staticmethod after"),
pytest.param((d, A), d.method, id="instance method"),
pytest.param((Dummy, A), d.class_after, id="instance classmethod after"),
pytest.param(A, d.static_after, id="instance staticmethod after"),
pytest.param((d2, A), d2.method, id="post:instance method"),
pytest.param((B, A), Dummy2.method, id="post:method"),
],
)
def test_wrapper(expected, func: Any):
if expected == (B, A):
assert expected == func(B, A)
assert expected == func(B)
assert (B, C) == func(B, C)
assert (B, C) == func(B, x=C)
else:
assert expected == func(A)
assert expected == func()
if isinstance(expected, tuple):
new_expected = (expected[0], C)
else:
new_expected = C
assert new_expected == func(C)
assert new_expected == func(x=C)
def test_classmethod_wrapping():
def class_method(cls):
pass
class A:
method = wrap(classmethod(class_method))
assert class_method == A.__dict__["method"].__func__
assert A == A.method.__self__
def test_required_dependency_not_found():
@wrap(x=Required("unknown"))
def f(x):
return x
with pytest.raises(DependencyNotFoundError):
f()
def test_dependency_not_found():
@wrap(x=Opt("unknown"))
def f(x):
return x
with pytest.raises(TypeError):
f()
def test_multiple_injections():
xx = object()
yy = object()
zz = object()
@wrap(x=Required("xx"), y=Required("yy"), z=Opt("zz"))
def f(x, y, z=zz):
return x, y, z
world.test.singleton(dict(xx=xx, yy=yy))
assert (xx, yy, zz) == f()
assert (xx, A, zz) == f(y=A)
assert (xx, yy, A) == f(z=A)
assert (A, yy, zz) == f(x=A)
assert (A, yy, B) == f(A, z=B)
with pytest.raises(TypeError):
f(A, x=A)
def f():
pass
async def async_f():
pass
@pytest.mark.parametrize("func", [f, async_f])
def test_custom_attributes(func):
func.attr = "test"
func.another_attr = "another_attr"
wrapped = wrap(func)
assert "test" == wrapped.attr
wrapped.attr2 = "test2"
assert "test2" == wrapped.attr2
# After setting a new attribute, original ones should still be accessible.
assert "test" == wrapped.attr
wrapped.attr = "overridden test"
assert "overridden test" == wrapped.attr
# You should be able to remove new attributes
del wrapped.attr2
with pytest.raises(AttributeError):
wrapped.attr2
# but not existing ones
with pytest.raises(AttributeError):
del wrapped.another_attr
@pytest.mark.asyncio
async def test_async_wrapper():
with world.test.empty():
world.test.singleton(dict(a=A, b=B, c=C))
@wrap(x=Required("a"))
async def f(x):
return x
res = await f()
assert res == A
class Dummy:
@wrap(self=Required(), x=Required("a"))
async def method(self, x):
return x
@classmethod
@wrap(cls=Required(), x=Required("a"))
async def klass(cls, x):
return x
@staticmethod
@wrap(x=Required("a"))
async def static(x):
return x
d = Dummy()
print(d.__dict__)
res = await d.method()
assert res == A
res = await d.klass()
assert res == A
res = await d.static()
assert res == A
res = await Dummy.klass()
assert res == A
res = await Dummy.static()
assert res == A
@wrap(x=Required("a"), y=Required("b"), z=Opt("unknown"))
async def f(x, y, z=None):
return x, y, z
(x, y, z) = await f()
assert x == A
assert y == B
assert z is None
@wrap(x=Required("a"), y=Required("b"), z=Required("unknown"))
async def f(x, y, z):
pass
with pytest.raises(DependencyNotFoundError, match=".*unknown.*"):
await f()
@wrap(x=Required("a"), y=Required("b"), z=Required())
async def f(x, y, z):
pass
with pytest.raises(TypeError):
await f()
| StarcoderdataPython |
6684376 | from keras.utils import to_categorical
from keras.preprocessing import sequence
from mxnet import gluon
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model
from keras.layers import Conv1D, GlobalMaxPooling1D, Dropout, Dense, Input, Embedding, MaxPooling1D, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
import pickle
# Hyper-parameters and artifact locations.
MAX_WORDS_IN_SEQ = 1000
EMBED_DIM = 100
MODEL_PATH = "models/spam_detect"
# Load Data: pickled (sequences, labels, word2index) produced by preprocessing.
with open("data/dataset.pkl", 'rb') as f:
    sequences, labels, word2index = pickle.load(f)
num_words = len(word2index)
print(f"Found {num_words} unique tokens")
# Pad/truncate every token sequence to a fixed length for the CNN input.
data = sequence.pad_sequences(sequences, maxlen=MAX_WORDS_IN_SEQ, padding='post', truncating='post')
print(labels[:10])
labels = to_categorical(labels)
print(labels[:10])
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2)
# Building the model: embedding + 3 conv/pool stages + dense classifier.
input_seq = Input(shape=[MAX_WORDS_IN_SEQ, ], dtype='int32')
embed_seq = Embedding(num_words + 1, EMBED_DIM, embeddings_initializer='glorot_normal', input_length=MAX_WORDS_IN_SEQ)(
    input_seq)
conv_1 = Conv1D(128, 5, activation='relu')(embed_seq)
conv_1 = MaxPooling1D(pool_size=5)(conv_1)
conv_2 = Conv1D(128, 5, activation='relu')(conv_1)
conv_2 = MaxPooling1D(pool_size=5)(conv_2)
conv_3 = Conv1D(128, 5, activation='relu')(conv_2)
conv_3 = MaxPooling1D(pool_size=35)(conv_3)
flat = Flatten()(conv_3)
flat = Dropout(0.25)(flat)
fc1 = Dense(128, activation='relu')(flat)
# NOTE(review): dense_1 is assigned but never used downstream — fc2 reads
# from fc1, so this Dropout layer is dead; confirm intent.
dense_1 = Dropout(0.25)(flat)
fc2 = Dense(2, activation='softmax')(fc1)
model = Model(input_seq, fc2)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# Train the model, checkpointing the best weights by validation loss.
model.fit(
    x_train,
    y_train,
    batch_size=128,
    epochs=2,
    callbacks=[ModelCheckpoint(MODEL_PATH, save_best_only=True)],
    validation_data=[x_test, y_test]
)
model.save(MODEL_PATH)
class CnnClassifierModel(gluon.HybridBlock):
    # NOTE(review): unfinished stub — hybrid_forward does nothing and
    # gluon.nn.Conv1D() is called with no arguments (Conv1D normally takes
    # channels/kernel_size); confirm before use.
    def __init__(self, **kwargs):
        super(CnnClassifierModel, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = gluon.nn.Conv1D()

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Not implemented: returns None for every input.
        pass
| StarcoderdataPython |
3376036 | '''
Created on May 22, 2015
@author: hsorby
'''
# String identifiers used to name graphics objects (presumably for later
# lookup by name in a scene — confirm against callers).
ELEMENT_OUTLINE_GRAPHIC_NAME = 'element_outline'
IMAGE_PLANE_GRAPHIC_NAME = 'image_plane'
| StarcoderdataPython |
1698290 | # -*- coding: utf-8 -*-
import hubblestack.modules.reg as reg
import hubblestack.utils.win_reg
from hubblestack.exceptions import CommandExecutionError
from tests.support.helpers import random_string
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
try:
import win32api
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Fixture values: unicode key/value exercise non-ASCII registry handling;
# FAKE_KEY is a randomized, non-existent key under HKLM\SOFTWARE.
UNICODE_KEY = "Unicode Key \N{TRADE MARK SIGN}"
UNICODE_VALUE = (
    "Unicode Value " "\N{COPYRIGHT SIGN},\N{TRADE MARK SIGN},\N{REGISTERED SIGN}"
)
FAKE_KEY = "SOFTWARE\\{}".format(random_string("HubblestackTesting-", lowercase=False))
@skipIf(not HAS_WIN32, "Tests require win32 libraries")
class WinFunctionsTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for hubblestack.modules.reg
    """

    def setup_loader_modules(self):
        # Wire the real win_reg utility into the reg module's __utils__ dunder.
        return {
            reg: {
                "__utils__": {
                    "reg.read_value": hubblestack.utils.win_reg.read_value,
                }
            }
        }

    def test_read_value_existing(self):
        """
        Test the read_value function using a well known registry value
        """
        ret = reg.read_value(
            hive="HKLM",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
            vname="ProgramFilesPath",
        )
        self.assertEqual(ret["vdata"], "%ProgramFiles%")

    def test_read_value_default(self):
        """
        Test the read_value function reading the default value using a well
        known registry key
        """
        ret = reg.read_value(
            hive="HKLM", key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion"
        )
        self.assertEqual(ret["vdata"], "(value not set)")

    def test_read_value_non_existing(self):
        """
        Test the read_value function using a non existing value pair
        """
        expected = {
            "comment": "Cannot find fake_name in HKLM\\SOFTWARE\\Microsoft\\"
            "Windows\\CurrentVersion",
            "vdata": None,
            "vname": "fake_name",
            "success": False,
            "hive": "HKLM",
            "key": "SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
        }
        self.assertDictEqual(
            reg.read_value(
                hive="HKLM",
                key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
                vname="fake_name",
            ),
            expected,
        )

    def test_read_value_non_existing_key(self):
        """
        Test the read_value function using a non existing registry key
        """
        expected = {
            "comment": "Cannot find key: HKLM\\{0}".format(FAKE_KEY),
            "vdata": None,
            "vname": "fake_name",
            "success": False,
            "hive": "HKLM",
            "key": FAKE_KEY,
        }
        self.assertDictEqual(
            reg.read_value(hive="HKLM", key=FAKE_KEY, vname="fake_name"), expected
        )

    def test_read_value_invalid_hive(self):
        """
        Test the read_value function when passing an invalid hive
        """
        self.assertRaises(
            CommandExecutionError,
            reg.read_value,
            hive="BADHIVE",
            key="SOFTWARE\\Microsoft",
            vname="ProgramFilesPath",
        )

    def test_read_value_unknown_key_error(self):
        """
        Tests the read_value function with an unknown key error
        """
        # Simulate win32api failing at the RegOpenKeyEx stage.
        mock_error = MagicMock(
            side_effect=win32api.error(123, "RegOpenKeyEx", "Unknown error")
        )
        with patch("hubblestack.utils.win_reg.win32api.RegOpenKeyEx", mock_error):
            self.assertRaises(
                win32api.error,
                reg.read_value,
                hive="HKLM",
                key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
                vname="ProgramFilesPath",
            )

    def test_read_value_unknown_value_error(self):
        """
        Tests the read_value function with an unknown value error
        """
        # Simulate win32api failing at the RegQueryValueEx stage.
        mock_error = MagicMock(
            side_effect=win32api.error(123, "RegQueryValueEx", "Unknown error")
        )
        with patch("hubblestack.utils.win_reg.win32api.RegQueryValueEx", mock_error):
            self.assertRaises(
                win32api.error,
                reg.read_value,
                hive="HKLM",
                key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion",
                vname="ProgramFilesPath",
            )
| StarcoderdataPython |
3566512 | <reponame>elyase/polyaxon
from sanic import Sanic
from streams.resources.builds import build_logs
from streams.resources.experiment_jobs import experiment_job_logs, experiment_job_resources
from streams.resources.experiments import experiment_logs, experiment_resources
from streams.resources.health import health
from streams.resources.jobs import job_logs
# Sanic application serving the log/resource streaming endpoints.
app = Sanic(__name__)
app.add_route(health, '/_health')
# URL templates for the entities whose logs/resources can be streamed.
EXPERIMENT_URL = '/v1/<username>/<project_name>/experiments/<experiment_id>'
EXPERIMENT_JOB_URL = EXPERIMENT_URL + '/jobs/<job_id>'
BUILD_URL = '/v1/<username>/<project_name>/builds/<build_id>'
JOB_URL = '/v1/<username>/<project_name>/jobs/<job_id>'
def add_url(endpoint, base_url, url):
    """Register *endpoint* under both the plain route and its '/ws'-prefixed twin."""
    full_path = '{}/{}'.format(base_url, url)
    app.add_websocket_route(endpoint, full_path)
    app.add_websocket_route(endpoint, '/ws' + full_path)
# Register the websocket endpoints (each also gets a '/ws'-prefixed alias).
# Experiment Job urls
add_url(endpoint=experiment_job_resources, base_url=EXPERIMENT_JOB_URL, url='resources')
add_url(endpoint=experiment_job_logs, base_url=EXPERIMENT_JOB_URL, url='logs')
# Experiment urls
add_url(endpoint=experiment_resources, base_url=EXPERIMENT_URL, url='resources')
add_url(endpoint=experiment_logs, base_url=EXPERIMENT_URL, url='logs')
# Job urls
# add_url(endpoint=job_resources, base_url=EXPERIMENT_URL, url='resources')
add_url(endpoint=job_logs, base_url=JOB_URL, url='logs')
# Build Job urls
# add_url(endpoint=job_resources, base_url=EXPERIMENT_URL, url='resources')
add_url(endpoint=build_logs, base_url=BUILD_URL, url='logs')
@app.listener('after_server_start')
async def notify_server_started(app, loop):  # pylint:disable=redefined-outer-name
    """Initialise the per-server registries of websocket managers and log consumers."""
    app.job_resources_ws_mangers = {}
    app.experiment_resources_ws_mangers = {}
    app.job_logs_consumers = {}
    app.experiment_logs_consumers = {}
@app.listener('after_server_stop')
async def notify_server_stopped(app, loop):  # pylint:disable=redefined-outer-name
    """Clear the websocket-manager registries and stop all log consumers.

    Fixes: the original reset ``app.experiment_resources_ws_manger`` — a typo
    missing the trailing 's' — which created a new attribute and left the real
    ``experiment_resources_ws_mangers`` registry (set up in
    ``notify_server_started``) populated after shutdown.
    """
    app.job_resources_ws_mangers = {}
    app.experiment_resources_ws_mangers = {}
    # Drain both consumer registries, stopping each consumer exactly once.
    for registry in (app.job_logs_consumers, app.experiment_logs_consumers):
        for consumer_key in list(registry.keys()):
            consumer = registry.pop(consumer_key, None)
            if consumer is not None:  # guard against concurrent removal
                consumer.stop()
| StarcoderdataPython |
242842 | from django.shortcuts import render, get_object_or_404, redirect
from .models import Products
from .forms import ProductForm
# Create your views here.
def dynamic_lookup_view(request, product_id):
    """Render the details page for a single product, 404 if it doesn't exist.

    Fixes: the original wrapped get_object_or_404 in a try/except that caught
    Products.DoesNotExist and re-raised ``Http404`` — but Http404 was never
    imported (NameError if reached), and get_object_or_404 already raises
    Http404 itself, so the whole try/except was broken and redundant.
    """
    obj = get_object_or_404(Products, id=product_id)
    context = {
        'my_objs': [obj],
    }
    return render(request, 'product/details.html', context)
# def product_create_view(request):
# context = {}
# return render(request, 'product/newproduct.html', context)
def product_delete_view(request, id):
    """Show a confirmation page (GET) or delete the product and redirect (POST)."""
    product = get_object_or_404(Products, id=id)
    if request.method == 'POST':
        product.delete()
        return redirect('/home/')
    return render(request, 'product/product_delete.html', {'object': product})
def product_create_view(request):
    """Render the product form; on a valid POST, save it and show a fresh form."""
    form = ProductForm(request.POST or None)
    if form.is_valid():
        form.save()
        form = ProductForm()
    return render(request, 'product/newproduct.html', {'form': form})
def product_details_view(request):
    """Render every product on the details template."""
    products = Products.objects.all()
    return render(request, 'product/details.html', {'my_objs': products})
def product_list_view(request):
    """Render every product on the list template.

    The original copied the queryset element-by-element into a Python list
    with an index loop; templates iterate querysets directly, so the copy was
    pure overhead (and forced an extra len() evaluation).
    """
    context = {
        'my_objs': Products.objects.all(),
    }
    return render(request, 'product/list.html', context)
11339396 | from django.apps import AppConfig
class StarterkitConfig(AppConfig):
    """Django application configuration for the ``starterkit`` app."""
    name = 'starterkit'
| StarcoderdataPython |
3237329 | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\track_frozen_mode.py
# Compiled at: 2018-11-30 15:48:12
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens
from ableton.v2.control_surface.mode import ModesComponent
class TrackFrozenModesComponent(ModesComponent):
    """Switches between a 'default' and a 'frozen' mode depending on whether
    the currently selected track is frozen.

    NOTE: this file is decompiled Python-2 bytecode; the b'' byte-string mode
    names are decompilation artifacts and are part of runtime behavior, so
    they are kept exactly as-is.
    """

    def __init__(self, default_mode=None, frozen_mode=None, *a, **k):
        super(TrackFrozenModesComponent, self).__init__(*a, **k)
        assert default_mode is not None
        assert frozen_mode is not None
        self.add_mode(b'default', default_mode)
        self.add_mode(b'frozen', frozen_mode)
        # Re-evaluate the mode whenever the selected track's frozen state changes.
        self._on_selected_track_is_frozen_changed.subject = self.song.view
        if self.is_enabled():
            self._update_selected_mode()
        return

    def _update_selected_mode(self):
        # Pick the mode matching the selected track's frozen flag.
        self.selected_mode = b'frozen' if self.song.view.selected_track.is_frozen else b'default'

    @listens(b'selected_track.is_frozen')
    def _on_selected_track_is_frozen_changed(self):
        self._update_selected_mode()

    def update(self):
        super(TrackFrozenModesComponent, self).update()
        if self.is_enabled():
            self._update_selected_mode()
self._update_selected_mode() | StarcoderdataPython |
def fun_callback(input, extend_input):
    """Demo callback: print the computed sum and the extra value passed through."""
    for label, value in (('fun_callback sum :', input),
                         ('fun_callback extend_input :', extend_input)):
        print(label, value)
def fun_call(one, two, f_callback, three):
    """Add the first two values and hand the sum (plus *three*) to the callback."""
    f_callback(one + two, three)
# Demo invocation: fun_call adds first+second and forwards the result together
# with third to fun_callback.
first = 10
second = 20
third = 30
fun_call(first, second, fun_callback, third)
213019 | <gh_stars>0
"""Useful functions for the Singularity containers
TODO:
- [x] figure out how to mount in other file-systems
-B dir1,dir2
Put to release notes:
`conda install -c bioconda singularity`
OR
`conda install -c conda-forge singularity`
"""
from __future__ import absolute_import
from __future__ import print_function
import six
import os
from kipoi_utils.utils import unique_list, makedir_exist_ok, is_subdir
from kipoi_conda import _call_command
import subprocess
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Python wrapper for the Singularity CLI
# def assert_installed():
# """Make sure singularity is installed
# """
# pass
def singularity_pull(remote_path, local_path):
    """Run `singularity pull`, honouring SINGULARITY_CACHEDIR if set.

    If SINGULARITY_CACHEDIR is set, the image is downloaded (or reused)
    there and `local_path` becomes a symlink to the cached file;
    otherwise the image is pulled directly next to `local_path`.

    Args:
        remote_path: singularity remote path. Example: shub://kipoi/models:latest
        local_path: local file path to the ".sif" file

    Raises:
        ValueError: if the pull command fails or the file doesn't exist afterwards
    """
    makedir_exist_ok(os.path.dirname(local_path))
    if os.path.exists(local_path):
        logger.info("Container file {} already exists. Skipping `singularity pull`".
                    format(local_path))
    else:
        if os.environ.get('SINGULARITY_CACHEDIR'):
            # Pull into the cache directory and expose it via a symlink.
            downloaded_path = os.path.join(os.environ.get('SINGULARITY_CACHEDIR'),
                                           os.path.basename(local_path))
            pull_dir = os.path.dirname(downloaded_path)
            logger.info("SINGULARITY_CACHEDIR is set to {}".
                        format(os.environ.get('SINGULARITY_CACHEDIR')))
            if os.path.exists(downloaded_path):
                # Cached copy already present: just (re)create the symlink and stop.
                logger.info("Container file {} already exists. Skipping `singularity pull` and softlinking it".
                            format(downloaded_path))
                if os.path.islink(local_path):
                    logger.info("Softlink {} already exists. Removing it".format(local_path))
                    os.remove(local_path)
                logger.info("Soflinking the downloaded file: ln -s {} {}".
                            format(downloaded_path,
                                   local_path))
                os.symlink(downloaded_path, local_path)
                return None
        else:
            pull_dir = os.path.dirname(local_path)
        logger.info("Container file {} doesn't exist. Pulling the container from {}. Saving it to: {}".
                    format(local_path, remote_path, pull_dir))
        cmd = ['singularity', 'pull', '--name', os.path.basename(local_path), remote_path]
        logger.info(" ".join(cmd))
        returncode = subprocess.call(cmd,
                                     cwd=pull_dir)
        if returncode != 0:
            raise ValueError("Command: {} failed".format(" ".join(cmd)))
        # softlink it (downloaded_path is only defined when the cache dir is set,
        # which is exactly the case guarded here)
        if os.environ.get('SINGULARITY_CACHEDIR'):
            if os.path.islink(local_path):
                logger.info("Softlink {} already exists. Removing it".format(local_path))
                os.remove(local_path)
            logger.info("Soflinking the downloaded file: ln -s {} {}".
                        format(downloaded_path,
                               local_path))
            os.symlink(downloaded_path, local_path)
    # Final sanity check: either the pull or the symlink must have produced the file.
    if not os.path.exists(local_path):
        raise ValueError("Container doesn't exist at the download path: {}".format(local_path))
def singularity_exec(container, command, bind_directories=None, dry_run=False):
    """Run `singularity exec`.

    Args:
        container: path to the singularity image (*.sif)
        command: command to run (as a list)
        bind_directories: optional list of additional directories to bind
            into the container (passed via ``-B``)
        dry_run: if True, only print the assembled command instead of running it

    Raises:
        ValueError: if the executed command returns a non-zero exit code
    """
    # Fixed: avoid a mutable default argument; None and [] behave identically.
    if bind_directories:
        options = ['-B', ",".join(bind_directories)]
    else:
        options = []
    cmd = ['singularity', 'exec'] + options + [container] + command
    logger.info(" ".join(cmd))
    if dry_run:
        print(" ".join(cmd))
        return
    returncode = subprocess.call(cmd,
                                 stdin=subprocess.PIPE)
    if returncode != 0:
        raise ValueError("Command: {} failed".format(" ".join(cmd)))
# --------------------------------------------
# Figure out relative paths:
# - container path (e.g. shub://kipoi/models:latest)
# - local path (e.g. ~/.kipoi/envs/singularity/kipoi/models_latest.sif)
def container_remote_url(source='kipoi'):
    """Return the remote singularity image URL for the given model source."""
    if source != 'kipoi':
        raise NotImplementedError("Containers for sources other than Kipoi are not yet implemented")
    return 'shub://kipoi/models:latest'
def container_local_path(remote_path):
    """Map a remote image URL to its local ``.sif`` path under the kipoi dir."""
    from kipoi.config import _kipoi_dir
    # Strip the scheme ("shub://..."), then split off an optional ":tag".
    tmp = remote_path.split("://")[1]
    if ":" in tmp:
        relative_path, tag = tmp.split(":")
    else:
        relative_path, tag = tmp, 'latest'
    return os.path.join(_kipoi_dir, "envs/singularity/", relative_path + "_" + tag + ".sif")
# ---------------------------------
def involved_directories(dataloader_kwargs, output_files=None, exclude_dirs=None):
    """Infer the directories involved in a run from the dataloader kwargs.

    Args:
        dataloader_kwargs: dict of dataloader kwargs; values that are existing
            filesystem paths contribute their parent directory
        output_files: optional list of output file paths; their parent
            directories are always included
        exclude_dirs: optional list of directories; results lying under any of
            them (after ``~`` expansion) are dropped

    Returns:
        list: unique directories, in first-seen order
    """
    # Fixed: mutable default arguments replaced with None sentinels.
    output_files = output_files if output_files is not None else []
    exclude_dirs = exclude_dirs if exclude_dirs is not None else []
    dirs = []
    # dataloader kwargs: only values pointing at an existing path count
    for k, v in six.iteritems(dataloader_kwargs):
        if os.path.exists(v):
            dirs.append(os.path.dirname(os.path.abspath(v)))
    # output files always contribute their parent directory
    for v in output_files:
        dirs.append(os.path.dirname(os.path.abspath(v)))

    def in_any_dir(fname, dirs):
        # True if fname lies under any of the (user-expanded) dirs
        return any(is_subdir(fname, os.path.expanduser(d)) for d in dirs)

    dirs = [x for x in dirs
            if not in_any_dir(x, exclude_dirs)]
    return unique_list(dirs)
def create_conda_run():
    """Write the `conda_run` helper script to ~/.kipoi/bin/conda_run.

    NOTE: this should be changed to `conda run` once conda=4.6.0 is released
    https://github.com/conda/conda/issues/2379

    Returns:
        str: path of the executable helper script
    """
    from kipoi.config import _kipoi_dir
    crun = """#!/bin/bash
# Run a bash command in a new conda environment
set -e  # stop on error
if [[ $# -lt 2 ]] ; then
    echo "Usage: "
    echo "       conda_run <conda envrionment> <command> "
    exit 0
fi
env=$1
cmd=${@:2}
echo "Running command in env: $env"
echo "Command: $cmd"
source activate $env
$cmd
source deactivate $env
"""
    script_path = os.path.join(_kipoi_dir, 'bin', 'conda_run')
    makedir_exist_ok(os.path.dirname(script_path))
    with open(script_path, 'w') as fh:
        fh.write(crun)
    # mark the helper executable for the current user
    subprocess.call(["chmod", "u+x", script_path])
    return script_path
def singularity_command(kipoi_cmd, model, dataloader_kwargs, output_files=None, source='kipoi', dry_run=False):
    """Run a kipoi command inside the Kipoi singularity container.

    Args:
        kipoi_cmd: full kipoi command as a list, starting with 'kipoi'
        model: model name, used to resolve the conda environment inside
            the container
        dataloader_kwargs: dataloader kwargs dict, used to infer which
            directories must be bound into the container
        output_files: optional list of output file paths (their parent
            directories are bound as well)
        source: model source; only 'kipoi' is supported
        dry_run: if True, only print the final singularity command
    """
    # Fixed: mutable default argument replaced with a None sentinel.
    if output_files is None:
        output_files = []
    remote_path = container_remote_url(source)
    local_path = container_local_path(remote_path)
    singularity_pull(remote_path, local_path)

    assert kipoi_cmd[0] == 'kipoi'
    # remove all spaces within each command
    kipoi_cmd = [x.replace(" ", "").replace("\n", "").replace("\t", "") for x in kipoi_cmd]

    # figure out the right conda environment name inside the container
    stdout, stderr = _call_command('singularity', ['exec', local_path, 'kipoi', 'env', 'get', model], stdin=subprocess.PIPE)
    env_name = stdout.decode().strip()

    # create/get the `conda_run` command
    conda_run = create_conda_run()

    singularity_exec(local_path,
                     [conda_run, env_name] + kipoi_cmd,
                     bind_directories=involved_directories(dataloader_kwargs, output_files, exclude_dirs=['/tmp', '~']),
                     dry_run=dry_run)
# notebooks/icos_jupyter_notebooks/station_characterization/gui.py
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 08:38:51 2020
@author: <NAME>
"""
from ipywidgets import Dropdown, SelectMultiple, FileUpload, HBox, Text, VBox, Button, Output, IntText, RadioButtons,IntProgress, GridspecLayout
from IPython.core.display import display, HTML
import settings
from icoscp.station import station as cpstation
import stationchar
import stc_functions
import os
from datetime import datetime
import json
from icoscp.stilt import stiltstation
# Discover all available STILT stations once at import time.
stiltstations= stiltstation.find()
# Dropdown entries of the form ("Country: Station name (id)", id);
# stations without geoinfo (e.g. located in water) are listed first
# under the "In water" label.
list_all_located = sorted([((v['geoinfo']['name']['common'] + ': ' + v['name'] + ' ('+ k + ')'),k) for k, v in stiltstations.items() if v['geoinfo']])
list_all_not_located = [(('In water' + ': ' + v['name'] + ' ('+ k + ')'),k) for k, v in stiltstations.items() if not v['geoinfo']]
list_all = list_all_not_located + list_all_located
# Same lists restricted to ICOS-labelled stations.
list_all_icos_located = sorted([((v['geoinfo']['name']['common'] + ': ' + v['name'] + ' ('+ k + ')'),k) for k, v in stiltstations.items() if v['geoinfo'] if v['icos']])
list_all_icos_not_located = [(('In water' + ': ' + v['name'] + ' ('+ k + ')'),k) for k, v in stiltstations.items() if not v['geoinfo'] if v['icos']]
list_all_icos = list_all_icos_not_located + list_all_icos_located
# create a list (tuple) for the dropdown list of stations
#icoslist = sorted([(v['name'],k) for k,v in stiltstations.items() if v['icos']])
#stiltlist = sorted([(v['name'],k) for k,v in stiltstations.items() if not v['icos']])
#---------------------------------------------------------
# read or set the parameters
def get_settings():
    """Collect the current widget values into a settings dictionary.

    Returns:
        dict: the settings, or None when the form is not fully filled in yet
        (e.g. no station selected), preserving the original silent-failure
        behaviour.
    """
    s = settings.getDict()
    try:
        s['stationCode'] = station_choice.value
        if stiltstations[s['stationCode']]['icos']:
            # ICOS stations get the full ICOS metadata attached
            s['icos'] = cpstation.get(s['stationCode'][0:3].upper()).info()
        s['stilt'] = stiltstations[s['stationCode']]
        s['startYear'] = s_year.value
        s['startMonth'] = s_month.value
        s['startDay'] = s_day.value
        s['endYear'] = e_year.value
        s['endMonth'] = e_month.value
        s['endDay'] = e_day.value
        s['timeOfDay'] = time_selection.value
        s['binSize'] = bin_size.value
        s['binInterval'] = interval.value
        s['unit'] = unit_value.value
        s['labelPolar'] = landcover_windrose_label.value
        s['saveFigs'] = save_figs.value
        s['figFormat'] = fig_format.value
    except Exception:
        # Fixed: was a bare `except:` (also swallowed KeyboardInterrupt/
        # SystemExit). A half-filled form raises here; treat that as
        # "no valid settings yet".
        return None
    return s
def set_settings(s):
    """Push the values from a settings dictionary *s* into the GUI widgets."""
    # Assignment order matters: it mirrors the original sequence so that
    # observer callbacks fire in the same order.
    field_map = ((station_choice, 'stationCode'),
                 (s_year, 'startYear'),
                 (s_month, 'startMonth'),
                 (s_day, 'startDay'),
                 (e_year, 'endYear'),
                 (e_month, 'endMonth'),
                 (e_day, 'endDay'),
                 (time_selection, 'timeOfDay'),
                 (bin_size, 'binSize'),
                 (interval, 'binInterval'),
                 (unit_value, 'unit'),
                 (landcover_windrose_label, 'labelPolar'),
                 (save_figs, 'saveFigs'))
    for widget, key in field_map:
        widget.value = s[key]
    # Older settings files may lack 'figFormat'; fall back to pdf.
    try:
        fig_format.value = s['figFormat']
    except:
        fig_format.value = 'pdf'
# observer functions
#---------------------------------------------------------
def change_stn_type(c):
    """Station-type radio button changed: swap the station list and reset dates.

    Observers are temporarily detached so that repopulating the options
    does not fire the change handlers with stale values.
    """
    update_button.disabled = True
    # make sure the new 'options' are not selected..
    unobserve()
    if station_type.value=='STILT stations':
        station_choice.options=list_all_located
    else:
        station_choice.options= list_all_icos
    station_choice.value=None
    # reset the data fields
    s_year.options = []
    e_year.options = []
    s_month.options = []
    e_month.options = []
    s_day.options = []
    e_day.options = []
    # make sure future changes are observed again
    observe()
def change_stn(c):
    """A station was selected: enable the run button and load its years."""
    update_button.disabled = False
    stn = c['new']
    # sort the year strings first, then convert to ints (as before)
    available_years = [int(y) for y in sorted(stiltstations[stn]['years'])]
    s_year.options = available_years
    e_year.options = available_years
    # setting s_year triggers "change_yr" --> months get populated
#triggers "change_yr" --> months populated
def change_yr(c):
    """Start year changed: limit selectable end years and reload the months."""
    e_year.options = [y for y in s_year.options if y >= c['new']]
    # months available for the selected station in the chosen start year
    stn = station_choice.value
    months = [int(m) for m in sorted(stiltstations[stn][str(s_year.value)]['months'])]
    s_month.options = months
    e_month.options = months
def change_mt(c):
    """Start month changed: repopulate the day widget and sync end month/day."""
    # Number of selectable days depends on the month (February is fixed
    # to 28 days, leap years are not considered — as before).
    months_with_30 = [4, 6, 9, 11]
    months_with_31 = [1, 3, 5, 7, 8, 10, 12]
    if c['new'] in months_with_31:
        s_day.options = list(range(1, 32))
    elif c['new'] in months_with_30:
        s_day.options = list(range(1, 31))
    else:
        s_day.options = list(range(1, 29))
    # same year: the end month cannot precede the new start month
    if s_year.value == e_year.value:
        e_month.options = [int(m) for m in s_month.options if m >= c['new']]
    # same year and month: the end day cannot precede the start day
    if s_year.value == e_year.value and s_month.value == e_month.value:
        e_day.options = [d for d in s_day.options if d >= s_day.value]
def change_yr_end(c):
    """End year changed: recompute which end months are selectable."""
    if s_year.value == e_year.value:
        # same year: the end month cannot precede the start month
        e_month.options = [m for m in s_month.options if m >= s_month.value]
    else:
        # different year: every month recorded for that station/year is allowed
        stn = station_choice.value
        e_month.options = [int(m) for m in sorted(stiltstations[stn][str(e_year.value)]['months'])]
def change_day(c):
    """Start day changed: keep the end day from preceding it (same year+month)."""
    if s_year.value == e_year.value and s_month.value == e_month.value:
        e_day.options = [int(d) for d in s_day.options if d >= s_day.value]
def change_month_end(c):
    """End month changed: repopulate the end-day options."""
    if s_year.value == e_year.value and e_month.value == s_month.value:
        # same month as the start: the end day cannot precede the start day
        e_day.options = [d for d in s_day.options if d >= s_day.value]
    else:
        # otherwise use the full month length (February fixed to 28 days)
        months_with_30 = [4, 6, 9, 11]
        months_with_31 = [1, 3, 5, 7, 8, 10, 12]
        if c['new'] in months_with_31:
            e_day.options = list(range(1, 32))
        elif c['new'] in months_with_30:
            e_day.options = list(range(1, 31))
        else:
            e_day.options = list(range(1, 29))
def file_set_widgets(c):
    """A settings file was uploaded: decode its JSON and populate the widgets."""
    uploaded = file_name.value
    # the FileUpload dict is empty until something was actually uploaded
    if not uploaded:
        return
    first_entry = uploaded[list(uploaded.keys())[0]]
    # decode bytes and normalize single quotes so json.loads accepts it
    settings_json = first_entry['content'].decode('utf8').replace("'", '"')
    set_settings(json.loads(settings_json))
#----------- start processing -----------------
def updateProgress(f, desc=''):
    """Advance the progress bar *f* by one step and refresh its label.

    Without *desc* the label shows 'step <value>/<max>'; otherwise *desc*
    (stringified) is used verbatim.
    """
    f.value += 1
    f.description = str(desc) if desc else 'step ' + str(f.value) + '/' + str(f.max)
def update_func(button_c):
    """Run-button callback: build all station-characterisation figures.

    Clears every output area, reads the current settings, creates a
    StationChar object and renders the sensitivity/point-source/population
    maps, the land-cover graphs and the seasonal table; optionally saves
    everything to an output folder.
    """
    # Define update function
    # This starts the process of creating the graphs
    # and, depending on the parameters, saving figures and pdf
    progress_bar.clear_output()
    header_no_footprints.clear_output()
    header_output.clear_output()
    result_sensitivity.clear_output()
    result_population.clear_output()
    result_pointsource.clear_output()
    result_land_cover_bar_graph.clear_output()
    result_seasonal_table.clear_output()
    header_advanced.clear_output()
    result_landcover_windrose.clear_output()
    result_multiple_variables_graph.clear_output()
    update_button.disabled = True
    with progress_bar:
        f = IntProgress(min=0, max=10, style=style_bin)
        display(f)
        updateProgress(f, 'read footprint')
    # stc is kept module-global so other cells/tools can inspect the result
    global stc
    stc=stationchar.StationChar(get_settings())
    if stc.fp is None:
        # no footprints available for the selected date range: bail out early
        with header_no_footprints:
            display(HTML('<p style="font-size:16px">No footprints for selected date range.</p>'))
        f.value = 10
    else:
        if stc.settings['saveFigs'] == 'yes':
            # create a timestamped output folder under ~/output
            now = datetime.now()
            stc.settings['date/time generated'] = now.strftime("%Y%m%d_%H%M%S_")
            output = os.path.join(os.path.expanduser('~'), 'output/station_characterisation', stc.settings['date/time generated']+stc.stationId)
            if not os.path.exists(output):
                os.makedirs(output)
            stc.settings['output_folder'] = output
        with header_output:
            degree_sign=u'\N{DEGREE SIGN}'
            station_name=stc.stationName
            station_code=stc.settings['stationCode']
            station_country=stc.country
            station_lat=stc.lat
            station_lon=stc.lon
            maps_bin_size=stc.settings['binSize']
            maps_bin_interval=stc.settings['binInterval']
            #date and time:
            date_and_time_string=stc_functions.date_and_time_string_for_title(stc.dateRange, stc.settings['timeOfDay'])
            if 'icos' in stc.settings:
                # ICOS stations get the richer header with class/site type
                station_class=stc.stationClass
                station_site_type=stc.siteType
                model_height=stc.settings['stilt']['alt']
                ##
                if stc.settings['icos']['siteType']=='mountain' or stc.settings['icos']['siteType']=='Mountain':
                    mountain_string = ' (might be different from station intake height since mountain station.'
                else:
                    mountain_string = '.'
                display(HTML('<p style="font-size:35px;font-weight:bold;"><br>' + station_name + \
                             ' station characterisation</p><p style="font-size:18px;"><br>'+ station_name + ' (' + station_code +\
                             ') is a class ' + str(station_class) + ' ICOS atmospheric station of the type ' + station_site_type.lower() + \
                             ' located in ' + station_country + ' (latitude: ' + str("%.2f" % station_lat) +\
                             degree_sign + 'N, ' + 'longitude: ' + str("%.2f" % station_lon) +\
                             degree_sign + 'E). The model height is ' + str(model_height)+ ' meters above ground' + mountain_string + '<br></p>'))
            else:
                display(HTML('<p style="font-size:35px;font-weight:bold;"><br>' + station_name + \
                             ' station characterisation</p><p style="font-size:16px;">' + station_name + ' (' + station_code +\
                             ') is located in ' + station_country + ' (latitude: ' + str("%.2f" % station_lat) +\
                             degree_sign + 'N, ' + 'longitude: ' + str("%.2f" % station_lon) + degree_sign + 'E).<br></p>'))
            #added information that is redundant in the titles
            display(HTML('<p style="font-size:18px;">Date range: ' + date_and_time_string + '<br></p>'))
            display(HTML('<p style="font-size:18px;">The map bins are ' + str(maps_bin_size) + ' degrees at ' +\
                         str(maps_bin_interval) + ' km increments</p>'))
        updateProgress(f, 'calculate sensitivity')
        with result_sensitivity:
            fig, caption = stc_functions.polar_graph(stc, 'sensitivity')
            stc.add_figure(1, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        # NOTE(review): the next two blocks render the point-source graph into
        # result_population and the population graph into result_pointsource
        # (and the progress labels are swapped the same way) — looks like the
        # widget names are crossed; confirm before relying on the names.
        updateProgress(f, 'process pointsource')
        with result_population:
            fig, caption=stc_functions.polar_graph(stc, 'point source contribution', colorbar='Purples')
            stc.add_figure(2, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        updateProgress(f, 'process population')
        with result_pointsource:
            fig, caption =stc_functions.polar_graph(stc, 'population sensitivity', colorbar='Greens')
            stc.add_figure(3, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        updateProgress(f, 'get landcover')
        with result_land_cover_bar_graph:
            fig, caption=stc_functions.land_cover_bar_graph(stc)
            stc.add_figure(4, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        updateProgress(f, 'seasonal table')
        with result_seasonal_table:
            fig, caption=stc_functions.seasonal_table(stc)
            stc.add_figure(5, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            # the seasonal table may legitimately have no displayable figure
            try:
                display(fig)
            except:
                pass
        with header_advanced:
            display(HTML('<h2>Advanced figures</h2><br>\
            Please read the <a href="./station_characterization/specifications.pdf" target="_blank">\
            specifications document</a> before attempting to interpret the following figures.'))
        updateProgress(f, 'landcover windrose')
        with result_landcover_windrose:
            fig, caption=stc_functions.land_cover_polar_graph(stc)
            stc.add_figure(6, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        updateProgress(f, 'multiple variables')
        with result_multiple_variables_graph:
            fig, caption= stc_functions.multiple_variables_graph(stc)
            stc.add_figure(7, fig, caption)
            display(HTML('<p style="font-size:16px;text-align:center">' + caption + ' </p>'))
            display(fig)
        if stc.settings['saveFigs'] == 'yes':
            updateProgress(f, 'saving')
            fmt=fig_format.value
            stc_functions.save(stc, fmt)
    # make sure the progress bar is filled..
    updateProgress(f, 'finished')
    f.value = 10
    update_button.disabled = False
#-----------widgets definition -----------------
# Shared widget style: make descriptions wide enough not to be truncated.
style_bin = {'description_width': 'initial'}
header_filename = Output()
with header_filename:
    display(HTML('<p style="font-size:15px;font-weight:bold;">Load settings from file (optional): </p>'))
# Upload widget for a previously saved settings JSON file.
file_name= FileUpload(
    accept='.json',  # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'
    multiple=False  # True to accept multiple files upload else False
    )
# Toggle between the full STILT station list and ICOS-only stations.
station_type=RadioButtons(
        options=['ICOS stations', 'STILT stations'],
        value='ICOS stations',
        description=' ',
        disabled=False)
station_choice = Dropdown(options = list_all_icos,
                   description = 'Station',
                   value=None,
                   disabled= False)
#Create a Dropdown widget with year values (start year):
s_year = Dropdown(options = [],
                  description = 'Start Year',
                  disabled= False,)
#Create a Dropdown widget with month values (start month):
s_month = Dropdown(options = [],
                   description = 'Start Month',
                   disabled= False,)
#Create a Dropdown widget with year values (end year):
e_year = Dropdown(options = [],
                  description = 'End Year',
                  disabled= False,)
#Create a Dropdown widget with month values (end month):
e_month = Dropdown(options = [],
                   description = 'End Month',
                   disabled= False,)
s_day = Dropdown(options = [],
                description = 'Start Day',
                disabled = False,)
e_day = Dropdown(options = [],
                description = 'End Day',
                disabled = False,)
# 3-hourly footprint time slots; all selected by default.
options_time_selection=[('0:00', 0), ('3:00', 3), ('06:00', 6), ('09:00', 9), ('12:00', 12), ('15:00', 15), ('18:00', 18), ('21:00', 21)]
time_selection= SelectMultiple(
    options=options_time_selection,
    value=[0, 3, 6, 9, 12, 15, 18, 21],
    style=style_bin,
    description='Time of day',
    disabled=False)
bin_size = Dropdown(options = [15, 30, 60, 90, 180, 360],
            description = 'Bin size (degrees)', style=style_bin,
            disabled = False,)
# Distance increment (km) for the map bins.
interval = IntText(
        value=100,
        min=50,
        max=500,
        description='Interval (km)',
        disabled=False,
        step=50)
#selection percent/absolut:
unit_value=RadioButtons(
        options=['percent', 'absolute'],
        value='percent',
        style=style_bin,
        disabled=False)
#selection label landcover windrose:
landcover_windrose_label =RadioButtons(
        options=['yes', 'no'],
        value='yes',
        description='Labels to the land cover polar graph:',
        style=style_bin,
        disabled=False)
# Whether to save all output and in which image format.
save_figs=RadioButtons(
        options=['yes', 'no'],
        style=style_bin,
        value='yes',
        description= 'Save the output:',
        disabled=False)
fig_format=RadioButtons(
        options=['pdf', 'png'],
        style=style_bin,
        value='pdf',
        description= 'Format figures:',
        disabled=False)
#Create a Button widget to control execution:
update_button = Button(description='Run selection',
                       disabled=True,
                       button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
                       tooltip='Click me',)
royal='#4169E1'
update_button.style.button_color=royal
update_button.layout.margin = '0px 0px 0px 160px' #top, right, bottom, left
#this is just text that is put in a Vbox (vertical box) ABOVE (verticla) the station selection
#("Select here station and time range")
header_station = Output()
with header_station:
    display(HTML('<p style="font-size:15px;font-weight:bold;">Select station: </p>'))
header_date_time = Output()
with header_date_time:
    display(HTML('<p style="font-size:15px;font-weight:bold;"><br>Select date and time: </p>'))
#added
header_bin_specifications = Output()
with header_bin_specifications:
    display(HTML('<p style="font-size:15px;font-weight:bold;"><br>Select bin size and intervals: </p>'))
header_unit = Output()
with header_unit:
    display(HTML('<p style="font-size:15px;font-weight:bold;"><br>Unit: </p><p style="font-size:12px;width: 250px;">\
    Select representation of surface influence in <b>percent</b> for optimal display of a single station or <b>absolute</b> values for \
    intercomparison between stations </p>'))
# Spacer outputs used purely to vertically align neighbouring columns.
header_style = Output()
with header_style:
    display(HTML('<p style="font-size:15px;font-weight:bold;"><br><br></p>'))
header_save_figs = Output()
#to make it align with the other texts.
with header_save_figs:
    display(HTML('<p style="font-size:15px;font-weight:bold;"><br><br></p>'))
#vertical box with the heading (header_station) and the station dropdown
station_box = VBox([header_station,station_type,station_choice, header_date_time])
#NOTE vertical - start year above end year
year_box = VBox([s_year, e_year])
month_box = VBox([s_month, e_month])
day_box = VBox([s_day, e_day])
#the two vertical boxes next to each other in a horizontal box
#Add both time-related VBoxes to a HBox:
time_box = HBox([year_box, month_box, day_box])
#added
bin_box_1 = HBox([bin_size, interval])
h_box_1 = HBox([header_unit, header_style])
v_box_1 = VBox([header_unit, unit_value])
v_box_2 = VBox([header_style, landcover_windrose_label])
v_box_3 = VBox([header_save_figs, save_figs, fig_format])
bin_box_2 = HBox([v_box_1, v_box_2, v_box_3])
#Set font of all widgets in the form:
station_choice.layout.width = '603px'
time_box.layout.margin = '25px 0px 10px 0px'
year_box.layout.margin = '0px 0px 0px 0px'
#Initialize form output:
form_out = Output()
#Initialize results output widgets (one per figure/section):
progress_bar = Output()
header_no_footprints = Output()
header_output = Output()
result_sensitivity = Output()
result_population = Output()
result_pointsource = Output()
result_land_cover_bar_graph = Output()
result_seasonal_table = Output()
header_advanced = Output()
result_landcover_windrose = Output()
result_multiple_variables_graph = Output()
#--------------------------------------------------------------------
# OBSERVERS - what happens when change ex. change start year (s_year)
def observe():
    """Attach all widget change-handlers and the run-button callback."""
    handler_map = ((station_type, change_stn_type),
                   (station_choice, change_stn),
                   (s_year, change_yr),
                   (s_month, change_mt),
                   (s_day, change_day),
                   (e_year, change_yr_end),
                   (e_month, change_month_end),
                   (file_name, file_set_widgets))
    for widget, handler in handler_map:
        widget.observe(handler, 'value')
    # run the analysis when the button is clicked
    update_button.on_click(update_func)
def unobserve():
    """Detach the widget change-handlers.

    Note: file_name and the run button stay attached (same as before).
    """
    handler_map = ((station_type, change_stn_type),
                   (station_choice, change_stn),
                   (s_year, change_yr),
                   (s_month, change_mt),
                   (s_day, change_day),
                   (e_year, change_yr_end),
                   (e_month, change_month_end))
    for widget, handler in handler_map:
        widget.unobserve(handler, 'value')
# start observation
observe()
#--------------------------------------------------------------------
#Open form object:
with form_out:
    h_box_1=HBox([header_output])
    # 2x2 grid holding the four main map/graph outputs
    grid=GridspecLayout(2, 2)
    grid[0:1, 0:1] = result_sensitivity
    grid[0:1, 1:2] = result_population
    grid[1:2, 0:1] = result_pointsource
    grid[1:2, 1:2] = result_land_cover_bar_graph
    #table much "thinner" - make HBox rather than in grid
    h_box_2=HBox([result_seasonal_table])
    #grid for the last two:
    h_box_3=HBox([header_advanced])
    grid_2 = GridspecLayout(1, 4)
    grid_2[0:1, 0:2] = result_landcover_windrose
    grid_2[0:1, 2:4] = result_multiple_variables_graph
    update_buttons = HBox([file_name, update_button])
    # the complete input form, stacked vertically
    selection_menu = VBox([station_box, time_box, time_selection, header_bin_specifications, bin_box_1,bin_box_2, header_filename, update_buttons])
    display(selection_menu, progress_bar, header_no_footprints, h_box_1, grid, h_box_2, h_box_3, grid_2)
#Display form:
display(form_out)
"""Flask application entry point: wires up blueprints, REST routes and CORS."""
import os

from flask import Flask, current_app, send_file, redirect, url_for, json
from flask_dance.contrib.twitter import make_twitter_blueprint
from flask_cors import CORS
from werkzeug.contrib.fixers import ProxyFix
from flask_restful import Api as API

from routes import route_dict
from routes.twitter import twitter_bp
from api import api_bp

# Fixed: the app was created twice (the first instance was discarded);
# create it once with the static front-end folder.
app = Flask(__name__, static_folder='./dist/static')
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app)

app.register_blueprint(api_bp)
app.secret_key = "viper-probe2"  # change the secret to clear the session!
app.register_blueprint(twitter_bp, url_prefix="/login")

api = API(app)
# Add routes from /routes/__init__.py
for route, resource in route_dict.items():
    api.add_resource(resource, route)

# Fixed: there were two consecutive app.run() calls (the second was
# unreachable) and no main guard; run once, on all interfaces, port 5000.
if __name__ == '__main__':
    # To Run:
    # python app.py
    app.run(host='0.0.0.0', port=5000)
import os, subprocess
from itertools import chain
from os.path import join
import pandas as pd
from Modules.Utils import run, make_dir
class FileManager:
"""Project non-specific class for handling local and cloud storage."""
def __init__(self, training=False):
"""create an empty local_paths variable and run self._initialize()"""
self.training = training
self.local_paths = {}
self.training_pids = None
self._initialize_fm()
def sync_training_dir(self, exclude=None, quiet=False):
"""sync the training directory bidirectionally, keeping the newer version of each file.
Args:
exclude (list of str): files/directories to exclude. Accepts both explicit file/directory names and
regular expressions. Expects a list, even if it's a list of length one. Default None.
quiet: if True, suppress the output of rclone copy. Default False
"""
print('syncing training directory')
local_training_dir = self.local_paths['training_dir']
cloud_training_dir = self._local_path_to_cloud_path(local_training_dir)
down = ['rclone', 'copy', '-u', '-c', cloud_training_dir, local_training_dir]
up = ['rclone', 'copy', '-u', '-c', local_training_dir, cloud_training_dir, '--exclude', '.*{/**,}']
if not quiet:
[com.insert(3, '-P') for com in [down, up]]
if exclude is not None:
[com.extend(list(chain.from_iterable(zip(['--exclude'] * len(exclude), exclude)))) for com in [down, up]]
[run(com) for com in [down, up]]
self.training_pids = pd.read_csv(self.local_paths['boxed_fish_csv'], index_col=0)['ProjectID'].unique()
def sync_model_dir(self, exclude=None, quiet=False):
"""sync the machine learning directory bidirectionally, keeping the newer version of each file.
Args:
exclude (list of str): files/directories to exclude. Accepts both explicit file/directory names and
regular expressions. Expects a list, even if it's a list of length one. Default None.
quiet: if True, suppress the output of rclone copy. Default False
"""
print('syncing model directory')
local_model_dir = self.local_paths['model_dir']
cloud_model_dir = self._local_path_to_cloud_path(local_model_dir)
down = ['rclone', 'copy', '-u', '-c', cloud_model_dir, local_model_dir]
up = ['rclone', 'copy', '-u', '-c', local_model_dir, cloud_model_dir, '--exclude', '.*{/**,}']
if not quiet:
[com.insert(3, '-P') for com in [down, up]]
if exclude is not None:
[com.extend(list(chain.from_iterable(zip(['--exclude'] * len(exclude), exclude)))) for com in [down, up]]
[run(com) for com in [down, up]]
def download(self, name, relative_path=None, fault_tolerant=False):
"""use rclone to download a file, untar if it is a .tar file, and update self.local_paths with the path
Args:
name: brief descriptor of the file or directory. Used as the key in the new self.local_paths entry
relative_path: path to file or directory, relative to the local_master / cloud_master directory
fault_tolerant: If True, print a warning (but do not raise an error) if the download fails
Returns:
the full path the to the newly downloaded file or directory
"""
if relative_path is None:
try:
relative_path = self._full_path_to_relative_path(self.local_paths[name])
except KeyError:
print('{} was not a valid key for the local_paths dictionary. '
'Please provide name and relative path instead'.format(name))
return
local_path = join(self.local_paths['master_dir'], relative_path)
if local_path != self.local_paths[name]:
print(
'{0} is already a key for the local_paths dict, but the relative path provided ({1}) differs from the '
'destination path previously defined ({2}). Either omit the relative_path keyword to download {2}, '
'or choose a unique name.'.format(name, local_path, self.local_paths[name]))
return
cloud_path = join(self.cloud_master_dir, relative_path)
run(['rclone', 'copyto', cloud_path, local_path], fault_tolerant=fault_tolerant)
if not os.path.exists(local_path):
if fault_tolerant:
print('download failed for {}. Continuing'.format(name))
else:
raise Exception('download failed for {}. Exiting'.format(name))
if os.path.splitext(local_path)[1] == '.tar':
run(['tar', '-xvf', '--skip-old-files', local_path, '-C', os.path.dirname(local_path)],
fault_tolerant=fault_tolerant)
local_path = os.path.splitext(local_path)[0]
assert os.path.exists(local_path), 'untarring failed for {}'.format(local_path)
os.remove(local_path + '.tar')
self.local_paths.update({name: local_path})
return local_path
def upload(self, name, tarred=False):
"""use rclone to upload a file
Args:
name: brief descriptor of the file or directory. must be a key from self.local_paths
tarred: if True, and 'name' references a directory, upload the directory as a tarfile
"""
try:
local_path = self.local_paths[name]
except KeyError:
print('{} is not a valid key to the local_paths dict. Please use a valid key'.format(name))
return
cloud_path = self._local_path_to_cloud_path(local_path)
if tarred:
# if a tarfile with the same name already exists in the cloud, download and untar it into the target
# directory to prevent overwriting pre-existing data
self.download(name, fault_tolerant=True)
output = subprocess.run(
['tar', '-cvf', local_path + '.tar', '-C', local_path, os.path.split(local_path)[0]],
capture_output=True, encoding='utf-8')
if output.returncode != 0:
print(output.stderr)
raise Exception('Error in tarring ' + local_path)
local_path += '.tar'
else:
output = subprocess.run(['rclone', 'copyto', local_path, cloud_path], capture_output=True, encoding='utf-8')
if output.returncode != 0:
raise Exception('Error in uploading file: ' + output.stderr)
def make_dir(self, name, path):
"""update the self.local_paths dict with {name: path}, and create the directory if it does not exist
Args:
name (str): brief file descriptor, to be used as key in the local_paths dict
path (str): local path of the directory to be created
Returns:
str: the path argument, unaltered
"""
self.local_paths.update({name: make_dir(path)})
return path
    def _initialize_fm(self):
        """create all required local directories and set the paths for files generated later."""
        # locate the cloud master directory
        self.cloud_master_dir = self._locate_cloud_master_dir()
        # create basic directory structure and define essential file-paths.
        # note: later entries depend on earlier ones being registered first,
        # so the order of these calls matters.
        self.make_dir('master_dir', join(os.getenv('HOME'), 'Temp', 'CichlidDetection'))
        self.make_dir('analysis_states_dir',
                      join(self.local_paths['master_dir'], '__AnalysisStates', 'CichlidDetection'))
        self.make_dir('data_dir', join(self.local_paths['master_dir'], '__ProjectData'))
        self.make_dir('model_dir',
                      join(self.local_paths['master_dir'], '__MachineLearningModels', 'FishDetectionModels'))
        self.make_dir('weights_dir', join(self.local_paths['model_dir'], 'Weights'))
        self.local_paths.update({'weights_file': join(self.local_paths['weights_dir'], 'last.weights')})
        # if training, create training-specific directories and file-paths as well
        # NOTE(review): these training paths are created unconditionally here;
        # only the sync below is gated on self.training -- confirm intended.
        self.make_dir('training_dir', join(self.local_paths['master_dir'], '__TrainingData', 'CichlidDetection'))
        self.make_dir('train_image_dir', join(self.local_paths['training_dir'], 'train_images'))
        self.make_dir('test_image_dir', join(self.local_paths['training_dir'], 'test_images'))
        self.make_dir('label_dir', join(self.local_paths['training_dir'], 'labels'))
        self.make_dir('log_dir', join(self.local_paths['training_dir'], 'logs'))
        self.make_dir('predictions_dir', join(self.local_paths['training_dir'], 'predictions'))
        self.make_dir('training_figure_dir', join(self.local_paths['training_dir'], 'figures'))
        self.make_dir('training_figure_data_dir', join(self.local_paths['training_figure_dir'], 'figure_data'))
        self.make_dir('boxed_images_dir', join(self.local_paths['training_dir'], 'BoxedImages'))
        self.local_paths.update({'train_list': join(self.local_paths['training_dir'], 'train_list.txt')})
        self.local_paths.update({'test_list': join(self.local_paths['training_dir'], 'test_list.txt')})
        self.local_paths.update({'train_log': join(self.local_paths['log_dir'], 'train_log.txt')})
        self.local_paths.update({'val_log': join(self.local_paths['log_dir'], 'val_log.txt')})
        self.local_paths.update({'batch_log': join(self.local_paths['log_dir'], 'batch_log.txt')})
        self.local_paths.update({'ground_truth_csv': join(self.local_paths['training_dir'], 'ground_truth.csv')})
        self.local_paths.update({'boxed_fish_csv': join(self.local_paths['training_dir'], 'BoxedFish.csv')})
        # also sync the training directory and determine the unique project ID's from boxed_fish.csv
        if self.training:
            self.sync_training_dir()
    def _locate_cloud_master_dir(self):
        """locate the required files in Dropbox.

        Returns:
            string: cloud_master_dir, the outermost Dropbox directory that will be used henceforth
        """
        # establish the correct remote
        possible_remotes = run(['rclone', 'listremotes']).split()
        if len(possible_remotes) == 1:
            remote = possible_remotes[0]
        elif 'cichlidVideo:' in possible_remotes:
            remote = 'cichlidVideo:'
        elif 'd:' in possible_remotes:
            remote = 'd:'
        else:
            raise Exception('unable to establish rclone remote')
        # establish the correct path to the CichlidPiData directory
        # (pick the share whose root folder name contains 'McGrath')
        root_dir = [r for r in run(['rclone', 'lsf', remote]).split() if 'McGrath' in r][0]
        cloud_master_dir = join(remote + root_dir, 'Apps', 'CichlidPiData')
        return cloud_master_dir
def _local_path_to_cloud_path(self, local_path):
return local_path.replace(self.local_paths['master_dir'], self.cloud_master_dir)
def _cloud_path_to_local_path(self, cloud_path):
return cloud_path.replace(self.cloud_master_dir, self.local_paths['master_dir'])
def _full_path_to_relative_path(self, full_path):
return full_path.replace(self.cloud_master_dir, '').replace(self.local_paths['master_dir'], '').strip('/')
class ProjectFileManager(FileManager):
    """Project specific class for managing local and cloud storage. Inherits from FileManager."""

    def __init__(self, pid, file_manager=None, training=False):
        """initialize a new FileManager, unless an existing file manager was passed to the constructor to save time

        Args:
            pid (str): project id
            file_manager (FileManager): optional. pass a pre-existing FileManager object to improve performance when
                initiating numerous ProjectFileManagers
            training (bool): if True, expect the training layout (tarred image dir in the shared
                BoxedImages folder) rather than a per-project Images directory
        """
        # initiate the FileManager parent class unless the optional file_manager argument is used
        self.training = training
        if file_manager is None:
            super().__init__(training=training)
        # if the file_manager argument is used, manually inherit the required attributes
        else:
            self.local_paths = file_manager.local_paths.copy()
            self.cloud_master_dir = file_manager.cloud_master_dir
            self.training = training
        self.pid = pid
        # initialize project-specific directories
        self._initialize_pfm()

    def download_all(self, image_dir=True, video_dir=True, fault_tolerant=True):
        """download the standard set of project files: crop/points arrays plus, optionally,
        the image and video directories."""
        required_files = ['video_points_numpy', 'video_crop_numpy']
        for fname in required_files:
            self.download(fname, fault_tolerant=fault_tolerant)
        if image_dir:
            self.download('image_dir', fault_tolerant=fault_tolerant)
        if video_dir:
            self.download('video_dir', fault_tolerant=fault_tolerant)

    def update_annotations(self):
        """re-download the BoxedFish annotation csv from the cloud."""
        # bugfix: FileManager.download() expects a local_paths *key*, not a full path;
        # the original passed self.local_paths['boxed_fish_csv'] (the path itself).
        self.download('boxed_fish_csv')

    def _initialize_pfm(self):
        """create project-specific directories and locate project-specific files in the cloud

        Overwrites FileManager._initialize() method
        """
        self.make_dir('project_dir', join(self.local_paths['data_dir'], self.pid))
        self.make_dir('master_analysis_dir', join(self.local_paths['project_dir'], 'MasterAnalysisFiles'))
        self.make_dir('summary_dir', join(self.local_paths['project_dir'], 'Summary'))
        self.make_dir('summary_data_dir', join(self.local_paths['summary_dir'], 'data'))
        self.make_dir('video_dir', join(self.local_paths['project_dir'], 'Videos'))
        self.local_paths.update(
            {'video_points_numpy': join(self.local_paths['master_analysis_dir'], 'VideoPoints.npy')})
        self.local_paths.update({'video_crop_numpy': join(self.local_paths['master_analysis_dir'], 'VideoCrop.npy')})
        self.local_paths.update({'detections_csv': join(self.local_paths['master_analysis_dir'], 'Detections.csv')})
        self.local_paths.update(
            {'labeled_frames_csv': join(self.local_paths['master_analysis_dir'], 'LabeledFrames.csv')})
        if self.training:
            # in training mode, the project's images live as a tarfile in the shared BoxedImages dir
            self.local_paths.update(
                {'image_dir': join(self.local_paths['boxed_images_dir'], '{}.tar'.format(self.pid))})
        else:
            self.make_dir('image_dir', join(self.local_paths['project_dir'], 'Images'))
| StarcoderdataPython |
1608323 | <reponame>Tshimanga/pytorch-lightning<filename>pl_examples/domain_templates/computer_vision_fine_tuning.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computer vision example on Transfer Learning.
This computer vision example illustrates how one could fine-tune a pre-trained
network (by default, a ResNet50 is used) using pytorch-lightning. For the sake
of this example, the 'cats and dogs dataset' (~60MB, see `DATA_URL` below) and
the proposed network (denoted by `TransferLearningModel`, see below) is
trained for 15 epochs.
The training consists of three stages.
From epoch 0 to 4, the feature extractor (the pre-trained network) is frozen except
maybe for the BatchNorm layers (depending on whether `train_bn = True`). The BatchNorm
layers (if `train_bn = True`) and the parameters of the classifier are trained as a
single parameters group with lr = 1e-2.
From epoch 5 to 9, the last two layer groups of the pre-trained network are unfrozen
and added to the optimizer as a new parameter group with lr = 1e-4 (while lr = 1e-3
for the first parameter group in the optimizer).
Eventually, from epoch 10, all the remaining layer groups of the pre-trained network
are unfrozen and added to the optimizer as a third parameter group. From epoch 10,
the parameters of the pre-trained network are trained with lr = 1e-5 while those of
the classifier is trained with lr = 1e-4.
Note:
See: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
"""
import logging
from pathlib import Path
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn, optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from torchvision import models, transforms
from torchvision.datasets import ImageFolder
from torchvision.datasets.utils import download_and_extract_archive
import pytorch_lightning as pl
from pl_examples import cli_lightning_logo
from pytorch_lightning import LightningDataModule
from pytorch_lightning.callbacks.finetuning import BaseFinetuning
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.utilities.cli import LightningCLI
log = logging.getLogger(__name__)
DATA_URL = "https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip"
# --- Finetuning Callback ---
class MilestonesFinetuning(BaseFinetuning):
    """Two-stage finetuning callback.

    The backbone (``pl_module.feature_extractor``) starts fully frozen. At epoch
    ``milestones[0]`` the last five layers are unfrozen and added to the optimizer;
    at epoch ``milestones[1]`` the remaining layers are unfrozen as well.
    """

    def __init__(self, milestones: tuple = (5, 10), train_bn: bool = False):
        super().__init__()
        self.milestones = milestones
        self.train_bn = train_bn

    def freeze_before_training(self, pl_module: pl.LightningModule):
        # Start with the whole feature extractor frozen (BatchNorm handling
        # controlled by train_bn).
        self.freeze(modules=pl_module.feature_extractor, train_bn=self.train_bn)

    def finetune_function(self, pl_module: pl.LightningModule, epoch: int, optimizer: Optimizer, opt_idx: int):
        backbone = pl_module.feature_extractor
        if epoch == self.milestones[0]:
            # First milestone: unfreeze the five last backbone layers.
            self.unfreeze_and_add_param_group(
                modules=backbone[-5:], optimizer=optimizer, train_bn=self.train_bn
            )
        elif epoch == self.milestones[1]:
            # Second milestone: unfreeze all remaining backbone layers.
            self.unfreeze_and_add_param_group(
                modules=backbone[:-5], optimizer=optimizer, train_bn=self.train_bn
            )
class CatDogImageDataModule(LightningDataModule):
    """LightningDataModule for the 'cats and dogs filtered' dataset.

    Downloads and extracts the archive, then serves train/validation
    dataloaders with ImageNet-style normalization.
    """

    def __init__(self, dl_path: Union[str, Path] = "data", num_workers: int = 0, batch_size: int = 8):
        """CatDogImageDataModule

        Args:
            dl_path: root directory where to download the data
            num_workers: number of CPU workers
            batch_size: number of sample in a batch
        """
        super().__init__()
        self._dl_path = dl_path
        self._num_workers = num_workers
        self._batch_size = batch_size

    def prepare_data(self):
        """Download images and prepare images datasets."""
        download_and_extract_archive(url=DATA_URL, download_root=self._dl_path, remove_finished=True)

    @property
    def data_path(self):
        # Root of the extracted archive.
        return Path(self._dl_path).joinpath("cats_and_dogs_filtered")

    @property
    def normalize_transform(self):
        # Standard ImageNet channel statistics.
        return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    @property
    def train_transform(self):
        # Resize plus horizontal-flip augmentation for training.
        return transforms.Compose(
            [
                transforms.Resize((224, 224)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                self.normalize_transform,
            ]
        )

    @property
    def valid_transform(self):
        # No augmentation at validation time.
        return transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), self.normalize_transform])

    def create_dataset(self, root, transform):
        return ImageFolder(root=root, transform=transform)

    def __dataloader(self, train: bool):
        """Train/validation loaders."""
        if train:
            split_root = self.data_path.joinpath("train")
            transform = self.train_transform
        else:
            split_root = self.data_path.joinpath("validation")
            transform = self.valid_transform
        dataset = self.create_dataset(split_root, transform)
        # Shuffle only the training split.
        return DataLoader(dataset=dataset, batch_size=self._batch_size, num_workers=self._num_workers, shuffle=train)

    def train_dataloader(self):
        log.info("Training data loaded.")
        return self.__dataloader(train=True)

    def val_dataloader(self):
        log.info("Validation data loaded.")
        return self.__dataloader(train=False)
# --- Pytorch-lightning module ---
class TransferLearningModel(pl.LightningModule):
    """LightningModule that fine-tunes a pre-trained torchvision backbone for binary classification."""

    def __init__(
        self,
        backbone: str = "resnet50",
        train_bn: bool = False,
        milestones: tuple = (2, 4),
        batch_size: int = 32,
        lr: float = 1e-3,
        lr_scheduler_gamma: float = 1e-1,
        num_workers: int = 6,
        **kwargs,
    ) -> None:
        """TransferLearningModel

        Args:
            backbone: Name (as in ``torchvision.models``) of the feature extractor
            train_bn: Whether the BatchNorm layers should be trainable
            milestones: List of two epochs milestones
            batch_size: Number of samples per batch
            lr: Initial learning rate
            lr_scheduler_gamma: Factor by which the learning rate is reduced at each milestone
            num_workers: Number of dataloader workers (stored on the module)
        """
        super().__init__()
        self.backbone = backbone
        self.train_bn = train_bn
        self.milestones = milestones
        self.batch_size = batch_size
        self.lr = lr
        self.lr_scheduler_gamma = lr_scheduler_gamma
        self.num_workers = num_workers
        self.__build_model()
        # Separate metric objects so train/val accuracy state never mixes.
        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.save_hyperparameters()

    def __build_model(self):
        """Define model layers & loss."""
        # 1. Load pre-trained network:
        model_func = getattr(models, self.backbone)
        backbone = model_func(pretrained=True)
        # Drop the final fully-connected layer; everything before it becomes the feature extractor.
        _layers = list(backbone.children())[:-1]
        self.feature_extractor = nn.Sequential(*_layers)
        # 2. Classifier:
        # 2048 inputs match the resnet50 pooled feature size; one logit for the binary task.
        _fc_layers = [nn.Linear(2048, 256), nn.ReLU(), nn.Linear(256, 32), nn.Linear(32, 1)]
        self.fc = nn.Sequential(*_fc_layers)
        # 3. Loss:
        self.loss_func = F.binary_cross_entropy_with_logits

    def forward(self, x):
        """Forward pass. Returns logits."""
        # 1. Feature extraction:
        x = self.feature_extractor(x)
        x = x.squeeze(-1).squeeze(-1)
        # 2. Classifier (returns logits):
        x = self.fc(x)
        return x

    def loss(self, logits, labels):
        """Binary cross-entropy computed on raw logits."""
        return self.loss_func(input=logits, target=labels)

    def training_step(self, batch, batch_idx):
        # 1. Forward pass:
        x, y = batch
        y_logits = self.forward(x)
        y_scores = torch.sigmoid(y_logits)
        y_true = y.view((-1, 1)).type_as(x)
        # 2. Compute loss
        train_loss = self.loss(y_logits, y_true)
        # 3. Compute accuracy:
        self.log("train_acc", self.train_acc(y_scores, y_true.int()), prog_bar=True)
        return train_loss

    def validation_step(self, batch, batch_idx):
        # 1. Forward pass:
        x, y = batch
        y_logits = self.forward(x)
        y_scores = torch.sigmoid(y_logits)
        y_true = y.view((-1, 1)).type_as(x)
        # 2. Compute loss
        self.log("val_loss", self.loss(y_logits, y_true), prog_bar=True)
        # 3. Compute accuracy:
        self.log("val_acc", self.valid_acc(y_scores, y_true.int()), prog_bar=True)

    def configure_optimizers(self):
        # Only parameters left trainable (by the finetuning callback) enter the optimizer.
        parameters = list(self.parameters())
        trainable_parameters = list(filter(lambda p: p.requires_grad, parameters))
        rank_zero_info(
            f"The model will start training with only {len(trainable_parameters)} "
            f"trainable parameters out of {len(parameters)}."
        )
        optimizer = optim.Adam(trainable_parameters, lr=self.lr)
        scheduler = MultiStepLR(optimizer, milestones=self.milestones, gamma=self.lr_scheduler_gamma)
        return [optimizer], [scheduler]
class MyLightningCLI(LightningCLI):
    """CLI wrapper that wires the finetuning callback's arguments into the model/trainer config."""

    def add_arguments_to_parser(self, parser):
        # Expose MilestonesFinetuning's constructor args under the 'finetuning'
        # namespace, and keep model hyper-parameters in sync with the
        # data/finetuning settings via linked arguments.
        parser.add_class_arguments(MilestonesFinetuning, "finetuning")
        parser.link_arguments("data.batch_size", "model.batch_size")
        parser.link_arguments("finetuning.milestones", "model.milestones")
        parser.link_arguments("finetuning.train_bn", "model.train_bn")
        parser.set_defaults(
            {
                "trainer.max_epochs": 15,
                "trainer.weights_summary": None,
                "trainer.progress_bar_refresh_rate": 1,
                "trainer.num_sanity_val_steps": 0,
            }
        )

    def instantiate_trainer(self, *args):
        # Build the callback from the parsed 'finetuning' config and inject it
        # into the trainer defaults before the Trainer is constructed.
        finetuning_callback = MilestonesFinetuning(**self._get(self.config_init, "finetuning"))
        self.trainer_defaults["callbacks"] = [finetuning_callback]
        return super().instantiate_trainer(*args)
def cli_main():
    """Entry point: build the CLI (model + datamodule) with a fixed default seed."""
    MyLightningCLI(TransferLearningModel, CatDogImageDataModule, seed_everything_default=1234)


if __name__ == "__main__":
    cli_lightning_logo()
    cli_main()
| StarcoderdataPython |
5013316 | from argparse import ArgumentParser
import numpy as np
import torch
import imageio
from PIL import Image
import torchvision.transforms as transforms
from tqdm.auto import trange
from utils import to_image, image_to_tensor
from model import NeuralStyle
from utils import Params
CONTENT_LAYERS = ["conv_4"]
STYLE_LAYERS = ["conv_1", "conv_2", "conv_3", "conv_4", "conv_5"]
def style_transfer_image(args):
    """Run neural style transfer on a single image and save the result.

    Args:
        args: namespace/Params object with content_target, style_targets,
            style_weights, epochs, style_weight, content_weight, verbose
            and output attributes.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image = Image.open(args.content_target)
    size = image.size
    content_target = image_to_tensor(image, size).to(device)
    style_targets = [
        image_to_tensor(Image.open(image), size).to(device)
        for image in args.style_targets
    ]
    # Default to uniform style weights when none are given.
    n = len(style_targets)
    style_weights = np.ones(n) / n if args.style_weights is None else args.style_weights
    # Start the optimization from the content image itself.
    input_image = content_target.clone().to(device)
    neural_style = NeuralStyle(content_layers=CONTENT_LAYERS, style_layers=STYLE_LAYERS)
    neural_style.content_target = content_target
    neural_style.set_style_targets(style_targets, style_weights)
    # transfer() returns (image, extra); only the image is needed here.
    output_image, _ = neural_style.transfer(
        input_image=input_image,
        epochs=args.epochs,
        style_weight=args.style_weight,
        content_weight=args.content_weight,
        verbose=args.verbose,
    )
    to_image(output_image, size=size).save(args.output)
def style_transfer_video(args):
    """Run style transfer frame-by-frame on a video, blending linearly between two styles.

    Each frame's optimization is seeded with the previous frame's result for
    temporal coherence; the stylized frames are written to output.mp4 at the
    source video's frame rate.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loader = transforms.ToPILImage()
    reader = imageio.get_reader(args.content_target)
    frames = [
        image_to_tensor(loader(reader.get_data(i)), (512, 512))
        for i in range(reader.count_frames())
    ]
    style_targets = [
        image_to_tensor(Image.open(image), (512, 512)) for image in args.style_targets
    ]
    # Shift the style mix from the first style to the second across the video.
    style_weights = np.linspace(0, 1, num=len(frames))
    neural_style = NeuralStyle(
        content_layers=CONTENT_LAYERS, style_layers=STYLE_LAYERS
    ).to(device)
    input_image = frames[0].to(device)
    outputs = []
    for i in trange(len(frames)):
        neural_style.content_target = frames[i].to(device)
        neural_style.set_style_targets(
            style_targets, [1 - style_weights[i], style_weights[i]]
        )
        # bugfix: transfer() returns a (image, extra) tuple (see style_transfer_image);
        # the original assigned the whole tuple, so .clone() below would fail.
        output_image, _ = neural_style.transfer(
            input_image=input_image,
            epochs=args.epochs,
            style_weight=args.style_weight,
            content_weight=args.content_weight,
            verbose=args.verbose,
        )
        # Seed the next frame's optimization with this frame's result.
        input_image = output_image.clone().to(device)
        outputs.append(output_image.to("cpu"))
        del output_image
    writer = imageio.get_writer("output.mp4", fps=reader.get_meta_data()["fps"])
    # Resize back to the source resolution (imageio frames are (h, w, c)).
    shape = reader.get_data(0).shape[:2]
    outputs = [to_image(output, (shape[1], shape[0])) for output in outputs]
    for output in outputs:
        writer.append_data(np.asarray(output))
    writer.close()
def main():
    """Parse the params-file path and dispatch to image or video style transfer."""
    parser = ArgumentParser()
    parser.add_argument(
        "--params_path", type=str, help="path to `params.json`", required=True
    )
    args = parser.parse_args()
    params = Params(args.params_path)
    print(params)
    # Dispatch on the file extension; lowercase it so e.g. '.JPG' is also
    # treated as an image (the original comparison was case-sensitive).
    extension = params.content_target.split(".")[-1].lower()
    if extension in ("jpg", "jpeg", "png", "tiff"):
        style_transfer_image(params)
    else:
        style_transfer_video(params)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1778040 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 24 20:49:22 2019
MIT License
Copyright (c) 2019 <NAME> <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
'''
@brief Example function of a while loop. Get the values of tan(x),
possibly for plotting values.
'''
def WhileLoopExample(delay=1.0):
    """Walk x upward in 0.1 steps while x <= tan(x), printing each step.

    tan(x) > x holds on (0, pi/2), so the loop runs until x passes pi/2,
    where tan(x) turns negative and the condition fails.

    Args:
        delay: seconds to pause between iterations. Defaults to 1.0 to keep
            the original pacing; pass 0 to run without pausing.

    Returns:
        None
    """
    import math
    import time
    x = 0
    # Iterate until we find a value of x that is the same value as the output
    # of the tangent of x. Basically, where the input value equals the output
    # value for the tangent function
    while(x <= math.tan(x)):
        print('x: ' + str(x))
        print('Type x: ' + str(type(x)))
        print('tan(x): ' + str(math.tan(x)))
        print('\n')
        x = x + 0.1
        # bugfix: the original called os.sleep(1), which raises AttributeError
        # (the os module has no sleep function); time.sleep is the correct call.
        time.sleep(delay)
    # Notice the values inside the loop are still updated, even after the
    # condition fails.
    print('Outside of Loop:')
    print('x: ' + str(x))
    print('tan(x): ' + str(math.tan(x)))
    return None
'''
@brief Example function of using a
for loop using a list
'''
def ForLoopExample1():
    """Example of a for loop iterating over a list.

    Returns:
        None
    """
    # This is the collection we are iterating through. Any collection
    # can be iterated through, such as tuples or lists.
    iterationValues = [0, 1, 2, 3, 4, 5]
    # x is called the loop variable, and takes the place of each element
    # in the collection as the loop traverses each element.
    # bugfix: the original looped over range(2, 5) and never used the list,
    # contradicting both its docstring and the comment above.
    for x in iterationValues:
        print(x)
    return None
'''
@brief Example function of using a
for loop using a range
'''
def ForLoopExample2():
    """Example of a for loop over index/value pairs of a list.

    Returns:
        None
    """
    iterationValues = [10, 11, 12, 13, 14, 15]
    # cleanup/bugfix: the original built an unused `indices` range and
    # recovered each index with iterationValues.index(value), which is O(n)
    # per element and returns the wrong index when the list has duplicates.
    # enumerate() yields the index and value together.
    for index, value in enumerate(iterationValues):
        print(index)
    print('')
    return None
# Demo driver: only ForLoopExample1 is enabled; uncomment the others to run them.
#WhileLoopExample()
ForLoopExample1()
#ForLoopExample2()
4948817 | <gh_stars>0
from .app import Webserver
# Start the web server when this module is executed as the entry point.
if __name__ == '__main__':
    Webserver.run()
| StarcoderdataPython |
6593824 | <reponame>DeanBiton/Beyond-07-team-4
def test_check_pytest():
    """Trivial smoke test confirming the pytest setup runs."""
    assert True
| StarcoderdataPython |
8190764 | import requests
# traer la funcion html para convertir html a un archivo para aplicar xpath
import lxml.html as html
# Crear carpeta con fecha de hoy
import os
# Traer la fecha actual
import datetime
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//div[@class="V_Trends"]/h2/a/@href'
XPATH_TITLE = '//h2/a[contains(@class, "") and not(@href)]/text()'
XPATH_SUMMARY = '//div[@class="lead"]/p/text()'
XPATH_BODY = '//div[@class="html-content"]/p[not(@class)]/text()'
def parse_notice(link, today):
    """Fetch one article and write its title/summary/body to data/<today>/<title>.txt.

    Args:
        link (str): absolute URL of the article.
        today (str): date string (dd-mm-YYYY) naming the output folder, which
            must already exist (created by parse_home).
    """
    try:
        print(link)
        response = requests.get(link)
        if response.status_code == 200:
            # Decode and parse the article HTML so XPath can be applied.
            notice = response.content.decode('utf-8')
            parsed = html.fromstring(notice)
            try:
                # bugfix: xpath() returns a list; take the first match (as is
                # done for the summary below), otherwise the .replace() calls
                # raise AttributeError on a list.
                title = parsed.xpath(XPATH_TITLE)[0]
                # Strip characters that are invalid or awkward in filenames
                # (the original also replaced '"' twice; deduplicated here).
                for ch in ('"', '/', ':'):
                    title = title.replace(ch, '')
                print(title)
                summary = parsed.xpath(XPATH_SUMMARY)[0]
                body = parsed.xpath(XPATH_BODY)
            # Some articles have no title/summary; skip them and move on.
            except IndexError:
                print('error creating notice files')
                return
            # `with` keeps the file safe (flushed and closed) even if the
            # script dies mid-write; utf-8 handles special characters.
            with open(f'data/{today}/{title}.txt', 'w', encoding='utf-8') as f:
                f.write(title)
                f.write('\n\n')
                f.write(summary)
                f.write('\n\n')
                for p in body:
                    f.write(p)
                    f.write('\n')
        else:
            raise ValueError(f'Error: {response.status_code}')
    except ValueError as ve:
        # Reached when the request returned a non-200 status code.
        print(ve)
def parse_home():
    """Scrape the home page, collect trending article links, and save each article under data/<today>/."""
    try:
        response = requests.get(HOME_URL)
        if response.status_code == 200:
            # Decode (handles special characters such as 'ñ') and parse so
            # XPath expressions can be applied.
            home = response.content.decode()
            parsed = html.fromstring(home)
            # List of article URLs matched on the home page.
            link_to_notices = parsed.xpath(XPATH_LINK_TO_ARTICLE)
            # Today's date as a string, used as the output folder name.
            today = datetime.date.today().strftime('%d-%m-%Y')
            local_path = f'./data/{today}'
            if not os.path.isdir(local_path):
                # improvement: makedirs also creates ./data when it is
                # missing, where os.mkdir would raise FileNotFoundError.
                os.makedirs(local_path)
            else:
                print('folder already exist')
            # Download and store each linked article in today's folder.
            for link in link_to_notices:
                parse_notice(link, today)
        else:
            raise ValueError(f'Error: {response.status_code}')
    except ValueError as ve:
        print(ve)
def run():
    """Entry point: scrape today's trending notices from the home page."""
    parse_home()


if __name__ == '__main__':
    run()
1784005 | <reponame>profjsb/PyAdder
import unittest
import doctest
import sys
def additional_tests():
    """Return a unittest suite containing doctests.

    NOTE(review): this imports and doctests `bson`, which looks copy-pasted
    from another project; presumably it should target the `adder` package
    instead -- confirm before relying on it.
    """
    import bson
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocTestSuite(bson))
    return suite
def all_tests_suite():
    """Assemble the full test suite.

    NOTE(review): `additional_tests()` takes no arguments but is called with
    one here, so this raises TypeError at runtime. The loader call and the
    doctest suite were probably meant to be combined (e.g. via
    suite.addTests(...)) -- confirm intent before fixing.
    """
    def get_suite():
        return additional_tests(
            unittest.TestLoader().loadTestsFromNames([
                'adder.tests.test_one_number'
            ]))
    suite = get_suite()
    # This import's result is unused; presumably left over from debugging.
    import adder
    return suite
def main():
    """Run the full suite with a text runner; exit non-zero on failure."""
    # Each '-v' on the command line bumps verbosity by one.
    runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count('-v'))
    suite = all_tests_suite()
    raise SystemExit(not runner.run(suite).wasSuccessful())


if __name__ == '__main__':
    import os
    import sys
    # Make the package root importable when this file is run as a script.
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()
| StarcoderdataPython |
4939421 | # -*- coding: utf-8 -*-
import unittest
import parser
import time, datetime
import pdb
class ParserTest(unittest.TestCase):
    """Unit tests for parser.Parser's Chinese date/time phrase parsing."""

    def setUp(self):
        # Anchor every test to the current time so parsed offsets are
        # evaluated relative to "now".
        self.t = time.time()
        self.date = datetime.datetime.fromtimestamp(self.t)
        self.parser = parser.Parser(self.t)
        pass

    def tearDown(self):
        pass

    def testFoo(self):
        # Sanity check that the test harness itself runs.
        self.assertEqual(1, 1, "testFoo fail")

    def test_parse_week_offset(self):
        # "Saturday" phrase: the expected day offset depends on today's
        # ISO weekday (1=Monday ... 7=Sunday), hence the lookup table.
        sentence = u"周六去春游"
        offsets = [6, 5, 4, 3, 2, 1, 0, -1]
        self.assertEqual(self.parser.parse_week_offset(sentence),
                         offsets[self.date.isocalendar()[2]],
                         "parse_week_offset failed")

    def test_parse_day_offset(self):
        # "tomorrow" phrase should yield an offset of one day.
        sentence = u"打算明天去春游"
        self.assertEqual(self.parser.parse_day_offset(sentence),
                         1, "parse_day_offset failed")

    def test_parse_moment(self):
        # "evening" phrase maps to hour 20.
        sentence = u"晚上吃夜宵"
        self.assertEqual(self.parser.parser_moment(sentence),
                         20, "parser_moment failed")

    def test_parse_clock(self):
        # Plain hour, 24-hour value, and half-hour ("点半") forms.
        sentence = u"7点晨跑"
        self.assertEqual(self.parser.parser_clock(sentence),
                         7, "parser_clock failed")
        sentence = u"19点吃饭"
        result = self.parser.parser_clock(sentence)
        self.assertEqual(result,
                         19, "parser_clock failed, result:" + str(result))
        sentence = u"19点半吃饭"
        result = self.parser.parser_clock(sentence)
        self.assertEqual(result,
                         19.5, "parser_clock failed, result:" + str(result))

    def test_parse_due_time(self):
        # Combined day offset + clock should produce tomorrow at 19:00.
        sentence = u"明天19点打球"
        dt = self.parser.parse_due_time(sentence)
        self.assertEqual(dt.hour, 19, "parse hour failed.")
        new_dt = self.date + datetime.timedelta(days=1)
        self.assertEqual(dt.day, new_dt.day)

    def test_parse_list(self):
        # '#l' suffix selects the ("Life", "Inbox") board/list pair.
        sentence = u"明天19点打球#l"
        (b, l) = self.parser.parse_list(sentence)
        self.assertTupleEqual((b, l), ("Life", "Inbox"),
                              "test parse list test failed.")


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4800670 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Rules for validating whether a conversational Finnish word
# can be a reduced form of a standard Finnish word.
def is_uoa(ch):
    """True if ch is one of the back vowels u, o, a."""
    return ch in {"u", "o", "a"}
def is_ie(ch):
    """True if ch is one of the neutral vowels i, e."""
    return ch == "i" or ch == "e"
def is_yoa(ch):
    """True if ch is one of the front vowels y, ö, ä."""
    return ch in {"y", "ö", "ä"}
def is_vowel(ch):
    """True if ch is a Finnish vowel (u, o, a, i, e, y, ö, ä).

    bugfix: the original tuple listed "o" twice and omitted "u", so
    is_vowel("u") was False -- inconsistent with is_uoa(), which treats
    u as a vowel.
    """
    return ch in ("u", "o", "a", "i", "e", "y", "ö", "ä")
def is_consonant(ch):
    # Defined as "not a vowel": any non-vowel character (including
    # non-letters) is treated as a consonant.
    return not is_vowel(ch)
class ChangeContext:
    """Context for one edit (insertion/deletion/substitution) inside a word.

    Holds the left and right neighboring sequences plus the center change,
    and provides predicate helpers used by the reduction-validation rules.
    The arguments x passed to the predicates may be a single string or a
    tuple of alternative strings.
    """

    def __init__(self, left, center, right, more_left, more_right):
        self.left = left
        self.center = center
        self.right = right
        self.more_left = more_left
        self.more_right = more_right
        # Cache some features.
        self.__left_endswith_vowel = None
        self.__left_endswith_double_vowel = None
        self.__right_startswith_vowel = None

    def match_left(self, x):
        # Exact match of the left sequence against x (or any alternative in x).
        if type(x) is tuple:
            return self.left.sequence in x
        else:
            return self.left.sequence == x

    def left_endswith(self, x):
        return self.left.sequence.endswith(x)

    def left_endswith_vowel(self):
        # Lazily computed and cached.
        if self.__left_endswith_vowel is None:
            self.__left_endswith_vowel = is_vowel(self.left.sequence[-1])
        return self.__left_endswith_vowel

    def left_endswith_vowel_and(self, x):
        # True when the left context ends with any vowel immediately
        # followed by x (or by one of the patterns in x).
        seq = self.left.sequence
        if type(x) is tuple:
            return \
                any((len(seq) >= len(pattern) + 1) and \
                    is_vowel(seq[-len(pattern) - 1]) and \
                    (seq.endswith(pattern)) for pattern in x)
        else:
            return \
                (len(self.left.sequence) > len(x)) and \
                is_vowel(self.left.sequence[-len(x) - 1]) and \
                (self.left.sequence[-len(x):] == x)

    # Left context ends in one of the back vowels (u, o, a) and
    # given argument, meaning that suffixes will have a instead
    # of ä and o instead of ö.
    def left_endswith_uoa_and(self, x):
        seq = self.left.sequence
        if type(x) is tuple:
            return \
                any((len(seq) >= len(pattern) + 1) and \
                    is_uoa(seq[-len(pattern) - 1]) and \
                    (seq.endswith(pattern)) for pattern in x)
        else:
            return \
                (len(self.left.sequence) > len(x)) and \
                is_uoa(self.left.sequence[-len(x) - 1]) and \
                (self.left.sequence[-len(x):] == x)

    # Left context ends in one of the front vowels (i, e) and
    # given argument, meaning that suffixes can have any vowels
    # (depending what vowels exist in the beginning of the word).
    def left_endswith_ie_and(self, x):
        seq = self.left.sequence
        if type(x) is tuple:
            return \
                any((len(seq) >= len(pattern) + 1) and \
                    is_ie(seq[-len(pattern) - 1]) and \
                    (seq.endswith(pattern)) for pattern in x)
        else:
            return \
                (len(self.left.sequence) > len(x)) and \
                is_ie(self.left.sequence[-len(x) - 1]) and \
                (self.left.sequence[-len(x):] == x)

    # Left context ends in one of the back vowels (y, ö, ä) and
    # given argument, meaning that suffixes will have a instead
    # of ä and o instead of ö.
    def left_endswith_yoa_and(self, x):
        seq = self.left.sequence
        if type(x) is tuple:
            return \
                any((len(seq) >= len(pattern) + 1) and \
                    is_yoa(seq[-len(pattern) - 1]) and \
                    (seq.endswith(pattern)) for pattern in x)
        else:
            return \
                (len(self.left.sequence) > len(x)) and \
                is_yoa(self.left.sequence[-len(x) - 1]) and \
                (self.left.sequence[-len(x):] == x)

    def left_endswith_double_vowel(self):
        # Lazily computed and cached: last two characters are both vowels.
        if self.__left_endswith_double_vowel is None:
            self.__left_endswith_double_vowel = \
                (len(self.left.sequence) >= 2) and \
                is_vowel(self.left.sequence[-2]) and \
                is_vowel(self.left.sequence[-1])
        return self.__left_endswith_double_vowel

    def match_right(self, x):
        # Exact match of the right sequence against x (or any alternative in x).
        if type(x) is tuple:
            return self.right.sequence in x
        else:
            return self.right.sequence == x

    def right_startswith(self, x):
        return self.right.sequence.startswith(x)

    def right_startswith_vowel(self):
        # Lazily computed and cached.
        if self.__right_startswith_vowel is None:
            self.__right_startswith_vowel = is_vowel(self.right.sequence[0])
        return self.__right_startswith_vowel

    def match_delete(self, x):
        # True when the center change is a deletion of x (or one of x).
        if self.center.is_insert():
            return False
        if type(x) is tuple:
            return self.center.delete in x
        else:
            return self.center.delete == x

    def match_subst(self, x, y):
        # True when the center change substitutes x (deleted) with y
        # (inserted); either side may be a tuple of alternatives.
        if type(x) is tuple:
            if type(y) is tuple:
                return (self.center.delete in x) and (self.center.insert in y)
            else:
                return (self.center.delete in x) and (self.center.insert == y)
        else:
            if type(y) is tuple:
                return (self.center.delete == x) and (self.center.insert in y)
            else:
                return (self.center.delete == x) and (self.center.insert == y)
def validate_change(c):
    """Decide whether a single edit between the standard and colloquial
    form of a Finnish word is a plausible colloquialization.

    *c* is a ChangeContext bundling the change with its left/right
    neighbours.  Each `if` below encodes one known reduction pattern; the
    inline examples show the colloquial form with the deleted text in
    parentheses and substitutions written as -old+new.  Returns True as
    soon as any rule licenses the change, else False.
    """
    # -lla => -l
    # minul(la)ko
    # kairal(la)
    if c.left_endswith_uoa_and("l") and c.match_delete("la"):
        return True
    # kaupungil(la)han
    # puolil(la)kaan
    # siel(lä)kö
    # puolel(la)
    if c.left_endswith_ie_and("l") and c.match_delete(("la", "lä")):
        return True
    # mytyl(lä)
    if c.left_endswith_yoa_and("l") and c.match_delete("lä"):
        return True
    # vielä => viel
    # viel(ä)ki
    if c.left_endswith("viel") and c.match_delete("ä"):
        return True
    # -ssa => -s
    # satamas(sa)
    # talos(sa)
    if c.left_endswith_uoa_and("s") and c.match_delete("sa"):
        return True
    # helsingis(sä)kään
    # talis(sa)
    # ääres(sä)hän
    if c.left_endswith_ie_and("s") and c.match_delete(("sa", "sä")):
        return True
    # vähäs(sä)
    # töllös(sä)
    if c.left_endswith_yoa_and("s") and c.match_delete(("sa", "sä")):
        return True
    # -sta => -st / -lta => -lt
    # kannust(a)
    # radiost(a)kin
    # rannalt(a)
    if c.left_endswith_uoa_and(("st", "lt")) and c.match_delete("a"):
        return True
    # amiksilt(a)
    # pesist(ä)
    # amiksest(a)
    # hesest(ä)
    if c.left_endswith_ie_and(("st", "lt")) and c.match_delete(("a", "ä")):
        return True
    # mytylt(ä)
    # pyynnöst(ä)kään
    # vähäst(ä)
    if c.left_endswith_yoa_and(("st", "lt")) and c.match_delete("ä"):
        return True
    # -ksi => -ks
    # kaks(i)ko
    # miks(i)
    if c.left_endswith("ks") and c.match_delete("i"):
        return True
    # -uun => -uu / -oon => -oo / ...
    # ostetaa(n)han
    # meree(n)
    # ostettii(n)kohan
    # mentii(n)kös
    # varastoo(n)
    # turkuu(n)
    # hullyy(n)
    # mennää(n)
    # töllöö(n)
    if c.left_endswith_double_vowel() and c.match_delete("n"):
        return True
    # -ua => -uu / -oa => -oo / ...
    # suru -a+u
    if c.left_endswith("u") and c.match_subst("a", "u"):
        return True
    # lumo -a+o va
    # aino -a+o staan
    if c.left_endswith("o") and c.match_subst("a", "o"):
        return True
    # biisi -ä+i
    # spagetti -a+i
    if c.left_endswith("i") and c.match_subst(("a", "ä"), "i"):
        return True
    # korke -a+e koulu
    # skebbe -ä+e
    if c.left_endswith("e") and c.match_subst(("a", "ä"), "e"):
        return True
    # pyry -ä+y
    if c.left_endswith("y") and c.match_subst("ä", "y"):
        return True
    # henkilö -ä+ö
    if c.left_endswith("ö") and c.match_subst("ä", "ö"):
        return True
    # -si => -s / -oi => -o / -ui => -u
    # työpaikallas(i)
    # taakses(i)
    # kaipais(i)kaan
    # pienemmäks(i)
    # puhals(i)
    # työns(i)
    # vuos(i)
    # laps(i)
    # ymmärs(i)hän
    # kaus(i)
    # kehu(i)
    # saatto(i)han
    # juuttu(i)
    # roikku(i)
    # arvo(i)tus
    if c.left_endswith(("s", "o", "u")) and c.match_delete("i"):
        return True
    # -kin => -ki / -hin => -hi / -nen => -ne / -sen => -se / -han => -ha / -hän => -hä
    # oletki(n)
    # mukavaaki(n)
    # maihi(n)
    # älyttömi(n)
    # viisasha(n)
    # miksiköhä(n)
    # millaine(n)
    # jonkunlaise(n)
    # oireide(n)
    if c.left_endswith(("ki", "hi", "mi", "ha", "hä", "ne", "se", "de")) and c.match_delete("n") and (c.right is None):
        return True
    if c.left_endswith(("mi", "de")) and c.match_delete("n"):
        return True
    # -ja => -i / -ia => -ii / -jä => -i
    # suru -ja+i
    # talo -ja+i kin
    if c.left_endswith(("u", "o")) and c.match_subst("ja", "i"):
        return True
    # varsi -a+i
    # märki -ä+i
    # NOTE(review): identical to the -a/-ä => -i rule above, so this can
    # never fire (the earlier check already returned True); kept to mirror
    # the original rule listing.
    if c.left_endswith("i") and c.match_subst(("a", "ä"), "i"):
        return True
    # artiste -ja+i han
    # biise -jä+i
    if c.left_endswith("e") and c.match_subst(("ja", "jä"), "i"):
        return True
    # pyry -jä+i
    # mörkö -jä+i
    if c.left_endswith(("y", "ö")) and c.match_subst("jä", "i"):
        return True
    # -ta => -t / -tä => -t
    # kerjuut(a)
    # ainoot(a)
    if c.left_endswith_uoa_and("t") and c.match_delete("a"):
        return True
    # vaikeet(a)
    # mait(a)
    # syit(ä)kin
    # meit(ä)
    # siit(ä)kään
    if c.left_endswith_ie_and("t") and c.match_delete(("a", "ä")):
        return True
    # syyt(ä)
    # miljööt(ä)
    if c.left_endswith_yoa_and("t") and c.match_delete("ä"):
        return True
    # -yt => -t / -ut => -t
    # estelly(t)kään
    # pursunu(t)
    if c.left_endswith(("u", "y")) and c.match_delete("t"):
        return True
    if c.right is not None:
        # -oit- => -ot- / -ais- => -as- / -äin- => -än- / ...
        # pinno(i)te
        # kanso(i)ttaa
        # alo(i)tus
        if c.left_endswith("o") and c.match_delete("i") and c.right.startswith("t"):
            return True
        # ranka(i)su
        # alakohta(i)sta
        # auka(i)see
        # tälla(i)sesta
        # mitta(i)nen
        # puna(i)sen
        # viime(i)nen
        # tuommo(i)nen
        # pito(i)suus
        # sillo(i)n
        # kiukku(i)set
        # repä(i)sit
        # myötä(i)nen
        if c.left_endswith(("a", "o", "e", "u", "ä")) and c.match_delete("i") and c.right_startswith(("n", "s")):
            return True
        # -min- => -m- / -sin- => -s-
        # m(in)ä
        # s(in)äkin
        # m(in)ulla
        if c.match_left(("m", "s")) and c.match_delete("in") and c.right_startswith(("ä", "u")):
            return True
        # -hd- => -h-
        # jah(d)attiin
        # kah(d)eksan
        # eh(d)itty
        # tah(d)ottiin
        # kah(d)en
        # paah(d)ettiin
        if c.left_endswith("h") and c.match_delete("d"):
            return True
        # -adi- => -ai- / ija => ia / ...
        # vaa(d)itaan
        # ve(d)etään
        # huu(d)etaan
        # pu(d)ottiin
        # a(j)attele
        # äi(j)ä
        # palveli(j)oita
        if c.left_endswith_vowel() and c.match_delete(("d", "j")) and c.right_startswith_vowel():
            return True
        # -uo- => -ua-
        # tu -o+a li
        # hu -o+a ne
        if c.left_endswith("u") and c.match_subst("o", "a"):
            return True
        # -ie- => -ia-
        # hi -e+a no
        # ki -e+a hua
        # (fixed: this rule previously duplicated the -uo- => -ua- check
        # above verbatim instead of matching the i...-e+a pattern its
        # examples describe)
        if c.left_endswith("i") and c.match_subst("e", "a"):
            return True
        # sandhi
        # sitte -n+k ki
        # kaike -n+l lisäks
        # ääne -n+m murros
        # asui -n+m paikka
        # kirko -n+r rottaa
        # laste -n+j juhlat
        # joulu -n+v vietto
        if c.match_subst("n", "j") and c.right_startswith("j"):
            return True
        if c.match_subst("n", "k") and c.right_startswith("k"):
            return True
        if c.match_subst("n", "l") and c.right_startswith("l"):
            return True
        if c.match_subst("n", "m") and c.right_startswith("m"):
            return True
        if c.match_subst("n", "m") and c.right_startswith("p"):
            return True
        if c.match_subst("n", "r") and c.right_startswith("r"):
            return True
        if c.match_subst("n", "v") and c.right_startswith("v"):
            return True
        # olet => oot
        # o -le+o t
        # o -le+o tkos
        # o -le+o tpas
        # o -le+o than
        if (not c.more_left) and c.match_left("o") and c.match_subst("le", "o") and c.right_startswith("t"):
            return True
        # o -let+o k -o s
        if (not c.more_left) and c.match_left("o") and c.match_subst("let", "o") and c.match_right("k"):
            return True
        if c.match_left("k") and c.match_delete("o") and c.match_right("s") and (not c.more_right):
            return True
        # nytten => nyt / sitten => sit
        # nyt(ten)ki
        # sit(ten)kö
        if (not c.more_left) and c.match_left(("nyt", "sit")) and c.match_delete("ten"):
            return True
        # -kos => -ks / -kös => -ks
        # palaak(o)s
        # sellaistak(o)s
        if c.left_endswith_uoa_and("k") and c.match_delete("o") and c.match_right("s") and (not c.more_right):
            return True
        # meneek(ö)s
        # tuleek(o)s
        if c.left_endswith_ie_and("k") and c.match_delete(("o", "ö")) and c.match_right("s") and (not c.more_right):
            return True
        # häslääk(ö)s
        if c.left_endswith_yoa_and("k") and c.match_delete("ö") and c.match_right("s") and (not c.more_right):
            return True
        # asu(t)k(o)s
        # makaa(t)k(o)s
        # ostettii(n)k(o)s
        # hallitse(t)k(o)s
        # mene(t)k(ö)s
        # mennää(n)k(ö)s
        if c.more_left and c.match_left("k") and c.match_delete(("o", "ö")) and c.match_right("s") and (not c.more_right):
            return True
    return False
def validate(partitions):
    """Validate a whole alignment: every change partition must be licensed
    by validate_change() in its local (left, change, right) context.

    The extra booleans passed to ChangeContext flag whether additional
    context exists beyond the immediate neighbours.
    """
    if len(partitions) < 2:
        return False
    # a word may not begin with a change
    if partitions[0].is_change():
        return False
    for i in range(1, len(partitions) - 1):
        left = partitions[i - 1]
        center = partitions[i]
        right = partitions[i + 1]
        if center.is_change() and not validate_change(ChangeContext(left, center, right, i > 1, i < len(partitions) - 2)):
            return False
    # the final partition has no right context
    left = partitions[-2]
    center = partitions[-1]
    if center.is_change() and not validate_change(ChangeContext(left, center, None, len(partitions) > 2, False)):
        return False
    return True
| StarcoderdataPython |
11301984 | import math
import functorch._src.decompositions
import torch
from torch._decomp import get_decompositions
from torchinductor import config
aten = torch.ops.aten
# Module-local decomposition table: each aten op listed here is rewritten
# into simpler primitive ops (via PyTorch's shared decomposition registry)
# before inductor lowering.
decompositions = get_decompositions(
    [
        aten.clamp_max,
        aten.clamp_min,
        aten.cudnn_batch_norm,
        aten.hardsigmoid,
        aten.hardswish,
        aten.hardtanh,
        aten.l1_loss,
        aten.leaky_relu,
        aten.logsumexp.default,
        aten.mse_loss,
        aten.native_batch_norm,
        aten.native_group_norm,
        aten.native_layer_norm,
        aten.stack,
        aten.transpose.int,
        # don't exist (yet), but wish they did:
        aten._embedding_bag,
        aten.grid_sampler_2d,
        aten.norm,
    ]
)
def register_decomposition(ops):
    # Decorator factory: register the decorated function as the
    # decomposition for *ops* in this module's `decompositions` table
    # (rather than functorch's global registry).
    return functorch._src.decompositions.register_decomposition(ops, decompositions)
@register_decomposition([aten.clamp])
def clamp(x, min=None, max=None):
    """Clamp x elementwise to [min, max]; either bound may be None.

    Scalar bounds are promoted to 0-d tensors of x's dtype/device so the
    comparison stays on-device.
    """
    if min is not None:
        x = torch.maximum(x, torch.tensor(min, dtype=x.dtype, device=x.device))
    if max is not None:
        x = torch.minimum(x, torch.tensor(max, dtype=x.dtype, device=x.device))
    return x
@register_decomposition([aten._softmax])
def _softmax(x, dim, half_to_float):
    """Softmax over `dim` (decomposition of aten._softmax).

    half_to_float mirrors the aten flag: promote half-precision input to
    float32 before computing.
    """
    if half_to_float and x.dtype in (torch.bfloat16, torch.float16):
        x = x.to(torch.float32)
    # Subtract the per-slice max before exponentiating so large logits do
    # not overflow to inf; the shift cancels in the normalization
    # (standard log-sum-exp stabilization, addressing the old TODO).
    x = x - torch.amax(x, dim, keepdim=True)
    x = torch.exp(x)
    x_sum = torch.sum(x, dim, keepdim=True)
    scale = torch.reciprocal(x_sum)
    return x * scale
@register_decomposition([aten._log_softmax])
def _log_softmax(x, dim, half_to_float):
    """Log-softmax over `dim` (decomposition of aten._log_softmax).

    Shifts by the per-slice max before exponentiating for numerical
    stability: log_softmax(x) = (x - m) - log(sum(exp(x - m))).
    """
    if half_to_float and x.dtype in (torch.bfloat16, torch.float16):
        x = x.to(torch.float32)
    x = x - torch.amax(x, dim, keepdim=True)
    x_sum = torch.log(torch.sum(torch.exp(x), dim, keepdim=True))
    return x - x_sum
@register_decomposition([aten.t])
def t(x):
    """Transpose a 2-D tensor; 0-/1-D tensors are returned unchanged."""
    ndim = x.ndimension()
    if ndim in (0, 1):
        return x
    assert ndim == 2
    return torch.transpose(x, 0, 1)
@register_decomposition([aten.addmm])
def addmm(input, mat1, mat2):
    """input + mat1 @ mat2 (default beta/alpha of 1 assumed)."""
    product = torch.mm(mat1, mat2)
    return product + input
@register_decomposition([aten.elu])
def elu(self, alpha=1, scale=1, input_scale=1):
    # ELU: scale * x for x > 0, alpha * scale * (exp(input_scale * x) - 1)
    # for x <= 0. (`self` is the tensor argument, matching the aten schema.)
    negcoef = alpha * scale
    return torch.where(
        self <= 0, (torch.exp(self * input_scale) - 1) * negcoef, self * scale
    )
@register_decomposition([aten.tanh])
def tanh(x):
    # tanh(x) = 2*sigmoid(2x) - 1, expanded so the backend only needs an
    # exponential primitive.
    e = torch.exp(-2.0 * x)
    return 2.0 / (1.0 + e) - 1.0
@register_decomposition([aten.rsqrt])
def rsqrt(x):
    """1 / sqrt(x), via the sqrt and reciprocal primitives."""
    root = torch.sqrt(x)
    return torch.reciprocal(root)
@register_decomposition([aten.log2])
def log2(x):
    # log2(x) = ln(x) / ln(2); multiply by the precomputed reciprocal so
    # only one tensor op beyond the log is needed.
    inv_ln2 = 1.0 / math.log(2.0)
    return torch.log(x) * inv_ln2
@register_decomposition([aten.round.decimals])
def round_dec(x, decimals=0):
    # Round to `decimals` places: scale up, round to integer, scale back
    # (multiplying by the reciprocal rather than dividing).
    ten_pow_decimals = 10.0**decimals
    return aten.round(x * ten_pow_decimals) * (1.0 / ten_pow_decimals)
@register_decomposition([aten.div.Tensor_mode])
def div_mode(a, b, rounding_mode=None):
    """True division of a by b, optionally floored or truncated according
    to rounding_mode ("floor", "trunc", or None)."""
    rounders = {"floor": torch.floor, "trunc": torch.trunc}
    quotient = aten.div(a, b)
    rounder = rounders.get(rounding_mode)
    return rounder(quotient) if rounder is not None else quotient
@register_decomposition([aten.gelu])
def gelu(x, approximate="none"):
    """GELU activation.

    Uses the tanh approximation when requested explicitly or when
    config.approximations is globally enabled; otherwise the exact erf
    formulation x * 0.5 * (1 + erf(x / sqrt(2))).
    """
    if config.approximations or approximate != "none":
        # tanh approximation is much faster
        return (
            0.5
            * x
            * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x * x * x)))
        )
    else:
        return x * 0.5 * (1.0 + torch.special.erf(x * math.sqrt(0.5)))
@register_decomposition([aten.special_erf, aten.erf])
def special_erf(x):
    # TODO(jansel): this might be crazy slow.  Triton doesn't have the
    # cuda ::erf() builtin.  I've made a feature request for this,
    # so it may be coming soon.
    #
    # Rational approximation of erf from Abramowitz & Stegun 7.1.26
    # (via https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf/);
    # per that formula the max absolute error is on the order of 1e-7.
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    p = 0.3275911

    # erf is odd: compute on |x| and restore the sign at the end
    sign = torch.sign(x)
    x = torch.abs(x)

    # A & S 7.1.26
    t = 1.0 / (1.0 + p * x)
    y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * torch.exp(-x * x)
    return sign * y
@register_decomposition([aten.rsub.Tensor, aten.rsub.Scalar])
def rsub(a, b):
    """b - a, promoting a python scalar b to a tensor on a's device."""
    if isinstance(b, (int, float)):
        other = torch.tensor(b, dtype=a.dtype, device=a.device)
    else:
        other = b
    return other - a
@register_decomposition([aten.masked_fill.Scalar])
def masked_fill(value, mask, other):
    """Where mask is True take `other`, elsewhere keep `value`."""
    # promote a scalar fill value to a 0-d tensor of the input's dtype
    if isinstance(other, (int, float)):
        other = torch.tensor(other, dtype=value.dtype, device=value.device)
    value, mask, other = torch.broadcast_tensors(value, mask, other)
    return torch.where(mask, other, value)
@register_decomposition([aten.nan_to_num])
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
    """Replace NaN / +inf / -inf with the given (or dtype-extreme) values."""
    if nan is None:
        nan = 0.0
    # NOTE(review): torch.finfo raises for integer dtypes, so this
    # decomposition appears to assume floating-point input -- confirm.
    if posinf is None:
        posinf = torch.finfo(x.dtype).max
    if neginf is None:
        neginf = torch.finfo(x.dtype).min
    nan, posinf, neginf = (
        torch.tensor(v, dtype=x.dtype, device=x.device) for v in (nan, posinf, neginf)
    )
    # x != x is the classic elementwise NaN test
    x = torch.where(x != x, nan, x)
    x = torch.where(x == float("inf"), posinf, x)
    x = torch.where(x == float("-inf"), neginf, x)
    return x
| StarcoderdataPython |
3583633 | <filename>anpylar/app_module.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright 2018 The AnPyLar Team. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that
# can be found in the LICENSE file at http://anpylar.com/mit-license
###############################################################################
import logging
import os.path
import sys
from .utils import path_name_calc, makedir_error, makefile_error, read_license
# The templates below are used to generate the module depending of the
# options given to the argument parser
Template_Preamble = '''
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
'''
Template_Main_Import = '''from anpylar import Module
'''
Template_Module = '''
class {}(Module):
'''
Template_Bindings = '''
bindings = {}
'''
Template_Services = '''
services = {}
'''
Template_Routes = '''
routes = []
'''
Template_One_Component = '''
components = {}
'''
Template_Components = '''
components = [{}]
'''
Template_Init = '''
def __init__(self):
pass
'''
Template_Pass = '''
pass
'''
class Moduler:
    """Generates the source of an AnPyLar `Module` subclass (and its
    package __init__.py) from the templates above, then writes it out.

    Options are class attributes overridable via keyword arguments.
    """
    name = ''  # base name; class becomes <name>Module
    preamble = False  # emit shebang/coding preamble
    licfile = ''  # path to a license text to embed
    bootstrap = False  # comma-separated component class names to import/bootstrap
    components = True  # emit an (empty) components list
    bindings = True  # emit a bindings section
    services = True  # emit a services section
    routes = True  # emit a routes section
    init = True  # emit an __init__ stub
    modpath = None  # explicit output filename (else derived from name)
    do_import = False  # also generate an importing __init__.py
    submodule = False  # if not None ... (make dir and) write to that dir

    def __init__(self, **kwargs):
        # Apply keyword overrides; unknown options are a hard error.
        for k, v in kwargs.items():
            if hasattr(self, k):
                setattr(self, k, v)
            else:
                logging.error('Attribute %s unknown for componenter', k)
                sys.exit(1)

        self.contents = {}  # maps label -> (relative path, file content)
        self.generated = False  # guards against generating twice

    def write_out(self, outdir=None, makedir=True, parser=None):
        """Generate (if needed) and write all files; returns the out dir.

        When no outdir is given, submodules go into their own directory,
        everything else into the current directory.
        """
        if not self.generated:
            self.generated = True
            self.generate()

        if not outdir:
            if self.submodule:  # specific check
                outdir = self.dir_name
            else:
                outdir = '.'

        if makedir:
            makedir_error(outdir, parser=parser)

        for path, content in self.contents.values():
            filename = os.path.join(outdir, path)
            makefile_error(filename, content, parser=parser)

        return outdir

    def generate(self):
        """Assemble the module source (and optional __init__.py) into
        self.contents; does not touch the filesystem."""
        self.generated = True

        init = ''  # for __init__.py
        mod = ''  # for the component

        if self.preamble:  # whether to skip the shebang and coding info
            mod += Template_Preamble.lstrip()  # 1st line, ensure is 1st
            init += Template_Preamble.lstrip()  # 1st line, ensure is 1st

        lictxt = read_license(self.licfile)
        mod += lictxt
        init += lictxt

        mod += Template_Main_Import

        # if the class has no content (all options drive to it) add a pass to
        # avoid a syntax error
        needpass = True

        if self.bootstrap:
            # import each bootstrapped component from its snake_case module
            for comp in self.bootstrap.split(','):
                comp = comp.strip()
                compmod = path_name_calc(comp)
                mod += '\nfrom .{} import {}'.format(compmod, comp)

            mod += '\n'  # separate from rest of code

        # The definition of the module is a must
        self.modname = modname = self.name + 'Module'
        mod += Template_Module.format(modname)

        if self.bootstrap:
            needpass = False
            comps = self.bootstrap.split(',')
            if len(comps) == 1:
                mod += Template_One_Component.format(self.bootstrap)
            else:
                mod += Template_Components.format(self.bootstrap)
        elif self.components:
            needpass = False
            mod += Template_Components.format('')

        if self.bindings:
            needpass = False
            mod += Template_Bindings

        if self.services:
            needpass = False
            mod += Template_Services

        if self.routes:
            needpass = False
            mod += Template_Routes

        if self.init:
            needpass = False
            mod += Template_Init

        if needpass:
            mod += Template_Pass

        self.path_name = path_name = path_name_calc(self.modname)
        self.dir_name = path_name_calc(self.name)

        # Module path: given name or (calculated_name + ext)
        path = self.modpath or (path_name + '.py')
        # keep reference to mod name for import
        pypath, _ = os.path.splitext(path)
        self.contents['mod'] = (path, mod)

        if self.do_import or self.submodule:
            # complete __init__, calc path and output
            init += 'from .{} import {}'.format(pypath, modname)
            self.contents['init'] = ('__init__.py', init)
| StarcoderdataPython |
100431 | import copy
import echidna
import echidna.output.plot as plot
import echidna.core.spectra as spectra
from echidna.output import store
import matplotlib.pyplot as plt
import argparse
import glob
import numpy as np
import os
def convertor(path):
    """Convert every ROOT ntuple matching the *path* glob to HDF5 by
    shelling out to the echidna dump script (output goes to hdf5/)."""
    flist=np.array(glob.glob(path))
    for ntuple in flist:
        # NOTE(review): command built by string concatenation -- a path
        # containing spaces or shell metacharacters will break or be
        # interpreted by the shell; subprocess with an argv list is safer.
        os.system("python ~/echidna/echidna/scripts/dump_spectra_ntuple.py -c ~/workspace/PhD/fitting/config.yml -f "+ str(ntuple)+" -s hdf5/")
def combinerNtuple(path,filename):
    """Fill spectra from every ntuple matching *path* and sum them into a
    single spectrum dumped to *filename*."""
    flist=np.array(glob.glob(path))
    print flist
    first = True
    for hdf5 in flist:
        print hdf5
        if first:
            # first file seeds the accumulator spectrum
            spectrum1 = store.fill_from_ntuple(hdf5)
            first = False
        else:
            spectrum2 = store.fill_from_ntuple(hdf5)
            spectrum1.add(spectrum2)
    store.dump(filename, spectrum1)
def combiner(path,filename):
    """Load every hdf5 spectrum matching *path*, sum them, and dump the
    combined spectrum to *filename*."""
    flist=np.array(glob.glob(path))
    print flist
    first = True
    for hdf5 in flist:
        print hdf5
        if first:
            # first file seeds the accumulator spectrum
            spectrum1 = store.load(hdf5)
            first = False
        else:
            spectrum2 = store.load(hdf5)
            spectrum1.add(spectrum2)
    store.dump(filename, spectrum1)
"""The way you should do it is to define a lot of spectra and then plot them.
You don't really know how to normlise the histrogram or indeed weather that is of any uses in the first
place.
"""
def slicer(spectrumPath,filler,nslice):
    """Cut the spectrum at *spectrumPath* into *nslice* radial (reco)
    shells, normalise each, and append the slices to *filler*.

    NOTE(review): the slice labels hard-code 1000 mm shells, which is only
    correct for nslice == 6; confirm intended usage.
    """
    for i in range(nslice):
        spectrum=store.load(spectrumPath)
        print spectrum.sum()
        shrink_dict = {"energy_reco_low": 0.,
                       "energy_reco_high": 0.6,
                       "radial_reco_low": i*6000.0/nslice,
                       "radial_reco_high": (i+1)*6000/nslice}
        spectrum.cut(**shrink_dict)
        # normalise the slice so shapes can be compared across shells
        spectrum.scale(1)
        spec2=copy.copy(spectrum)
        spec2._name=str(i*1000)+"mm to "+str((i+1)*1000)+"mm"
        print type(spec2)
        filler.append(spec2)
def slicerMC(spectrumPath,filler,nslice):
    """Like slicer() but cutting on the MC-truth dimensions; every slice
    is labelled "MC" and its event count is printed."""
    for i in range(nslice):
        spectrum=store.load(spectrumPath)
        print spectrum.sum()
        shrink_dict = {"energy_mc_low": 0.,
                       "energy_mc_high": 1,
                       "radial_mc_low": i*6000.0/nslice,
                       "radial_mc_high": (i+1)*6000/nslice}
        spectrum.cut(**shrink_dict)
        # normalise the slice before storing
        spectrum.scale(1)
        spec2=copy.copy(spectrum)
        spec2._name="MC"
        print type(spec2)
        print "This gives the number os events in each window:"
        print "mc : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
        filler.append(spec2)
def slicerReco(spectrumPath,filler,nslice):
    """Like slicerMC() but cutting on the reconstructed dimensions; every
    slice is labelled "Reco" and its event count is printed."""
    for i in range(nslice):
        spectrum=store.load(spectrumPath)
        print spectrum.sum()
        shrink_dict = {"energy_reco_low": 0.,
                       "energy_reco_high": 1.,
                       "radial_reco_low": i*6000.0/nslice,
                       "radial_reco_high": (i+1)*6000/nslice}
        spectrum.cut(**shrink_dict)
        # normalise the slice before storing
        spectrum.scale(1)
        spec2=copy.copy(spectrum)
        spec2._name="Reco"
        print type(spec2)
        print "This gives the number os events in each window:"
        print "reco : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
        filler.append(spec2)
def signalPlotter(spectra,dim,name):
    """Save one histogram figure per slice in *spectra*, projected onto
    *dim*; files are named slice_<name>_<lo>_<hi>.png."""
    i=0
    for spec in spectra:
        fig = plt.figure()
        ax= fig.add_subplot(1,1,1)
        par = spec.get_config().get_par(dim)
        width = par.get_width()
        # bin edges, then bin centres for plt.hist with weights
        bins = np.linspace(par._low,par._high, par._bins+1)
        x = bins[:-1] + 0.5*width
        plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
        plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
        ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel=str(dim)+" [" + par.get_unit() + "]")
        ax.hist(x,bins,weights=spec.project(dim),histtype="stepfilled", color="RoyalBlue",label=spec._name)
        fig.savefig("slice_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
        i=1+i
def combiPlotter(spectra,dim,name):
    """Overlay the energy_reco projection of every slice in *spectra* on a
    single axes and save it as combined_<name>.png.

    NOTE(review): *dim* is only used for the axis-label unit; the
    projection itself is hard-coded to "energy_reco" -- confirm intended.
    """
    i=0
    fig = plt.figure()
    ax= fig.add_subplot(1,1,1)
    for spec in spectra:
        par = spec.get_config().get_par(dim)
        width = par.get_width()
        bins = np.linspace(par._low,par._high, par._bins+1)
        x = bins[:-1] + 0.5*width
        plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
        plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
        ax.set(title="Normalised energy spectrum in 1000mm slices",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="energy_reco"+ " [" + par.get_unit() + "]")
        ax.hist(x,bins,weights=spec.project("energy_reco"),label=spec._name,histtype='step')
    ax.set_ylim([0,0.03])
    ax.set_xlim([0.2,0.7])
    ax.legend(loc="best")
    fig.savefig("combined_"+str(name)+".png")
def func(path,nslice,name):
    """Slice the spectrum at *path* into *nslice* radial shells and make
    both the per-slice and the combined overlay plots."""
    spectra=[]
    slicer(path,spectra,nslice)
    signalPlotter(spectra,"energy_reco",name)
    combiPlotter(spectra,"energy_reco",name)
def po210():
    """Full Po-210 pipeline: convert ntuples, combine spectra, plot."""
    convertor("po210_ntuple/*")
    combiner("hdf5/SolarPo**ntuple*","hdf5/SolarPo210_combined.hdf5")
    plotpath="plots/"  # NOTE(review): unused -- plots are saved to cwd
    func("hdf5/SolarPo210_combined.hdf5",6,"po210")
def bi210():
    """Full Bi-210 pipeline: convert ntuples, combine spectra, plot."""
    convertor("bi210_ntuple/*")
    combiner("hdf5/SolarBi**ntuple*","hdf5/SolarBi210_combined.hdf5")
    plotpath="plots/"  # NOTE(review): unused -- plots are saved to cwd
    func("hdf5/SolarBi210_combined.hdf5",6,"bi210")
def compair(spectrumPathReco,spectrumPathMC,name):
    """Overlay the reconstructed vs MC-truth energy spectrum in each of
    six 1000 mm radial shells; saves compare_<name>_<lo>_<hi>.png."""
    spectraReco=[]
    spectraMC=[]
    slicerReco(spectrumPathReco,spectraReco,6)
    slicerMC(spectrumPathMC,spectraMC,6)
    for i in range(0,len(spectraReco)):
        fig = plt.figure()
        ax= fig.add_subplot(1,1,1)
        # reco histogram from reco binning
        par = spectraReco[i].get_config().get_par("energy_reco")
        width = par.get_width()
        bins = np.linspace(par._low,par._high, par._bins+1)
        x = bins[:-1] + 0.5*width
        ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="Energy [" + par.get_unit() + "]")
        ax.hist(x,bins,weights=spectraReco[i].project("energy_reco"),histtype="stepfilled",label=spectraReco[i]._name)
        # MC histogram from the (possibly different) MC binning, drawn
        # semi-transparent on the same axes
        par = spectraMC[i].get_config().get_par("energy_mc")
        width = par.get_width()
        bins = np.linspace(par._low,par._high, par._bins+1)
        x = bins[:-1] + 0.5*width
        ax.hist(x,bins,weights=spectraMC[i].project("energy_mc"),histtype="stepfilled",label=spectraMC[i]._name,alpha=0.75)
        ax.legend(loc=2)
        fig.savefig("compare_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
if __name__=="__main__":
print "You need to compare the recon against the mc"
print "You should bin in bigger bins becuase you could then bin in 4d"
"""You need to plot the standard spectra"""
| StarcoderdataPython |
1874214 | import collections
import dataclasses as dc
import operator
from datetime import date, datetime
from functools import reduce, partial
from typing import Type as PyType, Dict, Any, Union, Callable, Sequence, Optional
import statey as st
from statey.syms import types, utils, impl
# Default Plugin definitions
@dc.dataclass(frozen=True)
class HandleOptionalPlugin:
"""
Handle an Optional[] annotation wrapper
"""
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
inner = utils.extract_optional_annotation(annotation)
if inner is None:
return None
meta["nullable"] = True
return registry.get_type(inner, meta)
@dc.dataclass(frozen=True)
class ValuePredicatePlugin:
"""
Simple predicate plugin that will may an annotation to any subclass
whos constructor is just the nullable and meta arguments
"""
predicate: Union[Callable[[Any], bool], PyType]
type_cls: PyType[types.Type]
@st.hookimpl(tryfirst=True)
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
predicate = self.predicate
if isinstance(self.predicate, type):
predicate = lambda x: isinstance(x, type) and issubclass(x, self.predicate)
if not predicate(annotation):
return None
meta = meta.copy()
return self.type_cls(meta.pop("nullable", False), meta=meta)
@dc.dataclass(frozen=True)
class AnyPlugin:
"""
Plugin that will always return AnyType. Should be added FIRST
"""
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
return types.AnyType(meta=meta)
@dc.dataclass(frozen=True)
class ParseSequencePlugin:
"""
Parse lists and sequences into ArrayTypes
"""
array_type_cls: PyType[types.ArrayType] = types.ArrayType
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
# Sort of hacky, will behave differently in different python versions (3.7 works)
if not utils.is_sequence_annotation(annotation):
return None
inner = utils.extract_inner_annotation(annotation)
# Optionals are subtypes of themselves I guess?
if utils.extract_optional_annotation(annotation) is not None:
return None
element_type = registry.get_type(inner) if inner else st.Any
meta = meta.copy()
return self.array_type_cls(element_type, meta.pop("nullable", False), meta=meta)
@st.hookimpl
def infer_type(self, obj: Any, registry: "Registry") -> types.Type:
if not isinstance(obj, (list, tuple)):
return None
element_types = []
for item in obj:
typ = registry.infer_type(item)
if typ not in element_types:
element_types.append(typ)
if not element_types:
return None
if len(element_types) > 1:
return None
return types.ArrayType(element_types.pop(), False)
@dc.dataclass(frozen=True)
class ParseMappingPlugin:
"""
Parse lists and sequences into ArrayTypes
"""
map_type_cls: PyType[types.MapType] = types.MapType
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
# Sort of hacky, will behave differently in different python versions (3.7 works)
if not utils.is_mapping_annotation(annotation):
return None
inners = utils.extract_inner_annotations(annotation)
# Optionals are subtypes of themselves I guess?
if utils.extract_optional_annotation(annotation) is not None:
return None
if inners is not None and len(inners) != 2:
return None
key_annotation, value_annotation = inners or (None, None)
key_type = registry.get_type(key_annotation) if key_annotation else st.Any
value_type = registry.get_type(value_annotation) if value_annotation else st.Any
meta = meta.copy()
return self.map_type_cls(
key_type, value_type, meta.pop("nullable", False), meta=meta
)
@dc.dataclass(frozen=True)
class ParseDataClassPlugin:
"""
Parse a specific dataclass into a StructType
"""
dataclass_cls: PyType
struct_type_cls: PyType[types.StructType] = types.StructType
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
if annotation is not self.dataclass_cls or not dc.is_dataclass(annotation):
return None
fields = []
for dc_field in utils.encodeable_dataclass_fields(annotation):
field_annotation = dc_field.type
syms_type = registry.get_type(field_annotation)
syms_field = types.StructField(dc_field.name, syms_type)
fields.append(syms_field)
meta = meta.copy()
meta.update(
{
"plugins": [
EncodeDataClassPlugin(self.dataclass_cls, self.struct_type_cls)
]
}
)
instance = self.struct_type_cls(
tuple(fields), meta.pop("nullable", False), meta=meta
)
return instance
@dc.dataclass(frozen=True)
class EncodeDataClassPlugin:
"""
Parse a specific dataclass into a StructType
"""
dataclass_cls: PyType
struct_type_cls: PyType[types.StructType] = types.StructType
@st.hookimpl
def decode(self, value: Any) -> Any:
return self.dataclass_cls(**value) if value is not None else None
@st.hookimpl
def encode(self, value: Any) -> Any:
if not isinstance(value, self.dataclass_cls) or not dc.is_dataclass(value):
return None
return {
field.name: getattr(value, field.name)
for field in utils.encodeable_dataclass_fields(value)
}
@dc.dataclass(frozen=True)
class LiteralPlugin:
"""
Create Data literals from python objects whose types can be inferred directly
"""
@st.hookimpl
def get_object(self, value: Any, registry: st.Registry) -> "Object":
if isinstance(value, st.Object):
return None
try:
value_type = registry.infer_type(value)
except st.exc.NoTypeFound:
return None
return st.Object(impl.Data(value, value_type), registry=registry)
@dc.dataclass(frozen=True)
class StructFromDictPlugin:
"""
Attempts to construct a struct type given a dictionary input
"""
@st.hookimpl
def infer_type(self, obj: Any, registry: "Registry") -> types.Type:
if not isinstance(obj, dict) or not all(isinstance(key, str) for key in obj):
return None
fields = []
for name, value in obj.items():
try:
field_type = registry.infer_type(value)
except st.exc.NoTypeFound:
return None
fields.append(st.Field(name, field_type))
return types.StructType(fields, False)
@dc.dataclass(frozen=True)
class BasicObjectBehaviors:
"""
Basic behavior for inferring types from Objects
"""
@st.hookimpl
def get_object(self, value: Any, registry: st.Registry) -> "Object":
if isinstance(value, st.Object):
return value
return None
@st.hookimpl
def infer_type(self, obj: Any, registry: "Registry") -> types.Type:
if isinstance(obj, st.Object):
return obj._type
return None
@st.hookimpl
def get_type(
self, annotation: Any, registry: st.Registry, meta: Dict[str, Any]
) -> types.Type:
if isinstance(annotation, types.Type):
meta = meta.copy()
type_as_nullable = annotation.with_nullable(
meta.pop("nullable", annotation.nullable)
)
type_meta = type_as_nullable.meta.copy()
type_meta.update(meta)
return type_as_nullable.with_meta(type_meta)
return None
DEFAULT_PLUGINS = [
AnyPlugin(),
ValuePredicatePlugin(types.Type, types.TypeType),
HandleOptionalPlugin(),
ParseSequencePlugin(types.ArrayType),
ParseMappingPlugin(types.MapType),
ValuePredicatePlugin(float, types.FloatType),
ValuePredicatePlugin(int, types.IntegerType),
ValuePredicatePlugin(list, partial(types.ArrayType, types.Any)),
ValuePredicatePlugin(str, types.StringType),
ValuePredicatePlugin(bool, types.BooleanType),
ValuePredicatePlugin(date, types.DateType),
ValuePredicatePlugin(datetime, types.DateTimeType),
ValuePredicatePlugin(range, partial(types.ArrayType, types.Integer)),
ValuePredicatePlugin((lambda x: x is Any), types.AnyType),
LiteralPlugin(),
BasicObjectBehaviors(),
StructFromDictPlugin(),
]
def register(registry: Optional["Registry"] = None) -> None:
"""
Register default plugins
"""
if registry is None:
registry = st.registry
for plugin in DEFAULT_PLUGINS:
registry.register(plugin)
| StarcoderdataPython |
369185 | import tensorflow as tf
class TacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 postnet: a stack of Conv1D + BatchNorm layers that
    refines the decoder's mel-spectrogram prediction.

    All conv layers use tanh activation except the last, which projects
    back to num_mels channels with identity activation.
    """

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_batch_norm = []
        for i in range(config.n_conv_postnet):
            # final conv maps back to num_mels; intermediate ones keep
            # postnet_conv_filters channels
            conv = tf.keras.layers.Conv1D(
                filters=config.postnet_conv_filters
                if i < config.n_conv_postnet - 1
                else config.num_mels,
                kernel_size=config.postnet_conv_kernel_sizes,
                padding="same",
                name="conv_._{}".format(i),
            )
            batch_norm = tf.keras.layers.BatchNormalization(
                axis=-1, name="batch_norm_._{}".format(i)
            )
            self.conv_batch_norm.append((conv, batch_norm))

        self.dropout = tf.keras.layers.Dropout(
            rate=config.postnet_dropout_rate, name="dropout"
        )
        # tanh on every layer but the last (identity there)
        self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]

    def call(self, inputs, training=False):
        """Call logic.

        inputs is a (features, mask) pair; the boolean mask zeroes out
        padded time steps in the output.
        """
        outputs, mask = inputs
        extended_mask = tf.cast(tf.expand_dims(mask, axis=2), outputs.dtype)
        for i, (conv, bn) in enumerate(self.conv_batch_norm):
            outputs = conv(outputs)
            outputs = bn(outputs)
            outputs = self.activation[i](outputs)
            outputs = self.dropout(outputs, training=training)
        return outputs * extended_mask
| StarcoderdataPython |
37324 | <reponame>oclyke-dev/blue-heron
import blue_heron
import pytest
from pathlib import Path
from lxml import etree as ET
from blue_heron import Root, Drawing
@pytest.fixture(scope='module')
def test_board():
    # Parse the sample Eagle board file once per test module and hand its
    # XML root element to the tests.
    with open(Path(__file__).parent/'data/ArtemisDevKit.brd', 'r') as f:
        root = ET.parse(f).getroot()
        yield root
def test_get_drawing(test_board):
    """Root.drawing should wrap the board's drawing element in a Drawing."""
    root = Root(test_board)
    drawing = root.drawing
    # isinstance is the idiomatic type check and avoids constructing a
    # throwaway Drawing(None) just to compare types
    assert isinstance(drawing, Drawing)
| StarcoderdataPython |
3356748 | from functools import partial
import numpy as np
import nifty
import nifty.graph.opt.multicut as nmc
from .blockwise_mc_impl import blockwise_mc_impl
#
# cost functionality
#
def _weight_edges(costs, edge_sizes, weighting_exponent):
w = edge_sizes / float(edge_sizes.max())
if weighting_exponent != 1.:
w = w**weighting_exponent
costs *= w
return costs
def _weight_populations(costs, edge_sizes, edge_populations, weighting_exponent):
    """Size-weight each edge population independently (e.g. xy-edges vs
    z-edges of flat superpixels); populations are masks or index arrays."""
    # check that the population indices cover each edge at most once
    covered = np.zeros(len(costs), dtype='uint8')
    for edge_pop in edge_populations:
        covered[edge_pop] += 1
    assert (covered <= 1).all()

    # fancy indexing yields a copy, so the weighted values must be
    # assigned back into `costs`
    for edge_pop in edge_populations:
        costs[edge_pop] = _weight_edges(costs[edge_pop], edge_sizes[edge_pop],
                                        weighting_exponent)
    return costs
def transform_probabilities_to_costs(probs, beta=.5, edge_sizes=None,
                                     edge_populations=None, weighting_exponent=1.):
    """ Transform probabilities to costs via negative log likelihood.

    Arguments:
        probs [np.ndarray] - Input probabilities.
        beta [float] - boundary bias (default: .5)
        edge_sizes [np.ndarray] - sizes of edges for weighting (default: None)
        edge_populations [list[np.ndarray]] - different edge populations that will be
            size weighted independently, passed as list of masks or index arrays.
            Useful e.g. for flat superpixels in a 3d problem. (default: None)
        weighting_exponent [float] - exponent used for weighting (default: 1.)
    """
    # clip probabilities into (p_min, 1 - p_min) to keep the log finite
    p_min = 0.001
    p_max = 1. - p_min
    costs = (p_max - p_min) * probs + p_min
    # negative log likelihood; second term is the boundary bias
    costs = np.log((1. - costs) / costs) + np.log((1. - beta) / beta)
    if edge_sizes is None:
        return costs
    assert len(edge_sizes) == len(costs)
    if edge_populations is None:
        return _weight_edges(costs, edge_sizes, weighting_exponent)
    return _weight_populations(costs, edge_sizes, edge_populations, weighting_exponent)
def compute_edge_costs(probs, edge_sizes=None, z_edge_mask=None,
                       beta=.5, weighting_scheme=None, weighting_exponent=1.):
    """ Compute edge costs from probabilities with a pre-defined weighting scheme.

    Arguments:
        probs [np.ndarray] - Input probabilities.
        edge_sizes [np.ndarray] - sizes of edges for weighting (default: None)
        z_edge_mask [np.ndarray] - edge mask for inter-slice edges,
            only necessary for weighting schemes z or xyz (default: None)
        beta [float] - boundary bias (default: .5)
        weighting_scheme [str] - scheme for weighting the edge costs based on size
            of the edges (default: None)
        weighting_exponent [float] - exponent used for weighting (default: 1.)
    """
    schemes = (None, 'all', 'none', 'xyz', 'z')
    if weighting_scheme not in schemes:
        schemes_str = ', '.join([str(scheme) for scheme in schemes])
        raise ValueError("Weighting scheme must be one of %s, got %s" % (schemes_str,
                                                                         str(weighting_scheme)))
    if weighting_scheme is None or weighting_scheme == 'none':
        edge_pop = edge_sizes_ = None
    elif weighting_scheme == 'all':
        if edge_sizes is None:
            raise ValueError("Need edge sizes for weighting scheme all")
        if len(edge_sizes) != len(probs):
            raise ValueError("Invalid edge sizes")
        edge_sizes_ = edge_sizes
        edge_pop = None
    elif weighting_scheme == 'xyz':
        if edge_sizes is None or z_edge_mask is None:
            raise ValueError("Need edge sizes and z edge mask for weighting scheme xyz")
        if len(edge_sizes) != len(probs) or len(z_edge_mask) != len(probs):
            raise ValueError("Invalid edge sizes or z edge mask")
        edge_pop = [z_edge_mask, np.logical_not(z_edge_mask)]
        edge_sizes_ = edge_sizes
    elif weighting_scheme == 'z':
        # BUG FIX: validate inputs *before* using them; previously
        # edge_sizes.copy() was called first, so missing inputs raised
        # AttributeError instead of the intended ValueError.
        if edge_sizes is None or z_edge_mask is None:
            raise ValueError("Need edge sizes and z edge mask for weighting scheme z")
        if len(edge_sizes) != len(probs) or len(z_edge_mask) != len(probs):
            raise ValueError("Invalid edge sizes or z edge mask")
        edge_pop = [z_edge_mask, np.logical_not(z_edge_mask)]
        # only z-edges keep their size; xy-edges get unit weight
        edge_sizes_ = edge_sizes.copy()
        edge_sizes_[edge_pop[1]] = 1.
    return transform_probabilities_to_costs(probs, beta=beta, edge_sizes=edge_sizes_,
                                            edge_populations=edge_pop,
                                            weighting_exponent=weighting_exponent)
#
# multicut solvers
#
def _to_objective(graph, costs):
    """Wrap (graph, costs) into a nifty multicut objective.

    Graphs that are not plain UndirectedGraph instances (e.g. rag types)
    are converted by copying their edges into a fresh UndirectedGraph.
    """
    if isinstance(graph, nifty.graph.UndirectedGraph):
        plain_graph = graph
    else:
        plain_graph = nifty.graph.undirectedGraph(graph.numberOfNodes)
        plain_graph.insertEdges(graph.uvIds())
    return nmc.multicutObjective(plain_graph, costs)
def _get_solver_factory(objective, internal_solver, warmstart=True, warmstart_kl=False):
    """Map an internal-solver name to the corresponding nifty solver factory.

    Raises RuntimeError when the required nifty build flags are missing,
    NotImplementedError for recognized-but-unsupported names, and
    ValueError for unknown names.
    """
    if internal_solver == 'kernighan-lin':
        factory = objective.kernighanLinFactory(warmStartGreedy=warmstart)
    elif internal_solver == 'greedy-additive':
        factory = objective.greedyAdditiveFactory()
    elif internal_solver == 'greedy-fixation':
        factory = objective.greedyFixationFactory()
    elif internal_solver == 'cut-glue-cut':
        if not nifty.Configuration.WITH_QPBO:
            raise RuntimeError("multicut_cgc requires nifty built with QPBO")
        factory = objective.cgcFactory(warmStartGreedy=warmstart, warmStartKl=warmstart_kl)
    elif internal_solver == 'ilp':
        if not any((nifty.Configuration.WITH_CPLEX, nifty.Configuration.WITH_GLPK, nifty.Configuration.WITH_GUROBI)):
            raise RuntimeError("multicut_ilp requires nifty built with at least one of CPLEX, GLPK or GUROBI")
        factory = objective.multicutIlpFactory()
    elif internal_solver in ('fusion-move', 'decomposition'):
        raise NotImplementedError(f"Using {internal_solver} as internal solver is currently not supported.")
    else:
        raise ValueError(f"{internal_solver} cannot be used as internal solver.")
    return factory
def _get_visitor(objective, time_limit=None, **kwargs):
    """Build a logging visitor enforcing time limit / logging interval.

    Returns None when neither a time limit nor a logging interval was
    requested. Consumes 'logging_interval' and 'log_level' from kwargs.
    """
    logging_interval = kwargs.pop('logging_interval', None)
    log_level = kwargs.pop('log_level', 'INFO')
    if time_limit is None and logging_interval is None:
        return None
    if logging_interval is None:
        logging_interval = int(np.iinfo('int32').max)
    if time_limit is None:
        time_limit = float('inf')
    level = getattr(nifty.LogLevel, log_level, nifty.LogLevel.INFO)
    # loggingVisitor and verboseVisitor appear interchangeable here;
    # loggingVisitor is used.
    return objective.loggingVisitor(visitNth=logging_interval,
                                    timeLimitTotal=time_limit,
                                    logLevel=level)
def get_multicut_solver(name, **kwargs):
    """Look up a multicut solver function by its registry name.

    Extra keyword arguments are bound to the returned callable via
    functools.partial. Raises KeyError for unknown names.
    """
    registry = {'kernighan-lin': partial(multicut_kernighan_lin, **kwargs),
                'greedy-additive': partial(multicut_gaec, **kwargs),
                'decomposition': partial(multicut_decomposition, **kwargs),
                'fusion-moves': partial(multicut_fusion_moves, **kwargs),
                'blockwise-multicut': partial(blockwise_multicut, **kwargs),
                'greedy-fixation': partial(multicut_greedy_fixation, **kwargs),
                'cut-glue-cut': partial(multicut_cgc, **kwargs),
                'ilp': partial(multicut_ilp, **kwargs)}
    if name not in registry:
        raise KeyError("Solver %s is not supported" % name)
    return registry[name]
def blockwise_multicut(graph, costs, segmentation,
                       internal_solver, block_shape,
                       n_threads, n_levels=1, halo=None, **kwargs):
    """ Solve multicut with the block-wise hierarchical solver.

    Introduced in "Solving large Multicut problems for connectomics via domain decomposition":
    http://openaccess.thecvf.com/content_ICCV_2017_workshops/papers/w1/Pape_Solving_Large_Multicut_ICCV_2017_paper.pdf

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        segmentation [np.ndarray] - segmentation underlying the multicut problem
        internal_solver [str or callable] - solver used for the sub-problems
        block_shape [listlike] - shape of blocks used to extract sub-problems
        n_threads [int] - number of threads used to solve sub-problems in parallel
        n_levels [int] - number of hierarchy levels (default: 1)
        halo [listlike] - halo used to enlarge the block shape (default: None)
    """
    if isinstance(internal_solver, str):
        solver = get_multicut_solver(internal_solver)
    else:
        solver = internal_solver
    if not callable(solver):
        raise ValueError("Invalid argument for internal_solver.")
    return blockwise_mc_impl(graph, costs, segmentation, solver,
                             block_shape, n_threads, n_levels, halo)
def multicut_kernighan_lin(graph, costs, time_limit=None, warmstart=True, **kwargs):
    """ Solve a multicut problem with the Kernighan-Lin heuristic.

    See "An efficient heuristic procedure for partitioning graphs":
    http://xilinx.asia/_hdl/4/eda.ee.ucla.edu/EE201A-04Spring/kl.pdf

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
        warmstart [bool] - warmstart with a greedy-additive solution (default: True)
    """
    objective = _to_objective(graph, costs)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    solver = objective.kernighanLinFactory(warmStartGreedy=warmstart).create(objective)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_gaec(graph, costs, time_limit=None, **kwargs):
    """ Solve a multicut problem with greedy-additive edge contraction.

    See "Fusion moves for correlation clustering":
    http://openaccess.thecvf.com/content_cvpr_2015/papers/Beier_Fusion_Moves_for_2015_CVPR_paper.pdf

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
    """
    objective = _to_objective(graph, costs)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    solver = objective.greedyAdditiveFactory().create(objective)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_greedy_fixation(graph, costs, time_limit=None, **kwargs):
    """ Solve a multicut problem with the greedy fixation solver.

    See "A Comparative Study of Local Search Algorithms for Correlation Clustering":
    https://link.springer.com/chapter/10.1007/978-3-319-66709-6_9

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
    """
    objective = _to_objective(graph, costs)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    solver = objective.greedyFixationFactory().create(objective)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_cgc(graph, costs, time_limit=None, warmstart=True, warmstart_kl=True, **kwargs):
    """ Solve a multicut problem with the cut, glue & cut solver.

    See "Cut, Glue & Cut: A Fast, Approximate Solver for Multicut Partitioning":
    https://www.cv-foundation.org/openaccess/content_cvpr_2014/html/Beier_Cut_Glue__2014_CVPR_paper.html
    Requires a nifty build with QPBO.

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
        warmstart [bool] - warmstart with a greedy-additive solution (default: True)
        warmstart_kl [bool] - additionally warmstart with Kernighan-Lin (default: True)
    """
    if not nifty.Configuration.WITH_QPBO:
        raise RuntimeError("multicut_cgc requires nifty built with QPBO")
    objective = _to_objective(graph, costs)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    factory = objective.cgcFactory(warmStartGreedy=warmstart, warmStartKl=warmstart_kl)
    solver = factory.create(objective)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_decomposition(graph, costs, time_limit=None,
                           internal_solver='kernighan-lin',
                           **kwargs):
    """ Solve a multicut problem with the decomposition solver.

    See "Break and Conquer: Efficient Correlation Clustering for Image Segmentation":
    https://link.springer.com/chapter/10.1007/978-3-642-39140-8_9

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
        internal_solver [str] - name of the solver used for connected components
            (default: 'kernighan-lin')
    """
    objective = _to_objective(graph, costs)
    sub_factory = _get_solver_factory(objective, internal_solver)
    solver = objective.multicutDecomposerFactory(
        submodelFactory=sub_factory,
        fallthroughFactory=sub_factory
    ).create(objective)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_fusion_moves(graph, costs, time_limit=None, n_threads=1,
                          internal_solver='kernighan-lin',
                          warmstart=True, warmstart_kl=True,
                          seed_fraction=.05, num_it=1000, num_it_stop=25, sigma=2.,
                          **kwargs):
    """ Solve a multicut problem with the fusion moves solver.

    See "Fusion moves for correlation clustering":
    http://openaccess.thecvf.com/content_cvpr_2015/papers/Beier_Fusion_Moves_for_2015_CVPR_paper.pdf

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
        n_threads [int] - number of threads (default: 1)
        internal_solver [str] - name of the solver used for connected components
            (default: 'kernighan-lin')
        warmstart [bool] - warmstart with a greedy-additive solution (default: True)
        warmstart_kl [bool] - additionally warmstart with Kernighan-Lin (default: True)
        seed_fraction [float] - fraction of nodes used as seeds for proposal
            generation (default: .05)
        num_it [int] - maximal number of iterations (default: 1000)
        num_it_stop [int] - stop if no improvement after this many iterations
            (default: 25)
        sigma [float] - smoothing factor for weights in the proposal generator
            (default: 2.)
    """
    objective = _to_objective(graph, costs)
    fusion_move = objective.fusionMoveSettings(
        mcFactory=_get_solver_factory(objective, internal_solver)
    )
    proposals = objective.watershedCcProposals(sigma=sigma, numberOfSeeds=seed_fraction)
    solver = objective.ccFusionMoveBasedFactory(
        fusionMove=fusion_move,
        warmStartGreedy=warmstart,
        warmStartKl=warmstart_kl,
        proposalGenerator=proposals,
        numberOfThreads=n_threads,
        numberOfIterations=num_it,
        stopIfNoImprovement=num_it_stop,
    ).create(objective)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
def multicut_ilp(graph, costs, time_limit=None, **kwargs):
    """ Solve a multicut problem exactly with the ILP solver.

    See "Globally Optimal Closed-surface Segmentation for Connectomics":
    https://link.springer.com/chapter/10.1007/978-3-642-33712-3_56
    Requires a nifty build with CPLEX, GUROBI or GLPK.

    Arguments:
        graph [nifty.graph] - graph of the multicut problem
        costs [np.ndarray] - edge costs of the multicut problem
        time_limit [float] - inference time limit in seconds (default: None)
    """
    if not any((nifty.Configuration.WITH_CPLEX, nifty.Configuration.WITH_GLPK, nifty.Configuration.WITH_GUROBI)):
        raise RuntimeError("multicut_ilp requires nifty built with at least one of CPLEX, GLPK or GUROBI")
    objective = _to_objective(graph, costs)
    visitor = _get_visitor(objective, time_limit, **kwargs)
    solver = objective.multicutIlpFactory().create(objective)
    if visitor is None:
        return solver.optimize()
    return solver.optimize(visitor=visitor)
| StarcoderdataPython |
9750604 | import unittest
import pipgh
class TestInstall(unittest.TestCase):
    """Tests for the ``pipgh install`` command-line entry point."""
    def test_cli_fail(self):
        # install ( (<full_name> [ref]) | (-r <requirements.txt>) )
        # Each argv below is malformed and must make pipgh.install exit.
        argvs = [
            ['instal'],
            ['install'],
            ['instal', 'docopt/docopt'],
            ['install', '-r'],
            ['install', '-r', 'reqs.txt', 'docopt/docopt'],
            ['install', 'docopt', 'reqs.txt', 'docopt/docopt'],
        ]
        auth_flag = False
        for argv in argvs:
            try:
                self.assertRaises(SystemExit, pipgh.install, auth_flag, argv)
            except AssertionError as e:
                # Append the offending argv to the failure message for triage.
                e.args = (e.args[0] + ' for ' + str(argv),)
                raise
        try:
            # A well-formed argv must still exit when auth_flag is True.
            argv = ['install', 'docopt/docopt']
            self.assertRaises(SystemExit, pipgh.install, True, argv)
        except AssertionError as e:
            e.args = (e.args[0] + ' for auth_flag=True',)
            raise
    def test_dry_run(self):
        # Valid invocation shapes (argv length):
        # pipgh install <full_name>                 2
        # pipgh install <full_name> <ref>           3
        # pipgh install -r <requirements.txt>       3
        argvs = [
            (['install', 'requests'],
             ['requests'],
             [None]),
            (['install', 'requests', 'special'],
             ['requests'],
             ['special']),
            (['install', '-r', 'tests/execution/requirements.txt'],
             ['docopt/docopt', 'mitsuhiko/flask', 'tornadoweb/tornado', 'kennethreitz/requests'],
             ['0.6.2', '23cf923c7c2e4a3808e6c71b6faa34d1749d4cb6', 'stable', None]),
        ]
        for argv, repo_labels, refs in argvs:
            # dry_run=True returns the parsed (repo_labels, refs) without installing.
            rst = pipgh.install(False, argv, dry_run=True)
            self.assertEqual(rst, (repo_labels, refs))
| StarcoderdataPython |
3426991 | #SCS_class_creator_for_initialization_configuration_sate_validation_and_data_adquisition
from SocketExecutor import SocketExecutor
class K4200:
    """Driver for a Keithley 4200-SCS parameter analyzer over Ethernet.

    Configures attached SMUs, triggers measurements and retrieves data
    through a SocketExecutor connection. (Python 2 code: print statements.)
    """
    print "U R in class K4200" #flag 4 debug
    # Relate an IP address to the SCS
    def __init__(self, ip, port=2099):
        """Bind the driver to the instrument at ip:port (default port 2099)."""
        print "U R in K4200 - __init__" #flag 4 debug
        self.ip = ip
        # For the SCS expect_reply is true
        self.executor = SocketExecutor(ip, port)
        # Not configured yet at initialization time
        self.configured = False
        # No measurement has been triggered yet
        self.has_measured = False #Its not realizing measure
        # The K4200 owns a list of attached SMUs
        self.smus = list()
    def attach(self, smu):
        """Register an additional SMU with the SCS."""
        # Add a new SMU to the SCS
        print "U R in K4200 - attach" #flag 4 debug
        self.smus.append(smu)
    def configure(self):
        """Send every attached SMU's configuration commands to the instrument."""
        print "U R in K4200 - configure" #flag 4 debug
        # Send the commands necessary to configure each SMU
        for smu in self.smus:
            # Each SMU has a list of commands used to configure it;
            # execute each configuration command in order.
            for command in smu.get_commands():
                self.executor.execute_command(command)
        # Record that configuration has been performed
        self.configured = True
    def is_ready(self):
        """Poll the GPIB serial-poll byte; truthy when measurement data is ready."""
        # Check if the device is ready to send a command
        print "U R in K4200 - is_ready" #flag 4 debug
        # SP command allows the user to acquire the GPIB spoll byte to determine when not busy or data
        # ready while in the Ethernet communication mode.
        self.executor.execute_command("SP")
        is_ready = self.executor.get_data()
        is_ready = is_ready.replace("\0","")
        # Bit 0 of the spoll byte signals that data is available
        return 0b00000001 & int(is_ready)
    def measure(self):
        """Trigger the programmed measurement sequence."""
        # Send the command to start the measurement and mark us as measuring.
        print "U R in K4200 - measure" #flag 4 debug
        # The ME1 or ME2 command will trigger the start of the test and perform the programmed number of measurements.
        self.executor.execute_command("MD ME1") #"MD ME1" means Measurement control with trigger
        self.has_measured = True #Indicator that its measuring
    def get_data(self, ch=1):
        """Return the acquired data for channel ``ch`` (default 1).

        Raises NotMeasuredYetError if measure() has not been called first.
        """
        print "U R in K4200 - get_data" #flag 4 debug
        if self.has_measured:
            template = "DO CH{ch}T"
            cmd = template.format(ch=ch)
            self.executor.execute_command(cmd)
            return self.executor.get_data()
        else:
            raise NotMeasuredYetError("No data to retrieve")
class NotMeasuredYetError(Exception):
    """Raised when data is requested before any measurement has been triggered."""
    pass
| StarcoderdataPython |
3324887 | <reponame>jnthn/intellij-community
import nspackage.a<caret> | StarcoderdataPython |
# Simple restaurant tip calculator (Python 2: raw_input / print statement).
# get string input (raw_input always returns a str)
Total_bill =int(raw_input("Enter the total amont: "))
# get numeric input: int() converts a string to an integer,
# float() converts a string to a floating-point number
tip_rate = float(raw_input("Enter tip rate (such as .15): "))
tip=(Total_bill*tip_rate)
# int() truncates: the total is rounded down to a whole dollar amount
total=int(Total_bill+tip)
# use string formatting to output the result
print "You should pay: $%d" % (total)
333258 | from __future__ import annotations
from datetime import datetime
from typing import Any, Mapping, Optional, Sequence, Union
from dataclasses import dataclass
from snuba.datasets.cdc.cdcprocessors import (
CdcProcessor,
CdcMessageRow,
postgres_date_to_clickhouse,
parse_postgres_datetime,
)
from snuba.writer import WriterTableRow
@dataclass(frozen=True)
class GroupMessageRecord:
    """State of a groupedmessage row, carried with parsed datetime objects."""
    status: int
    last_seen: datetime
    first_seen: datetime
    active_at: Optional[datetime] = None
    first_release_id: Optional[int] = None
@dataclass(frozen=True)
class RawGroupMessageRecord:
    """
    A faster version of GroupMessageRecord that does not rely on
    datetime objects: dates stay as strings. This is useful for bulk
    loads of massive tables, to avoid creating and serializing datetime
    objects for every record.
    """
    status: int
    last_seen: str
    first_seen: str
    active_at: Optional[str] = None
    first_release_id: Optional[int] = None
@dataclass(frozen=True)
class GroupedMessageRow(CdcMessageRow):
    """A single groupedmessage CDC row, convertible to a Clickhouse write row."""
    offset: Optional[int]
    project_id: int
    id: int
    record_deleted: bool
    record_content: Union[None, GroupMessageRecord, RawGroupMessageRecord]
    @classmethod
    def from_wal(
        cls, offset: int, columnnames: Sequence[str], columnvalues: Sequence[Any],
    ) -> GroupedMessageRow:
        """Build a row from a WAL (logical replication) change event.

        Dates are parsed eagerly into datetime objects.
        """
        raw_data = dict(zip(columnnames, columnvalues))
        return cls(
            offset=offset,
            project_id=raw_data["project_id"],
            id=raw_data["id"],
            record_deleted=False,
            record_content=GroupMessageRecord(
                status=raw_data["status"],
                last_seen=parse_postgres_datetime(raw_data["last_seen"]),
                first_seen=parse_postgres_datetime(raw_data["first_seen"]),
                active_at=(
                    parse_postgres_datetime(raw_data["active_at"])
                    if raw_data["active_at"]
                    else None
                ),
                first_release_id=raw_data["first_release_id"],
            ),
        )
    @classmethod
    def from_bulk(cls, row: Mapping[str, Any],) -> GroupedMessageRow:
        """Build a row from a bulk-load dump.

        Uses RawGroupMessageRecord (string dates) for speed; offset is not
        meaningful during bulk load and is left as None.
        """
        return cls(
            offset=None,
            project_id=int(row["project_id"]),
            id=int(row["id"]),
            record_deleted=False,
            record_content=RawGroupMessageRecord(
                status=int(row["status"]),
                last_seen=postgres_date_to_clickhouse(row["last_seen"]),
                first_seen=postgres_date_to_clickhouse(row["first_seen"]),
                active_at=(
                    postgres_date_to_clickhouse(row["active_at"])
                    if row["active_at"]
                    else None
                ),
                first_release_id=int(row["first_release_id"])
                if row["first_release_id"]
                else None,
            ),
        )
    def to_clickhouse(self) -> WriterTableRow:
        """Flatten the row into the Clickhouse column mapping.

        Content columns are None for deleted records (tombstones).
        """
        record = self.record_content
        return {
            "offset": self.offset if self.offset is not None else 0,
            "project_id": self.project_id,
            "id": self.id,
            "record_deleted": 1 if self.record_deleted else 0,
            "status": None if not record else record.status,
            "last_seen": None if not record else record.last_seen,
            "first_seen": None if not record else record.first_seen,
            "active_at": None if not record else record.active_at,
            "first_release_id": None if not record else record.first_release_id,
        }
class GroupedMessageProcessor(CdcProcessor):
    """CDC processor translating postgres groupedmessage changes into Clickhouse rows."""

    def __init__(self, postgres_table: str):
        super(GroupedMessageProcessor, self).__init__(
            pg_table=postgres_table,
            message_row_class=GroupedMessageRow,
        )

    def _process_delete(
        self, offset: int, key: Mapping[str, Any],
    ) -> Sequence[WriterTableRow]:
        # A delete event only carries the primary key; emit a tombstone row.
        key_lookup = dict(zip(key["keynames"], key["keyvalues"]))
        tombstone = GroupedMessageRow(
            offset=offset,
            project_id=key_lookup["project_id"],
            id=key_lookup["id"],
            record_deleted=True,
            record_content=None,
        )
        return [tombstone.to_clickhouse()]
| StarcoderdataPython |
3261436 | from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.contrib.auth import login, logout, authenticate
from .forms import NewPostForm
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, UpdateView
from .models import Post, Stream, Likes, Follow, PostFileContent
from authentication.models import Profile, UserAccount
from django.template import loader
from django.template.loader import render_to_string
from django.views.generic import TemplateView
from django.db import transaction
from comments.models import Comments
from comments.forms import commentForm
from django.template import RequestContext
# Create your views here.
@login_required(login_url='authentication:index')
def PostDetail(request, post_id):
    """Render a single post with its comments; handle new comment submission."""
    user = request.user
    post = get_object_or_404(Post, id=post_id)
    favourite = False
    comments = Comments.objects.filter(post=post).order_by('date_commented')
    if request.method == 'POST':
        form = commentForm(request.POST)
        if form.is_valid():
            # Attach the new comment to this post/user before saving.
            comment = form.save(commit=False)
            comment.post = post
            comment.user = user
            comment.save()
            # Post/Redirect/Get to avoid duplicate submissions on refresh.
            return HttpResponseRedirect(reverse('posts:postdetails', args=[post_id]))
        # invalid POST: fall through and re-render the bound form with errors
    else:
        # BUG FIX: previously the form was bound to request.POST on GET as
        # well, which rendered spurious validation errors; use an unbound form.
        form = commentForm()
    if request.user.is_authenticated:
        profile = Profile.objects.get(user=request.user)
        favourite = profile.favourites.filter(id=post_id).exists()
    template = loader.get_template('auth/post-detail.html')
    context = {
        'post': post,
        'favourite': favourite,
        'form': form,
        'comments': comments,
    }
    return HttpResponse(template.render(context, request))
@login_required(login_url='authentication:index')
def NewPost(request):
    """Create a new post from one or more uploaded files plus a caption."""
    user = request.user
    file_image = []
    if request.method == "POST":
        form = NewPostForm(request.POST, request.FILES)
        if form.is_valid():
            content = request.FILES.getlist('content')
            caption = form.cleaned_data.get('caption')
            # Persist each uploaded file as its own PostFileContent record.
            for files in content:
                file_instance = PostFileContent(files=files, user=user)
                file_instance.save()
                file_image.append(file_instance)
            # NOTE(review): get_or_create keyed on (caption, user) means a
            # repeated caption re-uses the old post and replaces its content
            # set — confirm this is intended.
            p, created = Post.objects.get_or_create(
                caption=caption, user=user)
            p.content.set(file_image)
            p.save()
            messages.success(
                request, f'Post uploaded successfully see in profile')
            return redirect('posts:newpost')
    else:
        form = NewPostForm()
    context = {
        'form': form
    }
    return render(request, 'auth/new-posts.html', context)
@login_required(login_url='authentication:index')
def like(request):
    """Toggle the authenticated user's like on a post, then return to the home feed."""
    user = request.user
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Post.objects.get(id=post_id)
        # Toggle membership in the post's liked set.
        if user in post_obj.liked.all():
            post_obj.liked.remove(user)
        else:
            post_obj.liked.add(user)
        like_record, created = Likes.objects.get_or_create(
            user=user, post_id=post_id)
        if not created:
            # BUG FIX: these were '==' comparisons (no-ops), not assignments,
            # so the toggled value was never stored on the Likes record.
            like_record.value = 'unlike' if like_record.value == 'like' else 'like'
        post_obj.save()
        like_record.save()
    return redirect('authentication:home')
@login_required(login_url='authentication:index')
def detail_like(request, post_id):
    """Toggle the current user's like on a post from the detail page."""
    user = request.user
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Post.objects.get(id=post_id)
        # Toggle membership in the post's liked set.
        if user in post_obj.liked.all():
            post_obj.liked.remove(user)
        else:
            post_obj.liked.add(user)
        like_record, created = Likes.objects.get_or_create(
            user=user, post_id=post_id)
        if not created:
            # BUG FIX: these were '==' comparisons (no-ops), not assignments,
            # so the toggled value was never stored on the Likes record.
            like_record.value = 'unlike' if like_record.value == 'like' else 'like'
        post_obj.save()
        like_record.save()
    # BUG FIX: redirect('posts:postdetails', args=[post_id]) passed 'args' as
    # a URL kwarg and raised NoReverseMatch; build the URL via reverse(),
    # consistent with the other views in this module.
    return HttpResponseRedirect(reverse('posts:postdetails', args=[post_id]))
@login_required(login_url='authentication:index')
def favourites(request, post_id):
    """Toggle a post on the current user's favourites, then return to its detail page."""
    post = Post.objects.get(id=post_id)
    profile = Profile.objects.get(user=request.user)
    if profile.favourites.filter(id=post_id).exists():
        profile.favourites.remove(post)
    else:
        profile.favourites.add(post)
    return HttpResponseRedirect(reverse('posts:postdetails', args=[post_id]))
@login_required(login_url='authentication:index')
def follow(request, option, username):
    """Follow (option != 0) or unfollow (option == 0) `username` for the current user."""
    user = request.user
    following = get_object_or_404(UserAccount, username=username)
    try:
        p, created = Follow.objects.get_or_create(
            follower=user, following=following)
        if int(option) == 0:
            # Unfollow: drop the relation and purge the user's stream entries
            # for that account.
            p.delete()
            Stream.objects.filter(following=following,
                                  user=user).all().delete()
        else:
            # Follow: seed the stream with the followed user's 3 latest posts.
            posts = Post.objects.all().filter(user=following)[:3]
            with transaction.atomic():
                for post in posts:
                    stream = Stream(post=post, user=user,
                                    date=post.posted, following=following)
                    stream.save()
        return HttpResponseRedirect(reverse('profile', args=[username]))
    except UserAccount.DoesNotExist:
        # NOTE(review): likely unreachable — get_object_or_404 above raises
        # Http404 (outside this try), not DoesNotExist; confirm before removing.
        return HttpResponseRedirect(reverse('profile', args=[username]))
@login_required(login_url='authentication:index')
def updatePostView(request, post_id):
    """Edit an existing post owned by the current user."""
    # Scoping the lookup by user_id prevents editing another user's post.
    post = Post.objects.get(id=post_id, user_id=request.user.id)
    if request.method == 'POST':
        post_form = NewPostForm(instance=post, data=request.POST)
        if post_form.is_valid():
            post_form.save()
            return HttpResponseRedirect(reverse('posts:postedit'))
        # BUG FIX: invalid POSTs previously rebuilt an unbound form, silently
        # discarding validation errors; fall through with the bound form.
    else:
        # BUG FIX: GETs previously bound empty request.POST data (always
        # invalid) and re-queried the post; serve an unbound form directly.
        post_form = NewPostForm(instance=post)
    return render(request, 'auth/update_post.html', {'form': post_form})
| StarcoderdataPython |
3247652 | <filename>tests/test_help.py
import subprocess
import sys
import pytest
from openpifpaf import __version__
# Windows ships the interpreter launcher as `python`; elsewhere use `python3`.
PYTHON = 'python3' if sys.platform != 'win32' else 'python'
# CLI entry points exercised by every test below, run as `openpifpaf.<name>`.
MODULE_NAMES = [
    'predict',
    'train',
    'logs',
    'eval',
    'export_onnx',
    'migrate',
    'count_ops',
    'benchmark',
]
if sys.platform != 'win32':
    # the video tool is only exercised on non-Windows platforms
    MODULE_NAMES.append('video')
@pytest.mark.parametrize('module_name', MODULE_NAMES)
def test_help(module_name):
    """Every CLI module prints a non-trivial text for --help."""
    cmd = [PYTHON, '-m', 'openpifpaf.{}'.format(module_name), '--help']
    help_text = subprocess.check_output(cmd)
    assert len(help_text) > 10
@pytest.mark.parametrize('module_name', MODULE_NAMES)
def test_version(module_name):
    """Every CLI module reports the package version for --version."""
    cmd = [PYTHON, '-m', 'openpifpaf.{}'.format(module_name), '--version']
    output = subprocess.check_output(cmd)
    # '.dirty' suffixes from development checkouts are ignored on both sides
    reported = output.decode().strip().replace('.dirty', '')
    expected = 'OpenPifPaf {}'.format(__version__.replace('.dirty', ''))
    assert reported == expected
@pytest.mark.parametrize('module_name', MODULE_NAMES)
def test_usage(module_name):
    """Every CLI module's help text starts with the canonical usage line."""
    cmd = [PYTHON, '-m', 'openpifpaf.{}'.format(module_name), '--help']
    output = subprocess.check_output(cmd)
    expected_prefix = 'usage: python3 -m openpifpaf.{}'.format(module_name)
    assert output.decode().startswith(expected_prefix)
| StarcoderdataPython |
11336650 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Integer, DateTime, String
from sqlalchemy import Column, MetaData, Table, ForeignKey
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
    """
    This database upgrade creates the new storage_ceph_external table,
    a child of storage_backend (1:1 via the shared primary key / FK).
    """
    meta = MetaData()
    meta.bind = migrate_engine
    # Load storage_backend so the foreign key below can be resolved.
    Table('storage_backend', meta, autoload=True)
    # Define and create the storage_ceph_external table.
    storage_external = Table(
        'storage_ceph_external',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer,
               ForeignKey('storage_backend.id', ondelete="CASCADE"),
               primary_key=True, unique=True, nullable=False),
        Column('ceph_conf', String(255), unique=True, index=True),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    storage_external.create()
def downgrade(migrate_engine):
    """Downgrade entry point; deliberately unsupported for the SysInv database."""
    meta = MetaData()
    meta.bind = migrate_engine
    # As per other openstack components, downgrade is
    # unsupported in this release.
    raise NotImplementedError('SysInv database downgrade is unsupported.')
| StarcoderdataPython |
3537790 | <reponame>gtfarng/Odoo_migrade
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import dateutil
import logging
import time
from collections import defaultdict
from odoo import api, fields, models, SUPERUSER_ID, tools, _
from odoo.exceptions import AccessError, UserError, ValidationError
from odoo.modules.registry import Registry
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def encode(s):
    """ Return an UTF8-encoded version of ``s``.

    Python 2: unicode values are encoded, plain str passes through unchanged.
    """
    return s.encode('utf8') if isinstance(s, unicode) else s
# base environment for doing a safe_eval: only these modules are exposed
# to evaluated compute expressions (see make_compute below)
SAFE_EVAL_BASE = {
    'datetime': datetime,
    'dateutil': dateutil,
    'time': time,
}
def make_compute(text, deps):
    """ Return a compute function from its code body and dependencies.

    ``text`` is executed via safe_eval with ``self`` in scope (plus
    SAFE_EVAL_BASE); ``deps`` is a comma-separated list of field names.
    """
    func = lambda self: safe_eval(text, SAFE_EVAL_BASE, {'self': self}, mode="exec")
    deps = [arg.strip() for arg in (deps or "").split(",")]
    return api.depends(*deps)(func)
#
# IMPORTANT: this must be the first model declared in the module
#
class Base(models.AbstractModel):
    """ The base model, which is implicitly inherited by all models. """
    # No fields or methods of its own: it only serves as the implicit parent.
    _name = 'base'
class Unknown(models.AbstractModel):
    """
    Abstract model used as a substitute for relational fields with an unknown
    comodel.
    """
    # Referenced when a relational field points at a model that is not
    # (or not yet) present in the registry.
    _name = '_unknown'
class IrModel(models.Model):
_name = 'ir.model'
_description = "Models"
_order = 'model'
    def _default_field_id(self):
        # Default one2many value for field_id: a single 'x_name' char field,
        # except during module import where no default is wanted.
        if self.env.context.get('install_mode'):
            return []  # no default field when importing
        return [(0, 0, {'name': 'x_name', 'field_description': 'Name', 'ttype': 'char'})]
name = fields.Char(string='Model Description', translate=True, required=True)
model = fields.Char(default='x_', required=True, index=True)
info = fields.Text(string='Information')
field_id = fields.One2many('ir.model.fields', 'model_id', string='Fields', required=True, copy=True,
default=_default_field_id)
inherited_model_ids = fields.Many2many('ir.model', compute='_inherited_models', string="Inherited models",
help="The list of models that extends the current model.")
state = fields.Selection([('manual', 'Custom Object'), ('base', 'Base Object')], string='Type', default='manual', readonly=True)
access_ids = fields.One2many('ir.model.access', 'model_id', string='Access')
transient = fields.Boolean(string="Transient Model")
modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the object is defined or inherited')
view_ids = fields.One2many('ir.ui.view', compute='_view_ids', string='Views')
@api.depends()
def _inherited_models(self):
for model in self:
parent_names = list(self.env[model.model]._inherits)
if parent_names:
model.inherited_model_ids = self.search([('model', 'in', parent_names)])
@api.depends()
def _in_modules(self):
installed_modules = self.env['ir.module.module'].search([('state', '=', 'installed')])
installed_names = set(installed_modules.mapped('name'))
xml_ids = models.Model._get_external_ids(self)
for model in self:
module_names = set(xml_id.split('.')[0] for xml_id in xml_ids[model.id])
model.modules = ", ".join(sorted(installed_names & module_names))
@api.depends()
def _view_ids(self):
for model in self:
model.view_ids = self.env['ir.ui.view'].search([('model', '=', model.model)])
@api.constrains('model')
def _check_model_name(self):
for model in self:
if model.state == 'manual':
if not model.model.startswith('x_'):
raise ValidationError(_("The model name must start with 'x_'."))
if not models.check_object_name(model.model):
raise ValidationError(_("The model name can only contain lowercase characters, digits, underscores and dots."))
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
# overridden to allow searching both on model name (field 'model') and model
# description (field 'name')
@api.model
def _name_search(self, name='', args=None, operator='ilike', limit=100):
if args is None:
args = []
domain = args + ['|', ('model', operator, name), ('name', operator, name)]
return super(IrModel, self).search(domain, limit=limit).name_get()
def _drop_table(self):
for model in self:
table = self.env[model.model]._table
self._cr.execute('select relkind from pg_class where relname=%s', (table,))
result = self._cr.fetchone()
if result and result[0] == 'v':
self._cr.execute('DROP view %s' % table)
elif result and result[0] == 'r':
self._cr.execute('DROP TABLE %s CASCADE' % table)
return True
@api.multi
def unlink(self):
# Prevent manual deletion of module tables
if not self._context.get(MODULE_UNINSTALL_FLAG):
for model in self:
if model.state != 'manual':
raise UserError(_("Model '%s' contains module data and cannot be removed!") % model.name)
# prevent screwing up fields that depend on these models' fields
model.field_id._prepare_update()
imc = self.env['ir.model.constraint'].search([('model', 'in', self.ids)])
imc.unlink()
self._drop_table()
res = super(IrModel, self).unlink()
# Reload registry for normal unlink only. For module uninstall, the
# reload is done independently in odoo.modules.loading.
if not self._context.get(MODULE_UNINSTALL_FLAG):
self._cr.commit() # must be committed before reloading registry in new cursor
api.Environment.reset()
registry = Registry.new(self._cr.dbname)
registry.signal_registry_change()
return res
@api.multi
def write(self, vals):
if '__last_update' in self._context:
self = self.with_context({k: v for k, v in self._context.iteritems() if k != '__last_update'})
if 'model' in vals and any(rec.model != vals['model'] for rec in self):
raise UserError(_('Field "Model" cannot be modified on models.'))
if 'state' in vals and any(rec.state != vals['state'] for rec in self):
raise UserError(_('Field "Type" cannot be modified on models.'))
if 'transient' in vals and any(rec.transient != vals['transient'] for rec in self):
raise UserError(_('Field "Transient Model" cannot be modified on models.'))
# Filter out operations 4 from field id, because the web client always
# writes (4,id,False) even for non dirty items.
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(IrModel, self).write(vals)
@api.model
def create(self, vals):
res = super(IrModel, self).create(vals)
if vals.get('state', 'manual') == 'manual':
# setup models; this automatically adds model in registry
self.pool.setup_models(self._cr, partial=(not self.pool.ready))
# update database schema
self.pool.init_models(self._cr, [vals['model']], dict(self._context, update_custom_fields=True))
self.pool.signal_registry_change()
return res
@api.model
def name_create(self, name):
""" Infer the model from the name. E.g.: 'My New Model' should become 'x_my_new_model'. """
vals = {
'name': name,
'model': 'x_' + '_'.join(name.lower().split(' ')),
}
return self.create(vals).name_get()[0]
@api.model
def _instanciate(self, model_data):
""" Return a class for the custom model given by parameters ``model_data``. """
class CustomModel(models.Model):
_name = encode(model_data['model'])
_description = model_data['name']
_module = False
_custom = True
_transient = bool(model_data['transient'])
__doc__ = model_data['info']
return CustomModel
class IrModelFields(models.Model):
    """ Registry of model fields ("ir.model.fields").

    One record per field of every model. Records in state 'manual' are
    custom fields created through the UI; they are instantiated by
    ``_instanciate`` and their creation/modification/deletion keeps the
    registry and the database schema in sync.
    """
    _name = 'ir.model.fields'
    _description = "Fields"
    _order = "name"
    _rec_name = 'field_description'

    name = fields.Char(string='Field Name', default='x_', required=True, index=True)
    complete_name = fields.Char(index=True)
    model = fields.Char(string='Object Name', required=True, index=True,
                        help="The technical name of the model this field belongs to")
    relation = fields.Char(string='Object Relation',
                           help="For relationship fields, the technical name of the target model")
    relation_field = fields.Char(help="For one2many fields, the field on the target model that implement the opposite many2one relationship")
    model_id = fields.Many2one('ir.model', string='Model', required=True, index=True, ondelete='cascade',
                               help="The model this field belongs to")
    field_description = fields.Char(string='Field Label', default='', required=True, translate=True)
    help = fields.Text(string='Field Help', translate=True)
    ttype = fields.Selection(selection='_get_field_types', string='Field Type', required=True)
    selection = fields.Char(string='Selection Options', default="",
                            help="List of options for a selection field, "
                                 "specified as a Python expression defining a list of (key, label) pairs. "
                                 "For example: [('blue','Blue'),('yellow','Yellow')]")
    copy = fields.Boolean(string='Copied', help="Whether the value is copied when duplicating a record.")
    related = fields.Char(string='Related Field', help="The corresponding related field, if any. This must be a dot-separated list of field names.")
    required = fields.Boolean()
    readonly = fields.Boolean()
    index = fields.Boolean(string='Indexed')
    translate = fields.Boolean(string='Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)")
    size = fields.Integer()
    state = fields.Selection([('manual', 'Custom Field'), ('base', 'Base Field')], string='Type', default='manual', required=True, readonly=True, index=True)
    on_delete = fields.Selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
                                 string='On Delete', default='set null', help='On delete property for many2one fields')
    domain = fields.Char(default="[]", help="The optional domain to restrict possible values for relationship fields, "
                                            "specified as a Python expression defining a list of triplets. "
                                            "For example: [('color','=','red')]")
    groups = fields.Many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id')
    selectable = fields.Boolean(default=True)
    modules = fields.Char(compute='_in_modules', string='In Apps', help='List of modules in which the field is defined')
    serialization_field_id = fields.Many2one('ir.model.fields', 'Serialization Field', domain="[('ttype','=','serialized')]",
                                             ondelete='cascade', help="If set, this field will be stored in the sparse "
                                                                      "structure of the serialization field, instead "
                                                                      "of having its own database column. This cannot be "
                                                                      "changed after creation.")
    relation_table = fields.Char(help="Used for custom many2many fields to define a custom relation table name")
    column1 = fields.Char(string='Column 1', help="Column referring to the record in the model table")
    column2 = fields.Char(string="Column 2", help="Column referring to the record in the comodel table")
    compute = fields.Text(help="Code to compute the value of the field.\n"
                               "Iterate on the recordset 'self' and assign the field's value:\n\n"
                               "    for record in self:\n"
                               "        record['size'] = len(record.name)\n\n"
                               "Modules time, datetime, dateutil are available.")
    depends = fields.Char(string='Dependencies', help="Dependencies of compute method; "
                                                      "a list of comma-separated field names, like\n\n"
                                                      "    name, partner_id.name")
    store = fields.Boolean(string='Stored', default=True, help="Whether the value is stored in the database.")

    @api.model
    def _get_field_types(self):
        # retrieve the possible field types from the field classes' metaclass
        return sorted((key, key) for key in fields.MetaField.by_type)

    @api.depends()
    def _in_modules(self):
        """ Compute the installed modules that define the field, based on the
        module part of its external ids. """
        installed_modules = self.env['ir.module.module'].search([('state', '=', 'installed')])
        installed_names = set(installed_modules.mapped('name'))
        xml_ids = models.Model._get_external_ids(self)
        for field in self:
            module_names = set(xml_id.split('.')[0] for xml_id in xml_ids[field.id])
            field.modules = ", ".join(sorted(installed_names & module_names))

    @api.model
    def _check_selection(self, selection):
        """ Validate a selection definition: a Python expression evaluating to
        a list/tuple of (key, label) pairs. """
        try:
            items = safe_eval(selection)
            if not (isinstance(items, (tuple, list)) and
                    all(isinstance(item, (tuple, list)) and len(item) == 2 for item in items)):
                raise ValueError(selection)
        except Exception:
            _logger.info('Invalid selection list definition for fields.selection', exc_info=True)
            # note: a space was missing between the two concatenated sentences
            raise UserError(_("The Selection Options expression is not a valid Pythonic expression. "
                              "Please provide an expression in the [('key','Label'), ...] format."))

    @api.constrains('name', 'state')
    def _check_name(self):
        """ Custom field names must start with 'x_' and be valid PostgreSQL
        identifiers. """
        for field in self:
            if field.state == 'manual' and not field.name.startswith('x_'):
                raise ValidationError(_("Custom fields must have a name that starts with 'x_' !"))
            try:
                models.check_pg_name(field.name)
            except ValidationError:
                msg = _("Field names can only contain characters, digits and underscores (up to 63).")
                raise ValidationError(msg)

    @api.constrains('model', 'name')
    def _unique_name(self):
        # fix on stable branch (to be converted into an SQL constraint)
        for field in self:
            count = self.search_count([('model', '=', field.model), ('name', '=', field.name)])
            if count > 1:
                raise ValidationError(_("Field names must be unique per model."))

    _sql_constraints = [
        ('size_gt_zero', 'CHECK (size>=0)', 'Size of the field cannot be negative.'),
    ]

    def _related_field(self):
        """ Return the ``Field`` instance corresponding to ``self.related``. """
        names = self.related.split(".")
        last = len(names) - 1
        model = self.env[self.model or self.model_id.model]
        for index, name in enumerate(names):
            field = model._fields.get(name)
            if field is None:
                raise UserError(_("Unknown field name '%s' in related field '%s'") % (name, self.related))
            if index < last and not field.relational:
                raise UserError(_("Non-relational field name '%s' in related field '%s'") % (name, self.related))
            model = model[name]
        return field

    @api.one
    @api.constrains('related')
    def _check_related(self):
        """ A related field must have the same type (and comodel) as the field
        it traverses to. """
        if self.state == 'manual' and self.related:
            field = self._related_field()
            if field.type != self.ttype:
                raise ValidationError(_("Related field '%s' does not have type '%s'") % (self.related, self.ttype))
            if field.relational and field.comodel_name != self.relation:
                raise ValidationError(_("Related field '%s' does not have comodel '%s'") % (self.related, self.relation))

    @api.onchange('related')
    def _onchange_related(self):
        """ Align type/comodel with the target of ``related``; related fields
        are readonly and not copied by default. """
        if self.related:
            try:
                field = self._related_field()
            except UserError as e:
                return {'warning': {'title': _("Warning"), 'message': e.message}}
            self.ttype = field.type
            self.relation = field.comodel_name
            self.readonly = True
            self.copy = False

    @api.constrains('depends')
    def _check_depends(self):
        """ Check whether all fields in dependencies are valid. """
        for record in self:
            if not record.depends:
                continue
            for seq in record.depends.split(","):
                if not seq.strip():
                    raise UserError(_("Empty dependency in %r") % (record.depends))
                model = self.env[record.model]
                names = seq.strip().split(".")
                last = len(names) - 1
                for index, name in enumerate(names):
                    field = model._fields.get(name)
                    if field is None:
                        raise UserError(_("Unknown field %r in dependency %r") % (name, seq.strip()))
                    if index < last and not field.relational:
                        raise UserError(_("Non-relational field %r in dependency %r") % (name, seq.strip()))
                    model = model[name]

    @api.onchange('compute')
    def _onchange_compute(self):
        # non-stored computed fields are readonly and not copied by default
        if self.compute:
            self.readonly = True
            self.copy = False

    @api.one
    @api.constrains('relation_table')
    def _check_relation_table(self):
        """ A custom many2many relation table name must be a valid PostgreSQL
        identifier. """
        if self.relation_table:
            models.check_pg_name(self.relation_table)

    @api.model
    def _custom_many2many_names(self, model_name, comodel_name):
        """ Return default names for the table and columns of a custom many2many field. """
        rel1 = self.env[model_name]._table
        rel2 = self.env[comodel_name]._table
        table = 'x_%s_%s_rel' % tuple(sorted([rel1, rel2]))
        if rel1 == rel2:
            return (table, 'id1', 'id2')
        else:
            return (table, '%s_id' % rel1, '%s_id' % rel2)

    @api.onchange('ttype', 'model_id', 'relation')
    def _onchange_ttype(self):
        """ Adjust defaults depending on the field type: one2many fields are
        not copied; many2many fields get default relation table/columns. """
        self.copy = (self.ttype != 'one2many')
        if self.ttype == 'many2many' and self.model_id and self.relation:
            names = self._custom_many2many_names(self.model_id.model, self.relation)
            self.relation_table, self.column1, self.column2 = names
        else:
            self.relation_table = False
            self.column1 = False
            self.column2 = False

    @api.onchange('relation_table')
    def _onchange_relation_table(self):
        """ When reusing an existing relation table, either adopt the columns
        of a candidate inverse field, or warn about possible conflicts. """
        if self.relation_table:
            # check whether other fields use the same table
            others = self.search([('ttype', '=', 'many2many'),
                                  ('relation_table', '=', self.relation_table),
                                  ('id', 'not in', self._origin.ids)])
            if others:
                for other in others:
                    if (other.model, other.relation) == (self.relation, self.model):
                        # other is a candidate inverse field
                        self.column1 = other.column2
                        self.column2 = other.column1
                        return
                # note: message previously read "if used" instead of "is used"
                return {'warning': {
                    'title': _("Warning"),
                    'message': _("The table %r is used for other, possibly incompatible fields.") % self.relation_table,
                }}

    @api.multi
    def _drop_column(self):
        """ Drop the database columns of the fields in ``self``, and the
        relation tables of custom many2many fields when no longer used. """
        tables_to_drop = set()
        for field in self:
            if field.name in models.MAGIC_COLUMNS:
                continue
            model = self.env[field.model]
            self._cr.execute('SELECT relkind FROM pg_class WHERE relname=%s', (model._table,))
            relkind = self._cr.fetchone()
            self._cr.execute("""SELECT column_name FROM information_schema.columns
                                WHERE table_name=%s AND column_name=%s""",
                             (model._table, field.name))
            column_name = self._cr.fetchone()
            if column_name and (relkind and relkind[0] == 'r'):
                self._cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
            if field.state == 'manual' and field.ttype == 'many2many':
                rel_name = field.relation_table or model._fields[field.name].relation
                tables_to_drop.add(rel_name)
            model._pop_field(field.name)
        if tables_to_drop:
            # drop the relation tables that are not used by other fields
            self._cr.execute("""SELECT relation_table FROM ir_model_fields
                                WHERE relation_table IN %s AND id NOT IN %s""",
                             (tuple(tables_to_drop), tuple(self.ids)))
            tables_to_keep = set(row[0] for row in self._cr.fetchall())
            for rel_name in tables_to_drop - tables_to_keep:
                self._cr.execute('DROP TABLE "%s"' % rel_name)
        return True

    @api.multi
    def _prepare_update(self):
        """ Check whether the fields in ``self`` may be modified or removed.
            This method prevents the modification/deletion of many2one fields
            that have an inverse one2many, for instance.
        """
        self = self.filtered(lambda record: record.state == 'manual')
        if not self:
            return
        for record in self:
            model = self.env[record.model]
            field = model._fields[record.name]
            if field.type == 'many2one' and model._field_inverses.get(field):
                if self._context.get(MODULE_UNINSTALL_FLAG):
                    # automatically unlink the corresponding one2many field(s)
                    inverses = self.search([('relation', '=', field.model_name),
                                            ('relation_field', '=', field.name)])
                    inverses.unlink()
                    continue
                msg = _("The field '%s' cannot be removed because the field '%s' depends on it.")
                raise UserError(msg % (field, model._field_inverses[field][0]))
        # remove fields from registry, and check that views are not broken
        # (renamed from 'fields' to avoid shadowing the odoo.fields module)
        removed_fields = [self.env[record.model]._pop_field(record.name) for record in self]
        domain = expression.OR([('arch_db', 'like', record.name)] for record in self)
        views = self.env['ir.ui.view'].search(domain)
        try:
            for view in views:
                view._check_xml()
        except Exception:
            raise UserError("\n".join([
                _("Cannot rename/delete fields that are still present in views:"),
                _("Fields:") + " " + ", ".join(map(str, removed_fields)),
                _("View:") + " " + view.name,
            ]))
        finally:
            # the registry has been modified, restore it
            self.pool.setup_models(self._cr)

    @api.multi
    def unlink(self):
        """ Delete the fields, drop their columns, and reload the registry. """
        if not self:
            return True
        # Prevent manual deletion of module columns
        if not self._context.get(MODULE_UNINSTALL_FLAG) and \
                any(field.state != 'manual' for field in self):
            raise UserError(_("This column contains module data and cannot be removed!"))
        # prevent screwing up fields that depend on these fields
        self._prepare_update()
        model_names = self.mapped('model')
        self._drop_column()
        res = super(IrModelFields, self).unlink()
        # The field we just deleted might be inherited, and the registry is
        # inconsistent in this case; therefore we reload the registry.
        if not self._context.get(MODULE_UNINSTALL_FLAG):
            self._cr.commit()  # must be committed before reloading registry in new cursor
            api.Environment.reset()
            registry = Registry.new(self._cr.dbname)
            # renamed from 'models' to avoid shadowing the odoo.models module
            descendant_models = registry.descendants(model_names, '_inherits')
            registry.init_models(self._cr, descendant_models, dict(self._context, update_custom_fields=True))
            registry.signal_registry_change()
        return res

    @api.model
    def create(self, vals):
        """ Create the field; for custom fields, validate the target model /
        inverse field and update registry and database schema. """
        if 'model_id' in vals:
            model_data = self.env['ir.model'].browse(vals['model_id'])
            vals['model'] = model_data.model
        if vals.get('ttype') == 'selection':
            if not vals.get('selection'):
                raise UserError(_('For selection fields, the Selection Options must be given!'))
            self._check_selection(vals['selection'])
        res = super(IrModelFields, self).create(vals)
        if vals.get('state', 'manual') == 'manual':
            if vals.get('relation') and not self.env['ir.model'].search([('model', '=', vals['relation'])]):
                raise UserError(_("Model %s does not exist!") % vals['relation'])
            if vals.get('ttype') == 'one2many':
                if not self.search([('model_id', '=', vals['relation']), ('name', '=', vals['relation_field']), ('ttype', '=', 'many2one')]):
                    raise UserError(_("Many2one %s on model %s does not exist!") % (vals['relation_field'], vals['relation']))
            self.pool.clear_manual_fields()
            if vals['model'] in self.pool:
                # setup models; this re-initializes model in registry
                self.pool.setup_models(self._cr, partial=(not self.pool.ready))
                # update database schema of model and its descendant models
                # (renamed from 'models' to avoid shadowing the odoo.models module)
                descendant_models = self.pool.descendants([vals['model']], '_inherits')
                self.pool.init_models(self._cr, descendant_models, dict(self._context, update_custom_fields=True))
                self.pool.signal_registry_change()
        return res

    @api.multi
    def write(self, vals):
        """ Write on the fields; only custom fields may be altered, and only a
        single column rename is supported per call. """
        # For the moment renaming a sparse field or changing the storing system
        # is not allowed. This may be done later
        if 'serialization_field_id' in vals or 'name' in vals:
            for field in self:
                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
                    raise UserError(_('Changing the storing system for field "%s" is not allowed.') % field.name)
                if field.serialization_field_id and (field.name != vals['name']):
                    raise UserError(_('Renaming sparse field "%s" is not allowed') % field.name)
        # if set, *one* column can be renamed here
        column_rename = None
        # names of the models to patch
        patched_models = set()
        if vals and self:
            # check selection if given
            if vals.get('selection'):
                self._check_selection(vals['selection'])
            for item in self:
                if item.state != 'manual':
                    raise UserError(_('Properties of base fields cannot be altered in this manner! '
                                      'Please modify them through Python code, '
                                      'preferably through a custom addon!'))
                if vals.get('model_id', item.model_id.id) != item.model_id.id:
                    raise UserError(_("Changing the model of a field is forbidden!"))
                if vals.get('ttype', item.ttype) != item.ttype:
                    raise UserError(_("Changing the type of a field is not yet supported. "
                                      "Please drop it and create it again!"))
                obj = self.pool.get(item.model)
                field = getattr(obj, '_fields', {}).get(item.name)
                if vals.get('name', item.name) != item.name:
                    # We need to rename the column
                    item._prepare_update()
                    if column_rename:
                        raise UserError(_('Can only rename one field at a time!'))
                    column_rename = (obj._table, item.name, vals['name'], item.index)
                # We don't check the 'state', because it might come from the context
                # (thus be set for multiple fields) and will be ignored anyway.
                if obj is not None and field is not None:
                    patched_models.add(obj._name)
        # These shall never be written (modified)
        for column_name in ('model_id', 'model', 'state'):
            if column_name in vals:
                del vals[column_name]
        res = super(IrModelFields, self).write(vals)
        self.pool.clear_manual_fields()
        if column_rename:
            # rename column in database, and its corresponding index if present
            table, oldname, newname, index = column_rename
            self._cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (table, oldname, newname))
            if index:
                self._cr.execute('ALTER INDEX "%s_%s_index" RENAME TO "%s_%s_index"' % (table, oldname, table, newname))
        if column_rename or patched_models:
            # setup models, this will reload all manual fields in registry
            self.pool.setup_models(self._cr, partial=(not self.pool.ready))
        if patched_models:
            # update the database schema of the models to patch
            # (renamed from 'models' to avoid shadowing the odoo.models module)
            descendant_models = self.pool.descendants(patched_models, '_inherits')
            self.pool.init_models(self._cr, descendant_models, dict(self._context, update_custom_fields=True))
        if column_rename or patched_models:
            self.pool.signal_registry_change()
        return res

    @api.multi
    def name_get(self):
        """ Display as "Field Label (model.name)". """
        res = []
        for field in self:
            res.append((field.id, '%s (%s)' % (field.field_description, field.model)))
        return res

    @api.model
    def _instanciate(self, field_data, partial):
        """ Return a field instance corresponding to parameters ``field_data``.

        When ``partial`` is set, return ``None`` for relational fields whose
        comodel (or inverse field) is not yet available in the registry.
        """
        attrs = {
            'manual': True,
            'string': field_data['field_description'],
            'help': field_data['help'],
            'index': bool(field_data['index']),
            'copy': bool(field_data['copy']),
            'related': field_data['related'],
            'required': bool(field_data['required']),
            'readonly': bool(field_data['readonly']),
            'store': bool(field_data['store']),
        }
        # FIXME: ignore field_data['serialization_field_id']
        if field_data['ttype'] in ('char', 'text', 'html'):
            attrs['translate'] = bool(field_data['translate'])
            attrs['size'] = field_data['size'] or None
        elif field_data['ttype'] in ('selection', 'reference'):
            attrs['selection'] = safe_eval(field_data['selection'])
        elif field_data['ttype'] == 'many2one':
            if partial and field_data['relation'] not in self.env:
                return
            attrs['comodel_name'] = field_data['relation']
            attrs['ondelete'] = field_data['on_delete']
            attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
        elif field_data['ttype'] == 'one2many':
            if partial and not (
                field_data['relation'] in self.env and (
                    field_data['relation_field'] in self.env[field_data['relation']]._fields or
                    field_data['relation_field'] in self.pool.get_manual_fields(self._cr, field_data['relation'])
            )):
                return
            attrs['comodel_name'] = field_data['relation']
            attrs['inverse_name'] = field_data['relation_field']
            attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
        elif field_data['ttype'] == 'many2many':
            if partial and field_data['relation'] not in self.env:
                return
            attrs['comodel_name'] = field_data['relation']
            rel, col1, col2 = self._custom_many2many_names(field_data['model'], field_data['relation'])
            attrs['relation'] = field_data['relation_table'] or rel
            attrs['column1'] = field_data['column1'] or col1
            attrs['column2'] = field_data['column2'] or col2
            attrs['domain'] = safe_eval(field_data['domain']) if field_data['domain'] else None
        # add compute function if given
        if field_data['compute']:
            attrs['compute'] = make_compute(field_data['compute'], field_data['depends'])
        return fields.Field.by_type[field_data['ttype']](**attrs)
class IrModelConstraint(models.Model):
    """
    This model tracks PostgreSQL foreign keys and constraints used by Odoo
    models.
    """
    _name = 'ir.model.constraint'

    name = fields.Char(string='Constraint', required=True, index=True,
                       help="PostgreSQL constraint or foreign key name.")
    definition = fields.Char(help="PostgreSQL constraint definition")
    model = fields.Many2one('ir.model', required=True, index=True)
    module = fields.Many2one('ir.module.module', required=True, index=True)
    type = fields.Char(string='Constraint Type', required=True, size=1, index=True,
                       help="Type of the constraint: `f` for a foreign key, "
                            "`u` for other constraints.")
    date_update = fields.Datetime(string='Update Date')
    date_init = fields.Datetime(string='Initialization Date')

    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)',
         'Constraints with the same name are unique per module.'),
    ]

    @api.multi
    def _module_data_uninstall(self):
        """
        Delete PostgreSQL foreign keys and constraints tracked by this model.
        """
        if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
            raise AccessError(_('Administrator access is required to uninstall a module'))
        ids_set = set(self.ids)
        for data in self.sorted(key='id', reverse=True):
            name = tools.ustr(data.name)
            # resolve the table name, falling back on the default naming
            # convention when the model is no longer in the registry
            if data.model.model in self.env:
                table = self.env[data.model.model]._table
            else:
                table = data.model.model.replace('.', '_')
            typ = data.type
            # double-check we are really going to delete all the owners of this schema element
            self._cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
            external_ids = set(x[0] for x in self._cr.fetchall())
            if external_ids - ids_set:
                # as installed modules have defined this element we must not delete it!
                continue
            if typ == 'f':
                # test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
                self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                                    WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
                                 ('f', name, table))
                if self._cr.fetchone():
                    self._cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
                    _logger.info('Dropped FK CONSTRAINT %s@%s', name, data.model.model)
            if typ == 'u':
                # test if constraint exists
                self._cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                                    WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""",
                                 ('u', name, table))
                if self._cr.fetchone():
                    self._cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table, name),)
                    _logger.info('Dropped CONSTRAINT %s@%s', name, data.model.model)
        self.unlink()

    @api.multi
    def copy(self, default=None):
        """ Suffix the name on duplication to satisfy the unique constraint. """
        default = dict(default or {})
        default['name'] = self.name + '_copy'
        return super(IrModelConstraint, self).copy(default)
class IrModelRelation(models.Model):
    """
    This model tracks PostgreSQL tables used to implement Odoo many2many
    relations.
    """
    _name = 'ir.model.relation'

    name = fields.Char(string='Relation Name', required=True, index=True,
                       help="PostgreSQL table name implementing a many2many relation.")
    model = fields.Many2one('ir.model', required=True, index=True)
    module = fields.Many2one('ir.module.module', required=True, index=True)
    date_update = fields.Datetime(string='Update Date')
    date_init = fields.Datetime(string='Initialization Date')

    @api.multi
    def _module_data_uninstall(self):
        """
        Delete PostgreSQL many2many relations tracked by this model.
        """
        if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
            raise AccessError(_('Administrator access is required to uninstall a module'))
        ids_set = set(self.ids)
        to_drop = tools.OrderedSet()
        for data in self.sorted(key='id', reverse=True):
            name = tools.ustr(data.name)
            # double-check we are really going to delete all the owners of this schema element
            self._cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
            external_ids = set(x[0] for x in self._cr.fetchall())
            if external_ids - ids_set:
                # as installed modules have defined this element we must not delete it!
                continue
            self._cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if self._cr.fetchone():
                to_drop.add(name)
        self.unlink()
        # drop m2m relation tables
        for table in to_drop:
            self._cr.execute('DROP TABLE %s CASCADE' % table,)
            _logger.info('Dropped table %s', table)
class IrModelAccess(models.Model):
    """ Access control lists per model and group ("ir.model.access"). """
    _name = 'ir.model.access'

    name = fields.Char(required=True, index=True)
    active = fields.Boolean(default=True, help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).')
    model_id = fields.Many2one('ir.model', string='Object', required=True, domain=[('transient', '=', False)], index=True, ondelete='cascade')
    group_id = fields.Many2one('res.groups', string='Group', ondelete='cascade', index=True)
    perm_read = fields.Boolean(string='Read Access')
    perm_write = fields.Boolean(string='Write Access')
    perm_create = fields.Boolean(string='Create Access')
    perm_unlink = fields.Boolean(string='Delete Access')

    @api.model
    def check_groups(self, group):
        """ Check whether the current user has the given group.

        :param group: the group's external id, as "module.xml_id"
        # NOTE(review): a value without a '.' would make grouparr[1] raise
        # IndexError rather than return False — confirm callers always pass
        # a fully-qualified id.
        """
        grouparr = group.split('.')
        if not grouparr:
            return False
        self._cr.execute("""SELECT 1 FROM res_groups_users_rel
                            WHERE uid=%s AND gid IN (
                                SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
                         (self._uid, grouparr[0], grouparr[1],))
        return bool(self._cr.fetchone())

    @api.model
    def check_group(self, model, mode, group_ids):
        """ Check if a specific group has the access mode to the specified model"""
        assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
        if isinstance(model, models.BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.name
        else:
            model_name = model
        if isinstance(group_ids, (int, long)):
            group_ids = [group_ids]
        # 'mode' is whitelisted by the assert above, so formatting it into
        # the query is safe
        query = """ SELECT 1 FROM ir_model_access a
                    JOIN ir_model m ON (m.id = a.model_id)
                    WHERE a.active AND a.perm_{mode} AND
                        m.model=%s AND (a.group_id IN %s OR a.group_id IS NULL)
                """.format(mode=mode)
        self._cr.execute(query, (model_name, tuple(group_ids)))
        return bool(self._cr.rowcount)

    @api.model_cr
    def group_names_with_access(self, model_name, access_mode):
        """ Return the names of visible groups which have been granted
            ``access_mode`` on the model ``model_name``.
           :rtype: list
        """
        # 'access_mode' is whitelisted by the assert below, so concatenating
        # it into the query is safe
        assert access_mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
        self._cr.execute("""SELECT c.name, g.name
                            FROM ir_model_access a
                                JOIN ir_model m ON (a.model_id=m.id)
                                JOIN res_groups g ON (a.group_id=g.id)
                                LEFT JOIN ir_module_category c ON (c.id=g.category_id)
                            WHERE m.model=%s AND a.active IS TRUE AND a.perm_""" + access_mode,
                         (model_name,))
        return [('%s/%s' % x) if x[0] else x[1] for x in self._cr.fetchall()]

    # The context parameter is useful when the method translates error messages.
    # But as the method raises an exception in that case,  the key 'lang' might
    # not be really necessary as a cache key, unless the `ormcache_context`
    # decorator catches the exception (it does not at the moment.)
    @api.model
    @tools.ormcache_context('self._uid', 'model', 'mode', 'raise_exception', keys=('lang',))
    def check(self, model, mode='read', raise_exception=True):
        """ Check whether the current user has ``mode`` access on ``model``;
        either return a boolean or raise an AccessError. The result is cached
        per (uid, model, mode, raise_exception, lang). """
        if self._uid == 1:
            # user root has all access rights
            return True
        assert mode in ('read', 'write', 'create', 'unlink'), 'Invalid access mode'
        if isinstance(model, models.BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.model
        else:
            model_name = model
        # TransientModel records have no access rights, only an implicit access rule
        if model_name not in self.env:
            _logger.error('Missing model %s', model_name)
        elif self.env[model_name].is_transient():
            return True
        # We check if a specific rule exists
        self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
                              FROM ir_model_access a
                              JOIN ir_model m ON (m.id = a.model_id)
                              JOIN res_groups_users_rel gu ON (gu.gid = a.group_id)
                             WHERE m.model = %s
                               AND gu.uid = %s
                               AND a.active IS TRUE""".format(mode=mode),
                         (model_name, self._uid,))
        r = self._cr.fetchone()[0]
        if not r:
            # there is no specific rule. We check the generic rule
            self._cr.execute("""SELECT MAX(CASE WHEN perm_{mode} THEN 1 ELSE 0 END)
                                  FROM ir_model_access a
                                  JOIN ir_model m ON (m.id = a.model_id)
                                 WHERE a.group_id IS NULL
                                   AND m.model = %s
                                   AND a.active IS TRUE""".format(mode=mode),
                             (model_name,))
            r = self._cr.fetchone()[0]
        if not r and raise_exception:
            groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(model_name, mode))
            msg_heads = {
                # Messages are declared in extenso so they are properly exported in translation terms
                'read': _("Sorry, you are not allowed to access this document."),
                'write': _("Sorry, you are not allowed to modify this document."),
                'create': _("Sorry, you are not allowed to create this kind of document."),
                'unlink': _("Sorry, you are not allowed to delete this document."),
            }
            if groups:
                msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
                msg_params = (groups, model_name)
            else:
                msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
                msg_params = (model_name,)
            _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, self._uid, model_name)
            msg = '%s %s' % (msg_heads[mode], msg_tail)
            raise AccessError(msg % msg_params)
        return bool(r)

    # registered (model, method_name) pairs whose caches must be cleared
    # whenever ACLs change
    __cache_clearing_methods = set()

    @classmethod
    def register_cache_clearing_method(cls, model, method):
        cls.__cache_clearing_methods.add((model, method))

    @classmethod
    def unregister_cache_clearing_method(cls, model, method):
        cls.__cache_clearing_methods.discard((model, method))

    @api.model_cr
    def call_cache_clearing_methods(self):
        """ Invalidate the ORM cache, the ACL cache, and all registered
        cache-clearing methods. """
        self.invalidate_cache()
        self.check.clear_cache(self)    # clear the cache of check function
        for model, method in self.__cache_clearing_methods:
            if model in self.env:
                getattr(self.env[model], method)()

    #
    # Check rights on actions
    #
    @api.model
    def create(self, values):
        # any ACL change invalidates the access caches
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).create(values)

    @api.multi
    def write(self, values):
        # any ACL change invalidates the access caches
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).write(values)

    @api.multi
    def unlink(self):
        # any ACL change invalidates the access caches
        self.call_cache_clearing_methods()
        return super(IrModelAccess, self).unlink()
class IrModelData(models.Model):
    """Holds external identifier keys for records in the database.
    This has two main uses:
    * allows easy data integration with third-party systems,
      making import/export/sync of data possible, as records
      can be uniquely identified across multiple systems
    * allows tracking the origin of data installed by Odoo
      modules themselves, thus making it possible to later
      update them seamlessly.
    """
    _name = 'ir.model.data'
    _order = 'module, model, name'
    # (module, name) together form the full external id "module.name"
    name = fields.Char(string='External Identifier', required=True,
                       help="External Key/Identifier that can be used for "
                            "data integration with third-party systems")
    complete_name = fields.Char(compute='_compute_complete_name', string='Complete ID')
    model = fields.Char(string='Model Name', required=True)
    module = fields.Char(default='', required=True)
    res_id = fields.Integer(string='Record ID', help="ID of the target record in the database")
    noupdate = fields.Boolean(string='Non Updatable', default=False)
    date_update = fields.Datetime(string='Update Date', default=fields.Datetime.now)
    date_init = fields.Datetime(string='Init Date', default=fields.Datetime.now)
    reference = fields.Char(string='Reference', compute='_compute_reference', readonly=True, store=False)
    @api.depends('module', 'name')
    def _compute_complete_name(self):
        """Join module and name into the dotted external id, skipping empty parts."""
        for res in self:
            res.complete_name = ".".join(filter(None, [res.module, res.name]))
    @api.depends('model', 'res_id')
    def _compute_reference(self):
        """Render the target record as a 'model,res_id' reference string."""
        for res in self:
            res.reference = "%s,%s" % (res.model, res.res_id)
    def __init__(self, pool, cr):
        """Old-API constructor: attach the shared xml-id load map to the registry."""
        models.Model.__init__(self, pool, cr)
        # also stored in pool to avoid being discarded along with this osv instance
        if getattr(pool, 'model_data_reference_ids', None) is None:
            self.pool.model_data_reference_ids = {}
        # put loads on the class, in order to share it among all instances
        type(self).loads = self.pool.model_data_reference_ids
    @api.model_cr_context
    def _auto_init(self):
        """Create the table as usual, then ensure the lookup indexes exist."""
        res = super(IrModelData, self)._auto_init()
        # unique (module, name): one record per external id
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_module_name_uniq_index'")
        if not self._cr.fetchone():
            self._cr.execute('CREATE UNIQUE INDEX ir_model_data_module_name_uniq_index ON ir_model_data (module, name)')
        # (model, res_id) index speeds up reverse lookups from records to xml ids
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_model_data_model_res_id_index'")
        if not self._cr.fetchone():
            self._cr.execute('CREATE INDEX ir_model_data_model_res_id_index ON ir_model_data (model, res_id)')
        return res
    @api.multi
    def name_get(self):
        """Display each xml id as the target record's display name, falling
        back on the complete external id when the target cannot be read."""
        model_id_name = defaultdict(dict)  # {res_model: {res_id: name}}
        for xid in self:
            model_id_name[xid.model][xid.res_id] = None
        # fill in model_id_name with name_get() of corresponding records
        for model, id_name in model_id_name.iteritems():
            try:
                ng = self.env[model].browse(id_name).name_get()
                id_name.update(ng)
            except Exception:
                # missing model or unreadable records: keep the fallback
                pass
        # return results, falling back on complete_name
        return [(xid.id, model_id_name[xid.model][xid.res_id] or xid.complete_name)
                for xid in self]
    # NEW V8 API
    @api.model
    @tools.ormcache('xmlid')
    def xmlid_lookup(self, xmlid):
        """Low level xmlid lookup
        Return (id, res_model, res_id) or raise ValueError if not found
        """
        module, name = xmlid.split('.', 1)
        xid = self.search([('module', '=', module), ('name', '=', name)])
        if not xid:
            raise ValueError('External ID not found in the system: %s' % xmlid)
        # the sql constraints ensure us we have only one result
        res = xid.read(['model', 'res_id'])[0]
        if not res['res_id']:
            raise ValueError('External ID not found in the system: %s' % xmlid)
        return res['id'], res['model'], res['res_id']
    @api.model
    def xmlid_to_res_model_res_id(self, xmlid, raise_if_not_found=False):
        """ Return (res_model, res_id)"""
        try:
            return self.xmlid_lookup(xmlid)[1:3]
        except ValueError:
            if raise_if_not_found:
                raise
            return (False, False)
    @api.model
    def xmlid_to_res_id(self, xmlid, raise_if_not_found=False):
        """ Returns res_id """
        return self.xmlid_to_res_model_res_id(xmlid, raise_if_not_found)[1]
    @api.model
    def xmlid_to_object(self, xmlid, raise_if_not_found=False):
        """ Return a browse_record
        if not found and raise_if_not_found is False return None
        """
        t = self.xmlid_to_res_model_res_id(xmlid, raise_if_not_found)
        res_model, res_id = t
        if res_model and res_id:
            record = self.env[res_model].browse(res_id)
            if record.exists():
                return record
            if raise_if_not_found:
                raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
        return None
    @api.model
    def _get_id(self, module, xml_id):
        """Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
        return self.xmlid_lookup("%s.%s" % (module, xml_id))[0]
    @api.model
    def get_object_reference(self, module, xml_id):
        """Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
        return self.xmlid_lookup("%s.%s" % (module, xml_id))[1:3]
    @api.model
    def check_object_reference(self, module, xml_id, raise_on_access_error=False):
        """Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
        to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
        model, res_id = self.get_object_reference(module, xml_id)
        #search on id found in result to check if current user has read access right
        if self.env[model].search([('id', '=', res_id)]):
            return model, res_id
        if raise_on_access_error:
            raise AccessError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
        return model, False
    @api.model
    def get_object(self, module, xml_id):
        """ Returns a browsable record for the given module name and xml_id.
        Raises ValueError if the external id is unknown or its target was deleted.
        """
        return self.xmlid_to_object("%s.%s" % (module, xml_id), raise_if_not_found=True)
    @api.model
    def _update_dummy(self, model, module, xml_id=False, store=True):
        """Record an already-existing xml id (and those of its _inherits
        parents) in ``self.loads`` without creating or modifying anything.
        Returns the target record id, or False when the xml id is unknown."""
        if xml_id:
            try:
                # One step to check the ID is defined and the record actually exists
                record = self.get_object(module, xml_id)
                if record:
                    self.loads[(module, xml_id)] = (model, record.id)
                    for parent_model, parent_field in self.env[model]._inherits.iteritems():
                        parent = record[parent_field]
                        parent_xid = '%s_%s' % (xml_id, parent_model.replace('.', '_'))
                        self.loads[(module, parent_xid)] = (parent_model, parent.id)
                return record.id
            except Exception:
                # unknown xml id or unreadable target: report "not found"
                pass
        return False
    @api.multi
    def unlink(self):
        """ Regular unlink method, but make sure to clear the caches. """
        self.clear_caches()
        return super(IrModelData, self).unlink()
    @api.model
    def _update(self, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False):
        """Create or update the record identified by ``module``/``xml_id`` with
        ``values`` (the workhorse behind XML/CSV data loading).  Honors
        ``noupdate`` in 'update' mode, repairs stale xml ids whose target
        vanished, and mirrors xml ids onto _inherits parent records.
        Returns the id of the target record."""
        # records created during module install should not display the messages of OpenChatter
        self = self.with_context(install_mode=True)
        current_module = module
        if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.')) == 2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
            module, xml_id = xml_id.split('.')
        action = self.browse()
        record = self.env[model].browse(res_id)
        if xml_id:
            # fetch existing xml-id rows, joined with the target table to
            # detect dangling references (md.id is NULL when target is gone)
            self._cr.execute("""SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
                                FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
                                WHERE imd.module=%%s AND imd.name=%%s""" % record._table,
                             (module, xml_id))
            results = self._cr.fetchall()
            for imd_id, imd_res_id, real_id, imd_model, imd_noupdate in results:
                # In update mode, do not update a record if it's ir.model.data is flagged as noupdate
                if mode == 'update' and imd_noupdate:
                    return imd_res_id
                if not real_id:
                    # stale xml id: the target record no longer exists
                    self.clear_caches()
                    self._cr.execute('DELETE FROM ir_model_data WHERE id=%s', (imd_id,))
                    record = record.browse()
                else:
                    assert model == imd_model, "External ID conflict, %s already refers to a `%s` record,"\
                        " you can't define a `%s` record with this ID." % (xml_id, imd_model, model)
                    action = self.browse(imd_id)
                    record = record.browse(imd_res_id)
        if action and record:
            # xml id and target both exist: plain update, refresh the timestamp
            record.write(values)
            action.sudo().write({'date_update': fields.Datetime.now()})
        elif record:
            # target exists (given res_id) but the xml id must be (re)created
            record.write(values)
            if xml_id:
                for parent_model, parent_field in record._inherits.iteritems():
                    self.sudo().create({
                        'name': xml_id + '_' + parent_model.replace('.', '_'),
                        'model': parent_model,
                        'module': module,
                        'res_id': record[parent_field].id,
                        'noupdate': noupdate,
                    })
                self.sudo().create({
                    'name': xml_id,
                    'model': model,
                    'module': module,
                    'res_id': record.id,
                    'noupdate': noupdate,
                })
        elif mode == 'init' or (mode == 'update' and xml_id):
            # no target record yet: create it, reusing surviving parents
            existing_parents = set()  # {parent_model, ...}
            if xml_id:
                for parent_model, parent_field in record._inherits.iteritems():
                    xid = self.search([
                        ('module', '=', module),
                        ('name', '=', xml_id + '_' + parent_model.replace('.', '_')),
                    ])
                    # XML ID found in the database, try to recover an existing record
                    if xid:
                        parent = self.env[xid.model].browse(xid.res_id)
                        if parent.exists():
                            existing_parents.add(xid.model)
                            values[parent_field] = parent.id
                        else:
                            xid.unlink()
            record = record.create(values)
            if xml_id:
                #To add an external identifiers to all inherits model
                inherit_models = [record]
                while inherit_models:
                    current_model = inherit_models.pop()
                    for parent_model_name, parent_field in current_model._inherits.iteritems():
                        inherit_models.append(self.env[parent_model_name])
                        if parent_model_name in existing_parents:
                            continue
                        self.sudo().create({
                            'name': xml_id + '_' + parent_model_name.replace('.', '_'),
                            'model': parent_model_name,
                            'module': module,
                            'res_id': record[parent_field].id,
                            'noupdate': noupdate,
                        })
                        existing_parents.add(parent_model_name)
                self.sudo().create({
                    'name': xml_id,
                    'model': model,
                    'module': module,
                    'res_id': record.id,
                    'noupdate': noupdate
                })
        if current_module and module != current_module:
            _logger.warning("Creating the ir.model.data %s in module %s instead of %s.",
                            xml_id, module, current_module)
        if xml_id and record:
            # remember what was loaded so _process_end can garbage-collect
            self.loads[(module, xml_id)] = (model, record.id)
            for parent_model, parent_field in record._inherits.iteritems():
                parent_xml_id = xml_id + '_' + parent_model.replace('.', '_')
                self.loads[(module, parent_xml_id)] = (parent_model, record[parent_field].id)
        return record.id
    @api.model
    def _module_data_uninstall(self, modules_to_remove):
        """Deletes all the records referenced by the ir.model.data entries
        ``ids`` along with their corresponding database backed (including
        dropping tables, columns, FKs, etc, as long as there is no other
        ir.model.data entry holding a reference to them (which indicates that
        they are still owned by another module).
        Attempts to perform the deletion in an appropriate order to maximize
        the chance of gracefully deleting all records.
        This step is performed as part of the full uninstallation of a module.
        """
        if not (self._uid == SUPERUSER_ID or self.env.user.has_group('base.group_system')):
            raise AccessError(_('Administrator access is required to uninstall a module'))
        # enable model/field deletion
        self = self.with_context(**{MODULE_UNINSTALL_FLAG: True})
        datas = self.search([('module', 'in', modules_to_remove)])
        wkf_todo = []
        to_unlink = tools.OrderedSet()
        undeletable = self.browse([])
        # newest records first, so dependents go before their dependencies
        for data in datas.sorted(key='id', reverse=True):
            model = data.model
            res_id = data.res_id
            to_unlink.add((model, res_id))
            if model == 'workflow.activity':
                # Special treatment for workflow activities: temporarily revert their
                # incoming transition and trigger an update to force all workflow items
                # to move out before deleting them
                self._cr.execute('SELECT res_type, res_id FROM wkf_instance WHERE id IN (SELECT inst_id FROM wkf_workitem WHERE act_id=%s)', (res_id,))
                wkf_todo.extend(self._cr.fetchall())
                self._cr.execute("UPDATE wkf_transition SET condition='True', group_id=NULL, signal=NULL, act_to=act_from, act_from=%s WHERE act_to=%s", (res_id, res_id))
                self.invalidate_cache()
        for model, res_id in wkf_todo:
            try:
                record = self.env[model].browse(res_id)
                record.step_workflow()
            except Exception:
                _logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
        def unlink_if_refcount(to_unlink):
            # delete each (model, res_id) unless another module still refers
            # to it; each deletion runs inside its own savepoint so one
            # failure does not abort the whole batch
            undeletable = self.browse()
            for model, res_id in to_unlink:
                external_ids = self.search([('model', '=', model), ('res_id', '=', res_id)])
                if external_ids - datas:
                    # if other modules have defined this record, we must not delete it
                    continue
                if model == 'ir.model.fields':
                    # Don't remove the LOG_ACCESS_COLUMNS unless _log_access
                    # has been turned off on the model.
                    field = self.env[model].browse(res_id)
                    if not field.exists():
                        _logger.info('Deleting orphan external_ids %s', external_ids)
                        external_ids.unlink()
                        continue
                    if field.name in models.LOG_ACCESS_COLUMNS and field.model in self.env and self.env[field.model]._log_access:
                        continue
                    if field.name == 'id':
                        continue
                _logger.info('Deleting %s@%s', res_id, model)
                try:
                    self._cr.execute('SAVEPOINT record_unlink_save')
                    self.env[model].browse(res_id).unlink()
                except Exception:
                    _logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
                    undeletable += external_ids
                    self._cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
                else:
                    self._cr.execute('RELEASE SAVEPOINT record_unlink_save')
            return undeletable
        # Remove non-model records first, then model fields, and finish with models
        undeletable += unlink_if_refcount(item for item in to_unlink if item[0] not in ('ir.model', 'ir.model.fields', 'ir.model.constraint'))
        undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model.constraint')
        modules = self.env['ir.module.module'].search([('name', 'in', modules_to_remove)])
        constraints = self.env['ir.model.constraint'].search([('module', 'in', modules.ids)])
        constraints._module_data_uninstall()
        undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model.fields')
        relations = self.env['ir.model.relation'].search([('module', 'in', modules.ids)])
        relations._module_data_uninstall()
        undeletable += unlink_if_refcount(item for item in to_unlink if item[0] == 'ir.model')
        (datas - undeletable).unlink()
    @api.model
    def _process_end(self, modules):
        """ Clear records removed from updated module data.
        This method is called at the end of the module loading process.
        It is meant to removed records that are no longer present in the
        updated data. Such records are recognised as the one with an xml id
        and a module in ir_model_data and noupdate set to false, but not
        present in self.loads.
        """
        if not modules or tools.config.get('import_partial'):
            return True
        bad_imd_ids = []
        self = self.with_context({MODULE_UNINSTALL_FLAG: True})
        query = """ SELECT id, name, model, res_id, module FROM ir_model_data
                    WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC
                """
        self._cr.execute(query, (tuple(modules), False))
        for (id, name, model, res_id, module) in self._cr.fetchall():
            if (module, name) not in self.loads:
                if model in self.env:
                    _logger.info('Deleting %s@%s (%s.%s)', res_id, model, module, name)
                    record = self.env[model].browse(res_id)
                    if record.exists():
                        record.unlink()
                    else:
                        # xml id points at a vanished record: mark for cleanup
                        bad_imd_ids.append(id)
        if bad_imd_ids:
            self.browse(bad_imd_ids).unlink()
        self.loads.clear()
class WizardModelMenu(models.TransientModel):
    """Wizard that creates a menu entry (and its window action) for a model."""
    _name = 'wizard.ir.model.menu.create'
    menu_id = fields.Many2one('ir.ui.menu', string='Parent Menu', required=True, ondelete='cascade')
    name = fields.Char(string='Menu Name', required=True)
    @api.multi
    def menu_create(self):
        """Create one act_window plus a menu item pointing at it, per wizard record."""
        for wizard in self:
            target_model = self.env['ir.model'].browse(self._context.get('model_id'))
            action_vals = {
                'name': wizard.name,
                'res_model': target_model.model,
                'view_mode': 'tree,form',
            }
            window_action = self.env['ir.actions.act_window'].create(action_vals)
            menu_vals = {
                'name': wizard.name,
                'parent_id': wizard.menu_id.id,
                'action': 'ir.actions.act_window,%d' % (window_action,),
            }
            self.env['ir.ui.menu'].create(menu_vals)
        return {'type': 'ir.actions.act_window_close'}
| StarcoderdataPython |
4891199 | import os
import sys
from alize.exception import *
from blue.utility import *
from blue.utility import LOG as L
from blue.script import testcase_base
class TestCase_Slack(testcase_base.TestCase_Unit):
    """Unit-test base class providing Slack notification helpers.

    Both helpers default *channel* to the configured ``slack.channel``.
    ``slack_message`` is best-effort (errors are logged and swallowed);
    ``slack_upload`` logs the error and re-raises it.
    """
    def slack_message(self, message, channel=None):
        """Post *message* to *channel*; failures are logged, never raised."""
        if channel is None:  # PEP 8: compare to None with `is`, not `==`
            channel = self.get("slack.channel")
        try:
            self.slack.message(message, channel)
        except SlackError as e:
            # best effort: a failed notification must not fail the test run
            L.warning(str(e))
    def slack_upload(self, filepath, channel=None):
        """Upload the PNG file at *filepath* to *channel*; re-raise on failure."""
        if channel is None:
            channel = self.get("slack.channel")
        try:
            # NOTE(review): looks like a debug leftover — logs only whether the
            # file exists; confirm whether this warning is still wanted
            L.warning(os.path.exists(filepath))
            self.slack.upload(filepath, channel, filetype="image/png")
        except SlackError as e:
            L.warning(str(e))
            # bare `raise` preserves the original traceback (`raise e` resets it)
            raise
# Demonstrate the basic arithmetic operators by printing each result.
sum_result = 21 + 43      # addition
difference = 142 - 52     # subtraction
product = 10 * 342        # multiplication
power = 5 ** 2            # exponentiation
print(sum_result)
print(difference)
print(product)
print(power)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.