content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 10:11:05 2019
@author: alheritier
"""
from pykds import KDSForest
import numpy as np
from sklearn.datasets import load_breast_cancer
# Prequential (online) evaluation of a single-tree KDS forest on the
# sklearn breast-cancer dataset: predict each point before training on it.
data = load_breast_cancer()
dim = data.data.shape[1]
alpha_label = len(np.unique(data.target))
# theta0=[] uses the library's default switch-rate parameters.
m = KDSForest(ntrees=1, seed=123, dim=dim, alpha_label=alpha_label, ctw=False, theta0=[])
nll = 0.0
for point, label in zip(data.data, data.target):
    # Predict first, then update: each point is scored out-of-sample.
    log_probs = m.predict_log2_proba(point=point)
    obs_prob = 2 ** log_probs[label]
    nll -= log_probs[label]
    # Fix: reuse obs_prob instead of recomputing 2 ** log_probs[label]
    # (the variable was previously computed and never used).
    print("prob assigned to observed symbol ", label, " : ", obs_prob)
    m.update(point=point, label=label)
# Average per-sample negative log2 probability.
nll /= data.data.shape[0]
print("Normalized Log Loss: ", nll)
|
nilq/baby-python
|
python
|
import luigi
import json
import time
import re
import datetime
import subprocess
import base64
from urllib import urlopen
import uuid
from uuid import uuid4
from uuid import uuid5
from elasticsearch import Elasticsearch
#for hack to get around non self signed certificates
import ssl
import sys
# TODO
# * I think we want to use S3 for our touch files (aka lock files) since that will be better than local files that could be lost/deleted
# * I have the consonance call turned off here until I figure out why bamstats on rnaseq produces an empty report
class ConsonanceTaskV2(luigi.Task):
    """Prepare (and eventually submit) one Dockstore-wrapped bamstats job to Consonance.

    Writes a per-job settings.json containing the wrapper parameters, with the
    target tool's own JSON parameterization embedded base64-encoded.
    NOTE(review): the actual ``consonance run`` subprocess call is currently
    commented out below (see the TODO at the top of the file).
    """
    # Redwood storage / tool parameters (all overridable on the luigi CLI).
    redwood_host = luigi.Parameter("storage.ucsc-cgl.org")
    redwood_token = luigi.Parameter("must_be_defined")
    dockstore_tool_running_dockstore_tool = luigi.Parameter(default="quay.io/ucsc_cgl/dockstore-tool-runner:1.0.7")
    target_tool = luigi.Parameter(default="quay.io/briandoconnor/dockstore-tool-bamstats:1.25-11")
    target_tool_url = luigi.Parameter(default="https://dockstore.org/containers/quay.io/briandoconnor/dockstore-tool-bamstats")
    workflow_type = luigi.Parameter(default="alignment_qc_report")
    image_descriptor = luigi.Parameter(default="must be defined")
    # Identity of the input BAM inside Redwood.
    filename = luigi.Parameter(default="filename")
    file_uuid = luigi.Parameter(default="uuid")
    bundle_uuid = luigi.Parameter(default="bundle_uuid")
    parent_uuids = luigi.ListParameter(default=["parent_uuid"])
    tmp_dir = luigi.Parameter(default='/tmp')

    def run(self):
        """Write the job's settings.json and build the Consonance command line."""
        print "** EXECUTING IN CONSONANCE **"
        print "** MAKE TEMP DIR **"
        # create a unique temp dir
        cmd = '''mkdir -p %s/consonance-jobs/AlignmentQCCoordinator/%s/''' % (self.tmp_dir, self.get_task_uuid())
        print cmd
        result = subprocess.call(cmd, shell=True)
        if result != 0:
            print "PROBLEMS MAKING DIR!!"
        print "** MAKE JSON FOR WORKER **"
        # create a json for FastQC which will be executed by the dockstore-tool-running-dockstore-tool and passed as base64encoded
        # will need to encode the JSON above in this: https://docs.python.org/2/library/base64.html
        # see http://luigi.readthedocs.io/en/stable/api/luigi.parameter.html?highlight=luigi.parameter
        json_str = '''{
"bam_input":
{
"class": "File",
"path": "redwood://%s/%s/%s/%s"
},
''' % (self.redwood_host, self.bundle_uuid, self.file_uuid, self.filename)
        json_str = json_str + '''"bamstats_report" :
{
"class": "File",
"path": "./tmp/bamstats_report.zip"
}
}
'''
        print "THE JSON: "+json_str
        # now make base64 encoded version
        base64_json_str = base64.urlsafe_b64encode(json_str)
        print "** MAKE JSON FOR DOCKSTORE TOOL WRAPPER **"
        # create a json for dockstoreRunningDockstoreTool, embed the FastQC JSON as a param
        p = self.output().open('w')
        print >>p, '''{
"json_encoded": "%s",
"docker_uri": "%s",
"dockstore_url": "%s",
"redwood_token": "%s",
"redwood_host": "%s",
"parent_uuids": "%s",
"workflow_type": "%s",
"tmpdir": "/datastore",
"vm_instance_type": "c4.8xlarge",
"vm_region": "us-west-2",
"vm_location": "aws",
"vm_instance_cores": 36,
"vm_instance_mem_gb": 60,
"output_metadata_json": "/tmp/final_metadata.json"
}''' % (base64_json_str, self.target_tool, self.target_tool_url, self.redwood_token, self.redwood_host, ','.join(map("{0}".format, self.parent_uuids)), self.workflow_type)
        p.close()
        # execute consonance run, parse the job UUID
        print "** SUBMITTING TO CONSONANCE **"
        cmd = ["consonance", "run", "--image-descriptor", self.image_descriptor, "--flavour", "c4.8xlarge", "--run-descriptor", p.path]
        print "executing:"+ ' '.join(cmd)
        # try:
        #     result = subprocess.call(cmd)
        # except Exception as e:
        #     print "Error in Consonance call!!!:" + e.message
        #
        # if result == 0:
        #     print "Consonance job return success code!"
        # else:
        #     print "ERROR: Consonance job failed!!!"

    def output(self):
        """Settings file that doubles as the task's completion marker (touch file)."""
        return luigi.LocalTarget('%s/consonance-jobs/AlignmentQCCoordinator/%s/settings.json' % (self.tmp_dir, self.get_task_uuid()))

    def get_task_uuid(self):
        """Deterministic UUIDv5 derived from the task inputs.

        The same inputs always produce the same id, which makes the task
        idempotent across coordinator runs.
        """
        # NOTE(review): reload(sys)/setdefaultencoding is a Python 2 hack to
        # tolerate non-ASCII parameter values in the concatenation below.
        reload(sys)
        sys.setdefaultencoding('utf8')
        print "FILENAME: "+self.filename+" FILE UUID: "+ self.file_uuid +" TARGET TOOL: "+ self.target_tool +" Target TOOL URL "+ self.target_tool_url +" REDWOOD TOKEN: "+ self.redwood_token +" REDWOOD HOST "+ self.redwood_host
        task_uuid = uuid5(uuid.NAMESPACE_DNS, (self.filename + self.file_uuid + self.target_tool + self.target_tool_url + self.redwood_token + self.redwood_host).encode('utf-8'))
        return task_uuid
class AlignmentQCCoordinatorV2(luigi.Task):
    """Scan Redwood metadata and Elasticsearch for BAMs that still lack an
    alignment-QC report, and schedule one ConsonanceTaskV2 per BAM
    (capped at ``max_jobs``)."""
    es_index_host = luigi.Parameter(default='localhost')
    es_index_port = luigi.Parameter(default='9200')
    redwood_token = luigi.Parameter("must_be_defined")
    redwood_client_path = luigi.Parameter(default='../ucsc-storage-client')
    redwood_host = luigi.Parameter(default='storage.ucsc-cgl.org')
    image_descriptor = luigi.Parameter(default="must be defined")
    dockstore_tool_running_dockstore_tool = luigi.Parameter(default="quay.io/ucsc_cgl/dockstore-tool-runner:1.0.7")
    tmp_dir = luigi.Parameter(default='/tmp')
    data_dir = luigi.Parameter(default='/tmp/data_dir')
    max_jobs = luigi.Parameter(default='1')
    # Maps "<bundle_uuid>_<file_name>" -> file_uuid; populated in requires().
    bundle_uuid_filename_to_file_uuid = {}

    def requires(self):
        """Build the list of ConsonanceTaskV2 jobs this batch depends on."""
        print "** COORDINATOR **"
        # now query the metadata service so I have the mapping of bundle_uuid & file names -> file_uuid
        print str("https://"+self.redwood_host+":8444/entities?page=0")
        # hack to get around none self signed certificates
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        json_str = urlopen(str("https://"+self.redwood_host+":8444/entities?page=0"), context=ctx).read()
        metadata_struct = json.loads(json_str)
        print "** METADATA TOTAL PAGES: "+str(metadata_struct["totalPages"])
        # Walk every metadata page to build the filename -> file_uuid index.
        for i in range(0, metadata_struct["totalPages"]):
            print "** CURRENT METADATA TOTAL PAGES: "+str(i)
            json_str = urlopen(str("https://"+self.redwood_host+":8444/entities?page="+str(i)), context=ctx).read()
            metadata_struct = json.loads(json_str)
            for file_hash in metadata_struct["content"]:
                self.bundle_uuid_filename_to_file_uuid[file_hash["gnosId"]+"_"+file_hash["fileName"]] = file_hash["id"]
        # now query elasticsearch
        es = Elasticsearch([{'host': self.es_index_host, 'port': self.es_index_port}])
        # see jqueryflag_alignment_qc
        # curl -XPOST http://localhost:9200/analysis_index/_search?pretty -d @jqueryflag_alignment_qc
        # Donors whose normal or tumor alignment-QC flag is still false.
        res = es.search(index="analysis_index", body={"query" : {"bool" : {"should" : [{"term" : { "flags.normal_alignment_qc_report" : "false"}},{"term" : {"flags.tumor_alignment_qc_report" : "false" }}],"minimum_should_match" : 1 }}}, size=5000)
        listOfJobs = []
        print("Got %d Hits:" % res['hits']['total'])
        for hit in res['hits']['hits']:
            print("\n\n\n%(donor_uuid)s %(submitter_donor_id)s %(center_name)s %(project)s" % hit["_source"])
            for specimen in hit["_source"]["specimen"]:
                for sample in specimen["samples"]:
                    for analysis in sample["analysis"]:
                        # A job is needed when the analysis is an alignment (or RNA-seq
                        # quantification) AND the matching normal/tumor QC report is
                        # still missing for this specific sample.
                        if (analysis["analysis_type"] == "alignment" or analysis["analysis_type"] == "rna_seq_quantification") and \
                                ((hit["_source"]["flags"]["normal_alignment_qc_report"] == False and \
                                re.match("^Normal - ", specimen["submitter_specimen_type"]) and \
                                sample["sample_uuid"] in hit["_source"]["missing_items"]["normal_alignment_qc_report"]) or \
                                (hit["_source"]["flags"]["tumor_alignment_qc_report"] == False and \
                                re.match("^Primary tumour - |^Recurrent tumour - |^Metastatic tumour - |^Xenograft - |^Cell line - ", specimen["submitter_specimen_type"]) and \
                                sample["sample_uuid"] in hit["_source"]["missing_items"]["tumor_alignment_qc_report"])):
                            print "HIT!!!! "+analysis["analysis_type"]+" "+str(hit["_source"]["flags"]["normal_alignment_qc_report"])+" "+specimen["submitter_specimen_type"]
                            parent_uuids = []
                            parent_uuids.append(sample["sample_uuid"])
                            for file in analysis["workflow_outputs"]:
                                if file["file_type"] == "bam":
                                    print " + will run report for %s file" % (file["file_path"])
                                    # Respect the batch cap.
                                    if len(listOfJobs) < int(self.max_jobs):
                                        listOfJobs.append(ConsonanceTaskV2(redwood_host=self.redwood_host, redwood_token=self.redwood_token, dockstore_tool_running_dockstore_tool=self.dockstore_tool_running_dockstore_tool, filename=file["file_path"], file_uuid = self.fileToUUID(file["file_path"], analysis["bundle_uuid"]), bundle_uuid = analysis["bundle_uuid"], parent_uuids = parent_uuids, tmp_dir=self.tmp_dir, image_descriptor=self.image_descriptor))
        # these jobs are yielded to
        return listOfJobs

    def run(self):
        """Write the batch-complete marker file once all jobs have run."""
        # now make a final report
        f = self.output().open('w')
        # TODO: could print report on what was successful and what failed? Also, provide enough details like donor ID etc
        print >>f, "batch is complete"
        f.close()

    def output(self):
        """Timestamped report file, unique per coordinator invocation."""
        # the final report
        ts = time.time()
        ts_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
        return luigi.LocalTarget('%s/consonance-jobs/AlignmentQCCoordinator/AlignmentQCTask-%s.txt' % (self.tmp_dir, ts_str))

    def fileToUUID(self, input, bundle_uuid):
        """Look up the Redwood file UUID for a (bundle, filename) pair."""
        return self.bundle_uuid_filename_to_file_uuid[bundle_uuid+"_"+input]
if __name__ == '__main__':
    # Luigi CLI entry point, e.g.: python this_file.py AlignmentQCCoordinatorV2 ...
    luigi.run()
|
nilq/baby-python
|
python
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ExecutorFactory."""
from unittest import mock
from tensorflow_federated.python.common_libs import test as common_test
from tensorflow_federated.python.core.impl import eager_executor
from tensorflow_federated.python.core.impl import executor_factory
from tensorflow_federated.python.core.impl.compiler import placement_literals
from tensorflow_federated.python.core.impl.executors import executor_base
class ExecutorFactoryImplTest(common_test.TestCase):
  """Tests for the abstract ExecutorFactory contract and its concrete impl."""

  def test_subclass_base_fails_no_create_method(self):
    """A subclass missing create_executor must stay abstract."""

    class MissingCreate(executor_factory.ExecutorFactory):

      def clean_up_executors(self):
        pass

    with self.assertRaisesRegex(TypeError, 'instantiate abstract class'):
      MissingCreate()

  def test_subclass_base_fails_no_cleanup(self):
    """A subclass missing clean_up_executors must stay abstract."""

    class MissingCleanup(executor_factory.ExecutorFactory):

      def create_executor(self, x):
        pass

    with self.assertRaisesRegex(TypeError, 'instantiate abstract class'):
      MissingCleanup()

  def test_instantiation_succeeds_both_methods_specified(self):
    """Implementing both abstract methods makes the subclass concrete."""

    class Complete(executor_factory.ExecutorFactory):

      def create_executor(self, x):
        pass

      def clean_up_executors(self):
        pass

    Complete()

  def test_concrete_class_instantiates_stack_fn(self):

    def _executor_fn(cardinalities):
      del cardinalities  # Unused
      return eager_executor.EagerExecutor()

    impl = executor_factory.ExecutorFactoryImpl(_executor_fn)
    self.assertIsInstance(impl, executor_factory.ExecutorFactoryImpl)

  def test_call_constructs_executor(self):

    def _executor_fn(cardinalities):
      del cardinalities  # Unused
      return eager_executor.EagerExecutor()

    impl = executor_factory.ExecutorFactoryImpl(_executor_fn)
    executor = impl.create_executor({})
    self.assertIsInstance(executor, executor_base.Executor)

  def test_cleanup_succeeds_without_init(self):
    """Cleanup on a factory that never built anything must not raise."""

    def _executor_fn(cardinalities):
      del cardinalities  # Unused
      return eager_executor.EagerExecutor()

    impl = executor_factory.ExecutorFactoryImpl(_executor_fn)
    impl.clean_up_executors()

  def test_cleanup_calls_close(self):
    executor = eager_executor.EagerExecutor()
    executor.close = mock.MagicMock()

    def _executor_fn(cardinalities):
      del cardinalities  # Unused
      return executor

    impl = executor_factory.ExecutorFactoryImpl(_executor_fn)
    impl.create_executor({})
    impl.clean_up_executors()
    executor.close.assert_called_once()

  def test_construction_with_multiple_cardinalities_reuses_existing_stacks(
      self):
    executor = eager_executor.EagerExecutor()
    executor.close = mock.MagicMock()
    construction_count = 0

    def _executor_fn(cardinalities):
      del cardinalities  # Unused
      nonlocal construction_count
      construction_count += 1
      return executor

    impl = executor_factory.ExecutorFactoryImpl(_executor_fn)
    for _ in range(2):
      impl.create_executor({})
      impl.create_executor({placement_literals.SERVER: 1})
    # Two distinct cardinality dicts -> exactly two stack constructions,
    # regardless of how many times each was requested.
    self.assertEqual(construction_count, 2)
if __name__ == '__main__':
  # Run under the TFF test harness when executed directly.
  common_test.main()
|
nilq/baby-python
|
python
|
import weakref
import numpy as np
import qmhub.helpmelib as pme
from .dobject import cache_update
class DependPME(pme.PMEInstanceD):
    """Lazily-(re)configured particle-mesh Ewald instance.

    The PME setup is recomputed from ``cell_basis`` whenever the cache is
    invalid; ``compute_recip_esp`` is wrapped with ``cache_update`` so the
    setup runs before each evaluation when needed.
    """

    def __init__(self, cell_basis, alpha, order, nfft):
        super().__init__()
        self._name = "PME"
        # Static PME parameters; cell_basis is the only dynamic dependency.
        self._kwargs = {"alpha": alpha, "order": order, "nfft": nfft}
        self._dependencies = [cell_basis]
        self._dependants = []
        self._cache_valid = False

    def _func(self, cell_basis, alpha, order, nfft):
        # Fix: np.asscalar() was deprecated in NumPy 1.16 and removed in
        # 1.23; np.asarray(...).item() is the documented replacement and
        # accepts the same 0-d array / numpy-scalar inputs.
        super().setup(
            1,
            np.asarray(alpha).item(),
            order,
            *nfft.tolist(),
            1.,
            1,
        )
        # Orthorhombic cell: pass the diagonal lengths with 90-degree angles.
        super().set_lattice_vectors(
            *np.diag(cell_basis).tolist(),
            *[90., 90., 90.],
            self.LatticeType.XAligned,
        )

    @cache_update
    def compute_recip_esp(self, positions, grid_positions, grid_charges):
        """Reciprocal-space potential (and field) at each position.

        Returns a (4, n_positions) contiguous array.
        """
        recip_esp = np.zeros((len(positions.T), 4))
        # helpmelib expects row-major (n, 3) coordinate matrices.
        charges = np.ascontiguousarray(grid_charges)[:, np.newaxis]
        coord1 = np.ascontiguousarray(grid_positions.T)
        coord2 = np.ascontiguousarray(positions.T)
        mat = pme.MatrixD
        super().compute_P_rec(
            0,
            mat(charges),
            mat(coord1),
            mat(coord2),
            1,
            mat(recip_esp),
        )
        return np.ascontiguousarray(recip_esp.T)

    def add_dependant(self, dependant):
        # Weak references so dependants do not keep this object alive.
        self._dependants.append(weakref.ref(dependant))

    def update_cache(self):
        """Re-run the PME setup if any dependency changed since last use."""
        if not self._cache_valid:
            self._func(*self._dependencies, **self._kwargs)
            self._cache_valid = True
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
class NeuralColumn(nn.Module):
    """A small convolutional column.

    ``channels`` is the output channel count of every convolution except the
    last one, which maps to ``output_dim`` features; the forward pass ends
    with a global spatial max-pool.
    """

    def __init__(self, channels: int, output_dim: int) -> None:
        super(NeuralColumn, self).__init__()
        layers = [
            nn.Conv2d(3, channels, kernel_size=5),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(channels, channels, kernel_size=3),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(channels, output_dim, kernel_size=3, bias=False),
        ]
        self._conv_net = nn.Sequential(*layers)

    def output_dim(self) -> int:
        # Output channel count of the final convolution.
        return self._conv_net[-1].weight.size(0)

    def channels(self) -> int:
        # Output channel count of the first convolution.
        return self._conv_net[0].weight.size(0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = self._conv_net(x)
        # Global max-pool over both spatial axes (width first, then height).
        pooled, _ = features.max(dim=3)
        pooled, _ = pooled.max(dim=2)
        return pooled.view(x.size(0), self.output_dim())
class Ensemble(nn.Module):
    """Concatenates the outputs of several independent NeuralColumn nets."""

    def __init__(self, n_columns: int, column_dim: int, channels: int) -> None:
        super(Ensemble, self).__init__()
        members = [NeuralColumn(channels, column_dim) for _ in range(n_columns)]
        self.columns = nn.ModuleList(members)

    def num_columns(self) -> int:
        return len(self.columns)

    def channels(self) -> int:
        # All columns share the same width; query the first one.
        return self.columns[0].channels()

    def column_dim(self) -> int:
        return self.columns[0].output_dim()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        outputs = [column(x) for column in self.columns]
        # Feature-wise concatenation: (batch, n_columns * column_dim).
        return torch.cat(outputs, dim=1)
def load_model(path: str) -> nn.Module:
    """Rebuild a frozen Ensemble from a checkpoint file.

    The architecture is recovered from the filename, whose last three
    underscore-separated fields are ``<columns>_<channels>_<column_dim>``.
    """
    print("loading model")
    n_columns, n_channels, dim = (int(part) for part in path.split("_")[-3:])
    net = Ensemble(n_columns=n_columns, column_dim=dim, channels=n_channels)
    net.load_state_dict(torch.load(path))
    # Inference only: switch to eval mode and freeze every parameter.
    net.eval()
    for param in net.parameters():
        param.requires_grad_(False)
    print("model loaded")
    return net
|
nilq/baby-python
|
python
|
"""D-Bus interface objects."""
from .systemd import Systemd
from .hostname import Hostname
from .rauc import Rauc
from ..coresys import CoreSysAttributes
class DBusManager(CoreSysAttributes):
    """Container for the D-Bus interface wrappers used by the core."""

    def __init__(self, coresys):
        """Create the individual wrappers (not yet connected to D-Bus)."""
        self.coresys = coresys
        self._systemd = Systemd()
        self._hostname = Hostname()
        self._rauc = Rauc()

    @property
    def systemd(self):
        """systemd D-Bus wrapper."""
        return self._systemd

    @property
    def hostname(self):
        """hostname D-Bus wrapper."""
        return self._hostname

    @property
    def rauc(self):
        """RAUC D-Bus wrapper."""
        return self._rauc

    async def load(self):
        """Connect every wrapper to the system D-Bus."""
        await self.systemd.connect()
        await self.hostname.connect()
        await self.rauc.connect()
|
nilq/baby-python
|
python
|
from enforce_typing import enforce_types
from engine import AgentBase
from agents.PublisherAgent import PublisherAgent
from agents.SpeculatorAgent import StakerspeculatorAgent
from agents.DataconsumerAgent import DataconsumerAgent
@enforce_types
class DataecosystemAgent(AgentBase.AgentBaseNoEvm):
    """Will operate as a high-fidelity replacement for MarketplacesAgents,
    when it's ready."""

    def takeStep(self, state):
        # For each agent kind: a predicate decides whether one is still
        # missing, and a factory registers a fresh instance if so.
        if self._doCreatePublisherAgent(state):
            self._createPublisherAgent(state)
        if self._doCreateStakerspeculatorAgent(state):
            self._createStakerspeculatorAgent(state)
        if self._doCreateDataconsumerAgent(state):
            self._createDataconsumerAgent(state)

    @staticmethod
    def _doCreatePublisherAgent(state) -> bool:
        # magic number: rule - only create if no agents so far
        return not state.publisherAgents()

    def _createPublisherAgent(self, state) -> None:  # pylint: disable=no-self-use
        # Funding levels are magic numbers.
        new_agent = PublisherAgent(name="foo_publisher", USD=0.0, OCEAN=1000.0)
        state.addAgent(new_agent)

    @staticmethod
    def _doCreateStakerspeculatorAgent(state) -> bool:
        # magic number: rule - only create if no agents so far
        return not state.stakerspeculatorAgents()

    def _createStakerspeculatorAgent(  # pylint: disable=no-self-use
        self, state
    ) -> None:
        # Funding levels are magic numbers.
        new_agent = StakerspeculatorAgent(
            name="foo_stakerspeculator", USD=0.0, OCEAN=1000.0
        )
        state.addAgent(new_agent)

    @staticmethod
    def _doCreateDataconsumerAgent(state) -> bool:
        # magic number: rule - only create if no agents so far
        # NOTE(review): "dataconumer" spelling matches the state API call in
        # the original code — verify against the SimState implementation.
        return not state.dataconumerAgents()

    def _createDataconsumerAgent(self, state) -> None:  # pylint: disable=no-self-use
        # Funding levels are magic numbers.
        new_agent = DataconsumerAgent(name="foo_dataconsumer", USD=0.0, OCEAN=1000.0)
        state.addAgent(new_agent)
|
nilq/baby-python
|
python
|
from django.core.management.commands.test import Command as TestCommand
from jsdir.core import JSDir
class Command(TestCommand):
    """Variant of Django's ``test`` management command that turns on JSDir's
    use of staticfiles finders before the base command initializes."""

    def __init__(self):
        JSDir.set_use_finders(True)  # sets the value only for this thread
        super(Command, self).__init__()
|
nilq/baby-python
|
python
|
import matplotlib as mil
import tensorflow as tf
from matplotlib import pyplot
# Edge-detection demo: read one JPEG through the TF1 queue-runner input
# pipeline, convolve it with a Laplacian-style kernel, and save the result.
# NOTE(review): string_input_producer / WholeFileReader /
# initialize_all_variables are TF1-era APIs removed in TF2 — confirm the
# intended TensorFlow version.
fig = pyplot.gcf()
fig.set_size_inches(4, 4)
sess = tf.InteractiveSession()
image_filename = "/home/ubuntu/Downloads/n02107142_16917.jpg"
filename_queue = tf.train.string_input_producer([image_filename])  # list of files to read
reader = tf.WholeFileReader()  # NOTE(review): unused; image_reader below is what gets used
try:
    image_reader = tf.WholeFileReader()
    # Read one (filename, contents) pair and decode the JPEG to a uint8 tensor.
    _, image_file = image_reader.read(filename_queue)
    image = tf.image.decode_jpeg(image_file)
    print(image)
except Exception as e:
    print(e)
sess.run(tf.initialize_all_variables())
# Queue runners must be started or the read() op would block forever.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# Add a batch dimension and convert to float32.
image_batch = tf.image.convert_image_dtype(tf.expand_dims(image, 0), tf.float32, saturate=False)
# In[8]:
# 3x3 edge-detection kernel, applied independently per RGB channel
# (-1 ring around a center weight of 8 on each channel's diagonal).
kernel = tf.constant([
    [
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
    ],
    [
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
        [[ 8., 0., 0.], [ 0., 8., 0.], [ 0., 0., 8.]],
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
    ],
    [
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
        [[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
    ]
])
conv2d = tf.nn.conv2d(image_batch, kernel, [1, 1, 1, 1], padding="SAME")
# Clip activations into a displayable range before plotting.
activation_map = sess.run(tf.minimum(tf.nn.relu(conv2d), 255))
fig = pyplot.gcf()
pyplot.imshow(activation_map[0], interpolation='nearest')
fig.set_size_inches(4, 4)
fig.savefig("./example-edge-detection.png")
#pyplot.show()
|
nilq/baby-python
|
python
|
#
# Copyright (C) 2014-2015 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Ben Dyer <ben_dyer@mac.com>
# Pavel Kirienko <pavel.kirienko@zubax.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import sys
import time
import math
import copy
import struct
import functools
try:
import collections.abc # Python 3
MutableSequence = collections.abc.MutableSequence
except ImportError:
import collections # Python 2
MutableSequence = collections.MutableSequence
import uavcan
import uavcan.dsdl as dsdl
import uavcan.dsdl.common as common
# Python 2/3 compatibility shims.
try:
    long  # Python 2
except NameError:
    long = int  # Python 3

if sys.version_info[0] < 3:
    bchr = chr
else:
    def bchr(x):
        # Single byte (bytes of length 1) from an int, like Python 2 chr().
        return bytes([x])
def get_uavcan_data_type(obj):
    """Return the DSDL type descriptor attached to a generated value object."""
    # noinspection PyProtectedMember
    return obj._type
def is_union(obj):
    """True if *obj* is a DSDL union (only one field active at a time)."""
    if not isinstance(obj, CompoundValue):
        raise ValueError('Only CompoundValue can be union')
    # noinspection PyProtectedMember
    return obj._is_union
def get_active_union_field(obj):
    """Return the name of the union field currently selected, or None."""
    if not is_union(obj):
        raise ValueError('Object is not a union')
    # noinspection PyProtectedMember
    return obj._union_field
def switch_union_field(obj, value):
    """Select which union field of *obj* is active."""
    if not is_union(obj):
        raise ValueError('Object is not a union')
    # noinspection PyProtectedMember
    obj._union_field = value
def get_fields(obj):
    """Return the ordered field mapping of a compound value."""
    if not isinstance(obj, CompoundValue):
        raise ValueError('Only CompoundValue can have fields')
    # noinspection PyProtectedMember
    return obj._fields
def get_constants(obj):
    """Return the DSDL constants declared on a compound value's type."""
    if not isinstance(obj, CompoundValue):
        raise ValueError('Only CompoundValue can have constants')
    # noinspection PyProtectedMember
    return obj._constants
def is_request(obj):
    """True if *obj* is the request half of a service type."""
    # noinspection PyProtectedMember
    return obj._mode == 'request'
def is_response(obj):
    """True if *obj* is the response half of a service type."""
    # noinspection PyProtectedMember
    return obj._mode == 'response'
def bits_from_bytes(s):
    """Render a byte sequence as a string of '0'/'1' characters, MSB first."""
    chunks = []
    for octet in s:
        chunks.append(format(octet, "08b"))
    return "".join(chunks)
def bytes_from_bits(s):
    """Pack a '0'/'1' string into a bytearray, zero-padding the tail to 8 bits."""
    remainder = len(s) % 8
    if remainder:
        s += "0" * (8 - remainder)
    return bytearray(int(s[i:i + 8], 2) for i in range(0, len(s), 8))
def be_from_le_bits(s, bitlen):
    """Reorder a little-endian bit string (LSB byte first) into big-endian.

    The input is truncated to *bitlen* bits from the front; fewer than
    *bitlen* bits is an error.
    """
    if len(s) < bitlen:
        raise ValueError("Not enough bits; need {0} but got {1}".format(bitlen, len(s)))
    if len(s) > bitlen:
        s = s[0:bitlen]
    octets = [s[i:i + 8] for i in range(0, len(s), 8)]
    octets.reverse()
    return "".join(octets)
def le_from_be_bits(s, bitlen):
    """Reorder a big-endian bit string into little-endian (LSB byte first).

    The input is truncated to its last *bitlen* bits; fewer than *bitlen*
    bits is an error.
    """
    if len(s) < bitlen:
        raise ValueError("Not enough bits; need {0} but got {1}".format(bitlen, len(s)))
    if len(s) > bitlen:
        s = s[len(s) - bitlen:]
    pieces = []
    i = len(s)
    while i > 0:
        pieces.append(s[max(0, i - 8):i])
        i -= 8
    return "".join(pieces)
def format_bits(s):
    """Group a bit string into space-separated octets for display."""
    octets = [s[i:i + 8] for i in range(0, len(s), 8)]
    return " ".join(octets)
def union_tag_bits_from_num_elements(num_elements):
    """Number of bits needed for a union tag selecting among *num_elements* fields.

    Equivalent to ceil(log2(num_elements)) but computed with exact integer
    arithmetic: float math.log() can round upward at large exact powers of
    two (e.g. math.log(2**49, 2) > 49), which would produce an off-by-one
    bit width.  *num_elements* must be >= 1.
    """
    return (num_elements - 1).bit_length()
def array_len_bits_from_max_size(max_size):
    """Number of bits for a dynamic-array length field holding 0..max_size.

    Equivalent to ceil(log2(max_size + 1)) but computed with exact integer
    arithmetic, avoiding float rounding in math.log() for large exact
    powers of two.  *max_size* must be >= 0.
    """
    return max_size.bit_length()
def enum_mark_last(iterable, start=0):
    """Like enumerate(), but each tuple also says whether its item is the last.

    Yields (index, is_last, item) triples; an empty iterable yields nothing.
    Usage:
    >>> iterable = range(10)
    >>> for index, is_last, item in enum_mark_last(iterable):
    >>>     print(index, item, end='\n' if is_last else ', ')
    """
    iterator = iter(iterable)
    try:
        pending = next(iterator)
    except StopIteration:
        return
    index = start
    for item in iterator:
        # pending has a successor, so it cannot be the last element.
        yield index, False, pending
        pending = item
        index += 1
    yield index, True, pending
class Float32IntegerUnion(object):
    """Emulates a C ``union { std::uint32_t u; float f; }``.

    Both properties view the same four underlying bytes, so writing one
    reinterprets the bit pattern of the other.
    """

    def __init__(self, integer=None, floating_point=None):
        # Backing storage: four zero bytes in native byte order.
        self._storage = struct.pack("=L", 0)
        if integer is not None:
            assert floating_point is None
            self.u = int(integer)
        if floating_point is not None:
            self.f = float(floating_point)

    @property
    def u(self):
        return struct.unpack("=I", self._storage)[0]

    @u.setter
    def u(self, value):
        assert isinstance(value, (int, long))
        self._storage = struct.pack("=I", value)

    @property
    def f(self):
        return struct.unpack("=f", self._storage)[0]

    @f.setter
    def f(self, value):
        assert isinstance(value, float)
        self._storage = struct.pack("=f", value)
def f16_from_f32(float32):
    """Convert a Python float to the nearest IEEE 754 binary16 bit pattern.

    Directly translated from libuavcan's implementation in C++.
    Returns the 16-bit pattern as an int.
    """
    f32infty = Float32IntegerUnion(integer=255 << 23)  # float32 +inf bit pattern
    f16infty = Float32IntegerUnion(integer=31 << 23)   # float16 +inf, pre-shift
    magic = Float32IntegerUnion(integer=15 << 23)      # exponent rebias factor
    inval = Float32IntegerUnion(floating_point=float32)
    sign_mask = 0x80000000
    round_mask = ~0xFFF  # drops mantissa bits below binary16 precision
    sign = inval.u & sign_mask
    inval.u ^= sign  # work on the absolute value; sign reattached at the end
    if inval.u >= f32infty.u:  # Inf or NaN (all exponent bits set)
        out = 0x7FFF if inval.u > f32infty.u else 0x7C00
    else:
        inval.u &= round_mask
        inval.f *= magic.f  # rebias the exponent via a float multiply
        inval.u -= round_mask
        if inval.u > f16infty.u:
            inval.u = f16infty.u  # Clamp to signed infinity if overflowed
        out = (inval.u >> 13) & 0xFFFF  # Take the bits!
    return out | (sign >> 16) & 0xFFFF
def f32_from_f16(float16):
    """Expand an IEEE 754 binary16 bit pattern (int) to a Python float.

    Directly translated from libuavcan's implementation in C++.
    """
    magic = Float32IntegerUnion(integer=(254 - 15) << 23)       # exponent rebias factor
    was_inf_nan = Float32IntegerUnion(integer=(127 + 16) << 23)  # threshold for Inf/NaN inputs
    out = Float32IntegerUnion(integer=(float16 & 0x7FFF) << 13)  # exponent/mantissa bits
    out.f *= magic.f  # exponent adjust
    if out.f >= was_inf_nan.f:  # make sure Inf/NaN survive
        out.u |= 255 << 23
    out.u |= (float16 & 0x8000) << 16  # sign bit
    return out.f
def cast(value, dtype):
    """Clamp or truncate *value* according to the DSDL primitive's cast mode."""
    if dtype.cast_mode == dsdl.PrimitiveType.CAST_MODE_SATURATED:
        # Saturation: clamp into the type's representable range.
        if value > dtype.value_range[1]:
            value = dtype.value_range[1]
        elif value < dtype.value_range[0]:
            value = dtype.value_range[0]
        return value
    elif dtype.cast_mode == dsdl.PrimitiveType.CAST_MODE_TRUNCATED and dtype.kind == dsdl.PrimitiveType.KIND_FLOAT:
        # Truncated floats overflow to +/-inf; NaN passes through untouched.
        if not math.isnan(value) and value > dtype.value_range[1]:
            value = float("+inf")
        elif not math.isnan(value) and value < dtype.value_range[0]:
            value = float("-inf")
        return value
    elif dtype.cast_mode == dsdl.PrimitiveType.CAST_MODE_TRUNCATED:
        # Truncated integers keep only the low bitlen bits.
        return value & ((1 << dtype.bitlen) - 1)
    else:
        raise ValueError("Invalid cast_mode: " + repr(dtype))
class BaseValue(object):
    """Common base for all generated value wrappers.

    The value is held in ``self._bits`` as a big-endian '0'/'1' string
    (None until assigned); (un)packing converts to/from the little-endian
    wire bit stream.
    """
    # noinspection PyUnusedLocal
    def __init__(self, _uavcan_type, *_args, **_kwargs):
        self._type = _uavcan_type
        self._bits = None  # big-endian bit string, or None if unset

    def _unpack(self, stream, tao):
        # tao: tail-array-optimization flag; unused here, honored by subclasses.
        if self._type.bitlen:
            self._bits = be_from_le_bits(stream, self._type.bitlen)
            return stream[self._type.bitlen:]
        else:
            return stream

    def _pack(self, tao):
        if self._bits:
            return le_from_be_bits(self._bits, self._type.bitlen)
        else:
            # Never assigned: serialize as all zero bits.
            return "0" * self._type.bitlen
class VoidValue(BaseValue):
    """Padding field: consumes/emits its bit length but carries no value."""
    def _unpack(self, stream, tao):
        return stream[self._type.bitlen:]

    def _pack(self, tao):
        return "0" * self._type.bitlen
class PrimitiveValue(BaseValue):
    """Wrapper for a DSDL primitive (bool, signed/unsigned int, float).

    The ``value`` property converts between native Python types and the
    big-endian bit-string representation stored in ``self._bits``.
    """
    def __init__(self, _uavcan_type, *args, **kwargs):
        super(PrimitiveValue, self).__init__(_uavcan_type, *args, **kwargs)
        # Default initialization
        self.value = 0

    def __repr__(self):
        return repr(self.value)

    @property
    def value(self):
        # None until the field has been assigned or unpacked.
        if not self._bits:
            return None
        int_value = int(self._bits, 2)
        if self._type.kind == dsdl.PrimitiveType.KIND_BOOLEAN:
            return bool(int_value)
        elif self._type.kind == dsdl.PrimitiveType.KIND_UNSIGNED_INT:
            return int_value
        elif self._type.kind == dsdl.PrimitiveType.KIND_SIGNED_INT:
            if int_value >= (1 << (self._type.bitlen - 1)):
                # High bit set: undo two's complement to get the negative value.
                int_value = -((1 << self._type.bitlen) - int_value)
            return int_value
        elif self._type.kind == dsdl.PrimitiveType.KIND_FLOAT:
            # Reinterpret the stored bit pattern as an IEEE 754 float.
            if self._type.bitlen == 16:
                return f32_from_f16(int_value)
            elif self._type.bitlen == 32:
                return struct.unpack("<f", struct.pack("<L", int_value))[0]
            elif self._type.bitlen == 64:
                return struct.unpack("<d", struct.pack("<Q", int_value))[0]
            else:
                raise ValueError('Bad float')

    @value.setter
    def value(self, new_value):
        if new_value is None:
            raise ValueError("Can't serialize a None value")
        elif self._type.kind == dsdl.PrimitiveType.KIND_BOOLEAN:
            self._bits = "1" if new_value else "0"
        elif self._type.kind == dsdl.PrimitiveType.KIND_UNSIGNED_INT:
            new_value = cast(new_value, self._type)
            self._bits = format(new_value, "0" + str(self._type.bitlen) + "b")
        elif self._type.kind == dsdl.PrimitiveType.KIND_SIGNED_INT:
            new_value = cast(new_value, self._type)
            if new_value < 0:  # Computing two's complement for negatives
                new_value += 2 ** self._type.bitlen
            self._bits = format(new_value, "0" + str(self._type.bitlen) + "b")
        elif self._type.kind == dsdl.PrimitiveType.KIND_FLOAT:
            new_value = cast(new_value, self._type)
            # Reinterpret the float's IEEE bit pattern as an unsigned int.
            if self._type.bitlen == 16:
                int_value = f16_from_f32(new_value)
            elif self._type.bitlen == 32:
                int_value = struct.unpack("<L", struct.pack("<f", new_value))[0]
            elif self._type.bitlen == 64:
                int_value = struct.unpack("<Q", struct.pack("<d", new_value))[0]
            else:
                raise ValueError('Bad float, no donut')
            self._bits = format(int_value, "0" + str(self._type.bitlen) + "b")
# noinspection PyProtectedMember
class ArrayValue(BaseValue, MutableSequence):
def __init__(self, _uavcan_type, *args, **kwargs):
super(ArrayValue, self).__init__(_uavcan_type, *args, **kwargs)
if isinstance(self._type.value_type, dsdl.PrimitiveType):
self.__item_ctor = functools.partial(PrimitiveValue, self._type.value_type)
elif isinstance(self._type.value_type, dsdl.ArrayType):
self.__item_ctor = functools.partial(ArrayValue, self._type.value_type)
elif isinstance(self._type.value_type, dsdl.CompoundType):
self.__item_ctor = functools.partial(CompoundValue, self._type.value_type)
if self._type.mode == dsdl.ArrayType.MODE_STATIC:
self.__items = list(self.__item_ctor() for _ in range(self._type.max_size))
else:
self.__items = []
def __repr__(self):
return "ArrayValue(type={0!r}, items={1!r})".format(self._type, self.__items)
def __str__(self):
if self._type.is_string_like:
# noinspection PyBroadException
try:
return self.decode()
except Exception:
pass
return self.__repr__()
def __getitem__(self, idx):
if isinstance(self.__items[idx], PrimitiveValue):
return self.__items[idx].value if self.__items[idx]._bits else 0
else:
return self.__items[idx]
def __setitem__(self, idx, value):
if idx >= self._type.max_size:
raise IndexError("Index {0} too large (max size {1})".format(idx, self._type.max_size))
if isinstance(self._type.value_type, dsdl.PrimitiveType):
self.__items[idx].value = value
else:
self.__items[idx] = value
def __delitem__(self, idx):
del self.__items[idx]
def __len__(self):
return len(self.__items)
def __eq__(self, other):
if isinstance(other, str):
return self.decode() == other
else:
return list(self) == other
def clear(self):
try:
while True:
self.pop()
except IndexError:
pass
def new_item(self):
return self.__item_ctor()
def insert(self, idx, value):
if idx >= self._type.max_size:
raise IndexError("Index {0} too large (max size {1})".format(idx, self._type.max_size))
elif len(self) == self._type.max_size:
raise IndexError("Array already full (max size {0})".format(self._type.max_size))
if isinstance(self._type.value_type, dsdl.PrimitiveType):
new_item = self.__item_ctor()
new_item.value = value
self.__items.insert(idx, new_item)
else:
self.__items.insert(idx, value)
def _unpack(self, stream, tao):
if self._type.mode == dsdl.ArrayType.MODE_STATIC:
for _, last, i in enum_mark_last(range(self._type.max_size)):
stream = self.__items[i]._unpack(stream, tao and last)
elif tao and self._type.value_type.get_min_bitlen() >= 8:
del self[:]
while len(stream) >= 8:
new_item = self.__item_ctor()
stream = new_item._unpack(stream, False)
self.__items.append(new_item)
stream = ''
else:
del self[:]
count_width = array_len_bits_from_max_size(self._type.max_size)
count = int(be_from_le_bits(stream[0:count_width], count_width), 2)
stream = stream[count_width:]
for _, last, i in enum_mark_last(range(count)):
new_item = self.__item_ctor()
stream = new_item._unpack(stream, tao and last)
self.__items.append(new_item)
return stream
def _pack(self, tao):
    """Serialize the array into a bit string, mirroring _unpack's layout."""
    self.__items = self.__items[:self._type.max_size]  # Constrain max len
    if self._type.mode == dsdl.ArrayType.MODE_STATIC:
        while len(self) < self._type.max_size:  # Constrain min len
            self.__items.append(self.new_item())
        return ''.join(i._pack(tao and last) for _, last, i in enum_mark_last(self.__items))
    elif tao and self._type.value_type.get_min_bitlen() >= 8:
        # Tail-array optimization: no length prefix for the terminating field.
        return ''.join(i._pack(False) for i in self.__items)
    else:
        # Dynamic array: little-endian length prefix followed by the items.
        count_width = array_len_bits_from_max_size(self._type.max_size)
        count = le_from_be_bits(format(len(self), '0{0:1d}b'.format(count_width)), count_width)
        return count + ''.join(i._pack(tao and last) for _, last, i in enum_mark_last(self.__items))
def from_bytes(self, value):
    """Replace the array contents with the individual bytes of value."""
    del self[:]
    for octet in bytearray(value):
        self.append(octet)
def to_bytes(self):
    """Return the array contents as bytes, skipping items with no bits set."""
    buf = bytearray()
    for item in self.__items:
        if item._bits:
            buf.append(item.value)
    return bytes(buf)
def encode(self, value, errors='strict'):
    """Replace the contents with the UTF-8 encoding of a text value.

    Only valid for arrays the DSDL type reports as string-like; `errors`
    is passed through to the codec.
    """
    if not self._type.is_string_like:
        raise ValueError('encode() can be used only with string-like arrays')
    del self[:]
    value = bytearray(value, encoding="utf-8", errors=errors)
    for byte in value:
        self.append(byte)
def decode(self, encoding="utf-8"):
    """Return the contents decoded as text (string-like arrays only)."""
    if not self._type.is_string_like:
        raise ValueError('decode() can be used only with string-like arrays')
    # Items with no decoded bits are skipped, matching to_bytes().
    return bytearray(item.value for item in self.__items if item._bits).decode(encoding)
# noinspection PyProtectedMember
class CompoundValue(BaseValue):
    """A DSDL compound (message or service) value with named fields and constants.

    For service types, `_mode` selects the "request" or "response" half of the
    definition.  Union types expose exactly one active field at a time: the
    first field read or written becomes the active member.
    """

    def __init__(self, _uavcan_type, _mode=None, *args, **kwargs):
        # Write through __dict__ so our custom __setattr__ is not triggered
        # before the field table exists.
        self.__dict__["_fields"] = collections.OrderedDict()
        self.__dict__["_constants"] = {}
        super(CompoundValue, self).__init__(_uavcan_type, *args, **kwargs)

        if self._type.kind == dsdl.CompoundType.KIND_SERVICE:
            if _mode == "request":
                source_fields = self._type.request_fields
                source_constants = self._type.request_constants
                self._is_union = self._type.request_union
            elif _mode == "response":
                source_fields = self._type.response_fields
                source_constants = self._type.response_constants
                self._is_union = self._type.response_union
            else:
                raise ValueError("mode must be either 'request' or 'response' for service types")
        else:
            if _mode is not None:
                raise ValueError("mode is not applicable for message types")
            source_fields = self._type.fields
            source_constants = self._type.constants
            self._is_union = self._type.union
        self._mode = _mode
        self._union_field = None

        for constant in source_constants:
            self._constants[constant.name] = constant.value

        # Build a wrapper value object for every field, keyed by field name.
        for idx, field in enumerate(source_fields):
            if isinstance(field.type, dsdl.VoidType):
                # Void (padding) fields get synthetic names so they remain addressable.
                self._fields["_void_{0}".format(idx)] = VoidValue(field.type)
            elif isinstance(field.type, dsdl.PrimitiveType):
                self._fields[field.name] = PrimitiveValue(field.type)
            elif isinstance(field.type, dsdl.ArrayType):
                self._fields[field.name] = ArrayValue(field.type)
            elif isinstance(field.type, dsdl.CompoundType):
                self._fields[field.name] = CompoundValue(field.type)

        # Apply any field values supplied as keyword arguments.
        for name, value in kwargs.items():
            if name.startswith('_'):
                raise NameError('%r is not a valid field name' % name)
            setattr(self, name, value)

    def __repr__(self):
        if self._is_union:
            field = self._union_field or list(self._fields.keys())[0]
            fields = "{0}={1!r}".format(field, self._fields[field])
        else:
            fields = ", ".join("{0}={1!r}".format(f, v) for f, v in self._fields.items() if not f.startswith("_void_"))
        return "{0}({1})".format(self._type.full_name, fields)

    def __copy__(self):
        # http://stackoverflow.com/a/15774013/1007777
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result

    def __deepcopy__(self, memo):
        # http://stackoverflow.com/a/15774013/1007777
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # noinspection PyArgumentList
            result.__dict__[k] = copy.deepcopy(v, memo)
        return result

    def __getattr__(self, attr):
        """Resolve constants and fields; primitive fields unwrap to plain values."""
        if attr in self._constants:
            return self._constants[attr]
        elif attr in self._fields:
            if self._is_union:
                if self._union_field and self._union_field != attr:
                    raise AttributeError(attr)
                else:
                    # Reading a union field selects it as the active member.
                    self._union_field = attr
            if isinstance(self._fields[attr], PrimitiveValue):
                return self._fields[attr].value
            else:
                return self._fields[attr]
        else:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        """Assign to a field; constants are read-only."""
        if attr in self._constants:
            raise AttributeError(attr + " is read-only")
        elif attr in self._fields:
            if self._is_union:
                if self._union_field and self._union_field != attr:
                    raise AttributeError(attr)
                else:
                    self._union_field = attr
            # noinspection PyProtectedMember
            attr_type = self._fields[attr]._type
            if isinstance(attr_type, dsdl.PrimitiveType):
                self._fields[attr].value = value
            elif isinstance(attr_type, dsdl.CompoundType):
                if not isinstance(value, CompoundValue):
                    raise AttributeError('Invalid type of the value, expected CompoundValue, got %r' % type(value))
                if attr_type.full_name != get_uavcan_data_type(value).full_name:
                    raise AttributeError('Incompatible type of the value, expected %r, got %r' %
                                         (attr_type.full_name, get_uavcan_data_type(value).full_name))
                # Shallow-copy so later mutation of `value` does not alias this field.
                self._fields[attr] = copy.copy(value)
            elif isinstance(attr_type, dsdl.ArrayType):
                self._fields[attr].clear()
                try:
                    if isinstance(value, str):
                        self._fields[attr].encode(value)
                    else:
                        for item in value:
                            self._fields[attr].append(item)
                except Exception as ex:
                    # We should be using 'raise from' here, but unfortunately we have to be compatible with 2.7
                    raise AttributeError('Array field could not be constructed from the provided value', ex)
            else:
                raise AttributeError(attr + " cannot be set directly")
        else:
            super(CompoundValue, self).__setattr__(attr, value)

    def _unpack(self, stream, tao=True):
        """Consume bits from stream into this value; return the remaining bits."""
        if self._is_union:
            # Unions are prefixed with a tag that selects the active field.
            tag_len = union_tag_bits_from_num_elements(len(self._fields))
            self._union_field = list(self._fields.keys())[int(stream[0:tag_len], 2)]
            stream = self._fields[self._union_field]._unpack(stream[tag_len:], tao)
        else:
            for _, last, field in enum_mark_last(self._fields.values()):
                stream = field._unpack(stream, tao and last)
        return stream

    def _pack(self, tao=True):
        """Serialize this value into a bit string, mirroring _unpack's layout."""
        if self._is_union:
            keys = list(self._fields.keys())
            field = self._union_field or keys[0]
            tag = keys.index(field)
            tag_len = union_tag_bits_from_num_elements(len(self._fields))
            return format(tag, '0' + str(tag_len) + 'b') + self._fields[field]._pack(tao)
        else:
            return ''.join(field._pack(tao and last) for _, last, field in enum_mark_last(self._fields.values()))
class Frame(object):
    """A single CAN frame: message ID, payload bytes, optional timestamps.

    The UAVCAN tail byte (last payload byte) carries the start-of-transfer,
    end-of-transfer and toggle flags plus the 5-bit transfer ID.
    """

    def __init__(self, message_id, data, ts_monotonic=None, ts_real=None):
        self.message_id = message_id
        self.bytes = bytearray(data)
        self.ts_monotonic = ts_monotonic
        self.ts_real = ts_real

    @property
    def transfer_key(self):
        # A transfer is uniquely identified by the message ID together with
        # the 5-bit transfer ID in the tail byte (None for an empty frame).
        tail = self.bytes[-1] if self.bytes else None
        return self.message_id, (tail & 0x1F) if tail is not None else None

    @property
    def toggle(self):
        return bool(self.bytes and (self.bytes[-1] & 0x20))

    @property
    def end_of_transfer(self):
        return bool(self.bytes and (self.bytes[-1] & 0x40))

    @property
    def start_of_transfer(self):
        return bool(self.bytes and (self.bytes[-1] & 0x80))
class TransferError(uavcan.UAVCANException):
    """Raised when received frames cannot be assembled into a valid transfer."""
    pass
class Transfer(object):
    """A UAVCAN transfer: a payload plus addressing and priority metadata.

    Handles serialization of a payload into one or more CAN frames
    (to_frames) and reassembly/validation from received frames (from_frames).
    """

    # Lowest priority (higher number = lower priority on the bus).
    DEFAULT_TRANSFER_PRIORITY = 31

    def __init__(self,
                 transfer_id=0,
                 source_node_id=0,
                 dest_node_id=None,
                 payload=None,
                 transfer_priority=None,
                 request_not_response=False,
                 service_not_message=False,
                 discriminator=None):
        self.transfer_priority = transfer_priority if transfer_priority is not None else self.DEFAULT_TRANSFER_PRIORITY
        self.transfer_id = transfer_id
        self.source_node_id = source_node_id
        self.dest_node_id = dest_node_id
        self.data_type_signature = 0
        self.request_not_response = request_not_response
        self.service_not_message = service_not_message
        self.discriminator = discriminator
        self.ts_monotonic = None
        self.ts_real = None
        if payload:
            # Serialize the payload immediately and capture its type metadata.
            # noinspection PyProtectedMember
            payload_bits = payload._pack()
            # Pad the bit string to a whole number of bytes.
            if len(payload_bits) & 7:
                payload_bits += "0" * (8 - (len(payload_bits) & 7))
            self.payload = bytes_from_bits(payload_bits)
            self.data_type_id = get_uavcan_data_type(payload).default_dtid
            self.data_type_signature = get_uavcan_data_type(payload).get_data_type_signature()
            self.data_type_crc = get_uavcan_data_type(payload).base_crc
        else:
            self.payload = None
            self.data_type_id = None
            self.data_type_signature = None
            self.data_type_crc = None
        self.is_complete = True if self.payload else False

    def __repr__(self):
        return "Transfer(id={0}, source_node_id={1}, dest_node_id={2}, transfer_priority={3}, payload={4!r})"\
            .format(self.transfer_id, self.source_node_id, self.dest_node_id, self.transfer_priority, self.payload)

    @property
    def message_id(self):
        """Compose the 29-bit CAN message ID from the transfer's metadata."""
        # Common fields
        id_ = (((self.transfer_priority & 0x1F) << 24) |
               (int(self.service_not_message) << 7) |
               (self.source_node_id or 0))
        if self.service_not_message:
            assert 0 <= self.data_type_id <= 0xFF
            assert 1 <= self.dest_node_id <= 0x7F
            # Service frame format
            id_ |= self.data_type_id << 16
            id_ |= int(self.request_not_response) << 15
            id_ |= self.dest_node_id << 8
        elif self.source_node_id == 0:
            assert self.dest_node_id is None
            assert self.discriminator is not None
            # Anonymous message frame format
            id_ |= self.discriminator << 10
            id_ |= (self.data_type_id & 0x3) << 8
        else:
            assert 0 <= self.data_type_id <= 0xFFFF
            # Message frame format
            id_ |= self.data_type_id << 8
        return id_

    @message_id.setter
    def message_id(self, value):
        """Decompose a received CAN message ID into the transfer's metadata."""
        self.transfer_priority = (value >> 24) & 0x1F
        self.service_not_message = bool(value & 0x80)
        self.source_node_id = value & 0x7F
        if self.service_not_message:
            self.data_type_id = (value >> 16) & 0xFF
            self.request_not_response = bool(value & 0x8000)
            self.dest_node_id = (value >> 8) & 0x7F
        elif self.source_node_id == 0:
            self.discriminator = (value >> 10) & 0x3FFF
            self.data_type_id = (value >> 8) & 0x3
        else:
            self.data_type_id = (value >> 8) & 0xFFFF

    def to_frames(self):
        """Split the serialized payload into CAN frames (7 payload bytes each)."""
        out_frames = []
        remaining_payload = self.payload
        # Prepend the transfer CRC to the payload if the transfer requires
        # multiple frames
        if len(remaining_payload) > 7:
            crc = common.crc16_from_bytes(self.payload,
                                          initial=self.data_type_crc)
            remaining_payload = bytearray([crc & 0xFF, crc >> 8]) + remaining_payload
        # Generate the frame sequence
        tail = 0x20  # set toggle bit high so the first frame is emitted with it cleared
        while True:
            # Tail byte contains start-of-transfer, end-of-transfer, toggle, and Transfer ID
            tail = ((0x80 if len(out_frames) == 0 else 0) |
                    (0x40 if len(remaining_payload) <= 7 else 0) |
                    ((tail ^ 0x20) & 0x20) |
                    (self.transfer_id & 0x1F))
            out_frames.append(Frame(message_id=self.message_id, data=remaining_payload[0:7] + bchr(tail)))
            remaining_payload = remaining_payload[7:]
            if not remaining_payload:
                break
        return out_frames

    def from_frames(self, frames):
        """Reassemble this transfer from a complete, ordered frame list.

        Raises TransferError on any inconsistency: wrong transfer ID,
        misplaced start/end flags, bad toggle sequence, unknown data type,
        or CRC mismatch on a multi-frame transfer.
        """
        # Initialize transfer timestamps from the first frame
        self.ts_monotonic = frames[0].ts_monotonic
        self.ts_real = frames[0].ts_real
        # Validate the flags in the tail byte
        expected_toggle = 0
        expected_transfer_id = frames[0].bytes[-1] & 0x1F
        for idx, f in enumerate(frames):
            tail = f.bytes[-1]
            if (tail & 0x1F) != expected_transfer_id:
                raise TransferError("Transfer ID {0} incorrect, expected {1}".format(tail & 0x1F, expected_transfer_id))
            elif idx == 0 and not (tail & 0x80):
                raise TransferError("Start of transmission not set on frame 0")
            elif idx > 0 and tail & 0x80:
                raise TransferError("Start of transmission set unexpectedly on frame {0}".format(idx))
            elif idx == len(frames) - 1 and not (tail & 0x40):
                raise TransferError("End of transmission not set on last frame")
            elif idx < len(frames) - 1 and (tail & 0x40):
                raise TransferError("End of transmission set unexpectedly on frame {0}".format(idx))
            elif (tail & 0x20) != expected_toggle:
                raise TransferError("Toggle bit value {0} incorrect on frame {1}".format(tail & 0x20, idx))
            expected_toggle ^= 0x20
        self.transfer_id = expected_transfer_id
        # Decoding the message ID populates addressing/type metadata.
        self.message_id = frames[0].message_id
        # Concatenate the payloads, dropping each frame's tail byte.
        payload_bytes = bytearray(b''.join(bytes(f.bytes[0:-1]) for f in frames))
        # Find the data type
        if self.service_not_message:
            kind = dsdl.CompoundType.KIND_SERVICE
        else:
            kind = dsdl.CompoundType.KIND_MESSAGE
        datatype = uavcan.DATATYPES.get((self.data_type_id, kind))
        if not datatype:
            raise TransferError("Unrecognised {0} type ID {1}"
                                .format("service" if self.service_not_message else "message", self.data_type_id))
        # For a multi-frame transfer, validate the CRC and frame indexes
        if len(frames) > 1:
            transfer_crc = payload_bytes[0] + (payload_bytes[1] << 8)
            payload_bytes = payload_bytes[2:]
            crc = common.crc16_from_bytes(payload_bytes, initial=datatype.base_crc)
            if crc != transfer_crc:
                raise TransferError("CRC mismatch: expected {0:x}, got {1:x} for payload {2!r} (DTID {3:d})"
                                    .format(crc, transfer_crc, payload_bytes, self.data_type_id))
        self.data_type_id = datatype.default_dtid
        self.data_type_signature = datatype.get_data_type_signature()
        self.data_type_crc = datatype.base_crc
        if self.service_not_message:
            self.payload = datatype(_mode="request" if self.request_not_response else "response")
        else:
            self.payload = datatype()
        # noinspection PyProtectedMember
        self.payload._unpack(bits_from_bytes(payload_bytes))

    @property
    def key(self):
        """Unique key for matching frames to this transfer."""
        return self.message_id, self.transfer_id

    def is_response_to(self, transfer):
        """Return True if this transfer is the service response to `transfer`."""
        if (transfer.service_not_message and self.service_not_message and
                transfer.request_not_response and
                not self.request_not_response and
                transfer.dest_node_id == self.source_node_id and
                transfer.source_node_id == self.dest_node_id and
                transfer.data_type_id == self.data_type_id and
                transfer.transfer_id == self.transfer_id):
            return True
        else:
            return False
class TransferManager(object):
    """Reassembles multi-frame transfers from individually received frames."""

    def __init__(self):
        # Maps frame.transfer_key -> list of frames received so far.
        self.active_transfers = {}
        # Maps frame.transfer_key -> monotonic timestamp of the last frame.
        self.active_transfer_timestamps = {}

    def receive_frame(self, frame):
        """Add a frame to its transfer.

        Returns the complete list of frames once the end-of-transfer frame
        arrives, otherwise None.  Frames that neither start a transfer nor
        belong to an active one are ignored.
        """
        result = None
        key = frame.transfer_key
        if key in self.active_transfers or frame.start_of_transfer:
            # If the first frame was received, restart this transfer from scratch
            if frame.start_of_transfer:
                self.active_transfers[key] = []
            self.active_transfers[key].append(frame)
            self.active_transfer_timestamps[key] = time.monotonic()
            # If the last frame of a transfer was received, return its frames
            if frame.end_of_transfer:
                result = self.active_transfers[key]
                del self.active_transfers[key]
                del self.active_transfer_timestamps[key]
        return result

    def remove_inactive_transfers(self, timeout=1.0):
        """Drop partially received transfers older than `timeout` seconds."""
        t = time.monotonic()
        # Bug fix: iterate over a snapshot of the keys -- deleting entries
        # while iterating the live keys view raises RuntimeError in Python 3.
        for key in list(self.active_transfers.keys()):
            if t - self.active_transfer_timestamps[key] > timeout:
                del self.active_transfers[key]
                del self.active_transfer_timestamps[key]
|
nilq/baby-python
|
python
|
from .binarytree import BinaryTree
class LinkedBinaryTree(BinaryTree):
""" Linked reperesentation of a binary tree structure. """
class _Node:  # Lightweight non-public class for storing a node
    # Fixed slots keep per-node memory small for large trees.
    __slots__ = '_element', '_parent', '_left', '_right'
    def __init__(self, element, parent=None, left=None, right=None):
        self._element = element  # user element stored at this node
        self._parent = parent    # parent _Node, or None for the root
        self._left = left        # left child _Node, or None
        self._right = right      # right child _Node, or None
class Position(BinaryTree.Position):
    """A concrete class representing the location of a single element."""

    def __init__(self, container, node):
        """Constructor should not be invoked by user."""
        self._container = container  # owning tree, used for validation
        self._node = node            # wrapped _Node

    def element(self):
        """Return the element stored at this position."""
        return self._node._element

    def __eq__(self, other):
        """Return True if other is a Position representing the same location."""
        # Bug fix: was 'other._nodde', which raised AttributeError on every
        # comparison against another Position.
        return type(other) is type(self) and other._node is self._node
def _validate(self, p):
    """Return the associated node if Position p is valid; raise otherwise."""
    if not isinstance(p, self.Position):
        raise TypeError('p must be a proper Position type')
    if p._container is not self:
        raise ValueError("p does not belong to this container")
    if p._node._parent is p._node:  # convention for deprecated nodes
        raise ValueError("p is no longer valid")
    return p._node
def make_postion(self, node):
    """Return a Position instance for the given node (or None if no node).

    NOTE(review): the name is a typo of 'make_position', but it is kept
    because other methods in this class call it by this spelling.
    """
    return self.Position(self, node) if node is not None else None
# --------------------- binary tree constructor ---------------------------------
def __init__(self):
    """Create an initially empty binary tree."""
    self._root = None  # root _Node; None while the tree is empty
    self._size = 0     # number of elements currently in the tree
# ------------------------- public accessors --------------------------------------
def __len__(self):
    """Return the number of elements in the tree."""
    return self._size
def root(self):
    """Return the root Position of the tree (or None if tree is empty)."""
    # Bug fix: previously returned the raw _Node.  Callers expect a Position,
    # consistent with parent/left/right, which all wrap nodes via make_postion.
    return self.make_postion(self._root)
def parent(self, p):
    """Return the Position of p's parent (or None if p is the root)."""
    node = self._validate(p)
    # Bug fix: previously wrapped 'node' itself, which returned p's own
    # position instead of its parent's.
    return self.make_postion(node._parent)
def left(self, p):
    """Return the Position of p's left child (or None if no left child)."""
    return self.make_postion(self._validate(p)._left)
def right(self, p):
    """Return the Position of p's right child (or None if no right child)."""
    return self.make_postion(self._validate(p)._right)
def num_children(self, p):
    """Return the number of children of Position p (0, 1, or 2)."""
    node = self._validate(p)
    return sum(1 for child in (node._left, node._right) if child is not None)
def add_root(self, e):
""" Place element e at the root of an empty tree and return new Position.
Raise ValueError if tree is nonEmpty.
"""
|
nilq/baby-python
|
python
|
import logging
from helper import is_welsh
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CoursesBySubject:
    """Groups flat course search results into subject-keyed accordions.

    Courses with one subject go into "single" accordions keyed by CAH code;
    courses with several subjects go into "multiple" accordions keyed by the
    combination of codes.  Rare groups (<= 1% of all courses) are folded into
    catch-all buckets, and codes are finally replaced by localized labels.
    """

    def __init__(self, mapper, language):
        self.mapper = mapper      # maps CAH subject codes to display labels
        self.language = language  # response language; Welsh selected via is_welsh()

    def group(self, courses, limit, offset):
        """Build and return the full grouped/paginated response payload."""
        logging.debug('group')

        single_course_accordions = {}
        multiple_course_accordions = {}
        institutions = []

        self.add_courses_to_accordions(
            courses,
            single_course_accordions,
            multiple_course_accordions,
            institutions,
        )

        # single courses
        single_course_accordions = self.sort_by_count(single_course_accordions)
        most_common_subject_code = get_first_accordion_subject_code(single_course_accordions)
        most_common_subject_label = self.mapper.get_label(most_common_subject_code)
        # Merge accordions whose codes share the most common subject's label.
        combine_most_common_subjects(self.mapper, most_common_subject_code, most_common_subject_label, single_course_accordions)
        self.group_single_courses_that_are_less_than_one_percent(
            courses,
            single_course_accordions,
        )
        self.replace_codes_with_labels(most_common_subject_label, single_course_accordions)
        single_course_accordions = self.sort_by_count(single_course_accordions)

        # multiple courses
        self.group_multiple_courses_that_are_less_than_one_percent(
            courses,
            multiple_course_accordions,
            most_common_subject_code,
        )
        self.replace_codes_with_labels(most_common_subject_label, multiple_course_accordions)
        multiple_course_accordions = sort_alphabetically(multiple_course_accordions)
        # Push the "Other combinations..." buckets to the end.
        self.sort_other_combinations(most_common_subject_label, multiple_course_accordions)

        self.sort_contents(single_course_accordions)
        self.sort_contents(multiple_course_accordions)
        add_number_of_courses(single_course_accordions)
        add_number_of_courses(multiple_course_accordions)
        log_accordions(single_course_accordions, courses)
        log_accordions(multiple_course_accordions, courses)

        return {
            "items": {
                "single_subject_courses": single_course_accordions,
                "multiple_subject_courses": multiple_course_accordions,
            },
            "limit": limit,
            "number_of_items": len(single_course_accordions) + len(multiple_course_accordions),
            "offset": offset,
            "total_number_of_courses": len(courses),
            "total_results": len(institutions),
        }

    def add_courses_to_accordions(self, courses, single_course_accordions, multiple_course_accordions, institutions):
        """Split raw results into single/multi-subject accordions; track institutions."""
        logging.debug('add_courses_to_accordions')
        single_courses = {}
        multiple_courses = {}
        for c in courses:
            institution = c[key_course][key_institution]
            # Skip results whose institution name is unknown.
            if institution[key_pub_ukprn_name] == "not available":
                continue
            add_institution_to_list(institution, institutions)
            course = build_course(c[key_course], institution, self.language)
            sort_results_into_groups(
                course,
                single_courses,
                multiple_courses,
            )
        add_single_courses_to_accordions(
            single_courses,
            single_course_accordions,
        )
        add_multiple_courses_to_accordions(
            multiple_courses,
            multiple_course_accordions,
        )

    def sort_contents(self, accordion):
        """Order the courses inside each accordion by subject then institution."""
        logging.debug('sort_contents')
        self.sort_contents_alphabetically_by_subject(accordion)
        self.sort_contents_alphabetically_by_institution(accordion)

    def sort_contents_alphabetically_by_subject(self, accordion):
        """Sort each accordion's course list by localized course title."""
        logging.debug('sort_contents_alphabetically_by_subject')
        for key in list(accordion.keys()):
            accordion[key][key_courses] = sorted(accordion[key][key_courses], key=lambda k: self.get_translation(k[key_title]))

    def sort_contents_alphabetically_by_institution(self, accordion):
        """Within each title, order the courses by institution name."""
        logging.debug('sort_contents_alphabetically_by_institution')
        for key in list(accordion.keys()):
            courses = {}
            for course in accordion[key][key_courses]:
                title = self.get_translation(course[key_title])
                group_courses(key, course, title, courses)
            accordion[key][key_courses] = []
            for k, v in courses.items():
                for k2, v2 in v.items():
                    v2 = sorted(v2, key=lambda k3: k3[key_institution][key_pub_ukprn_name])
                    accordion[key][key_courses].extend(v2)

    def get_translation(self, json):
        """Return the title in the requested language, falling back to English."""
        logging.debug(f'get_translation({self.language})')
        language_name = 'welsh' if is_welsh(self.language) else 'english'
        if not json[language_name]:
            logging.warning(f'missing translation: {json}')
            return json['english']
        return json[language_name]

    def replace_codes_with_labels(self, most_common_subject_label, accordions):
        """Rewrite accordion keys from CAH codes to human-readable labels."""
        logging.debug('replace_codes_with_labels')
        key_other_combinations_with = get_key_other_combinations_with(self.language)
        for codes in list(accordions):
            if codes.startswith(key_other_combinations_with):
                # The catch-all bucket ends with a code; swap it for the label.
                accordions[f'{key_other_combinations_with} {most_common_subject_label}'] = accordions.pop(codes)
                continue
            labels = []
            for code in codes.split():
                if code.startswith('CAH'):
                    labels.append(self.mapper.get_label(code))
            label = wrap_with_course(labels, self.language)
            if codes.startswith('CAH'):
                accordions[label] = accordions.pop(codes)

    def group_multiple_courses_that_are_less_than_one_percent(self, courses, accordions, most_common_subject_code):
        """Fold rare (<=1%) multi-subject accordions into catch-all buckets."""
        logging.debug('group_multiple_courses_that_are_less_than_one_percent')
        key_other_combinations_with = get_key_other_combinations_with(self.language)
        for key in list(accordions.keys()):
            if most_common_subject_code == key:
                continue
            percentage = len(accordions[key][key_courses]) / len(courses) * 100
            if percentage <= 1:
                if most_common_subject_code in key.split():
                    # Combinations involving the most common subject get their
                    # own bucket; the code is replaced by a label later.
                    label = f'{key_other_combinations_with} {most_common_subject_code}'
                    move_course(accordions, key, label)
                else:
                    label = get_key_other_combinations(self.language)
                    move_course(accordions, key, label)

    def sort_other_combinations(self, most_common_subject_label, accordions):
        """Move the two catch-all buckets to the end of the accordion dict."""
        logging.debug('sort_other_combinations')
        key_other_combinations_with = get_key_other_combinations_with(self.language)
        key = f'{key_other_combinations_with} {most_common_subject_label}'
        if accordions.get(key):
            other_combinations_with = accordions[key]
            accordions.pop(key)
            accordions[key] = other_combinations_with
        key_other_combinations = get_key_other_combinations(self.language)
        if accordions.get(key_other_combinations):
            other_combinations = accordions[key_other_combinations]
            accordions.pop(key_other_combinations)
            accordions[key_other_combinations] = other_combinations

    def sort_by_count(self, accordion):
        """Return a new dict ordered by course count (descending), keeping the
        'Courses in other subjects' bucket last."""
        logging.debug('sort_by_count')
        keys = accordion.keys()
        sorted_keys = sorted(keys, key=lambda key: len(accordion[key][key_courses]), reverse=True)
        # (A dead list comprehension that built and discarded a list of the
        # accordion values was removed here.)
        sorted_accordion = {}
        for key in sorted_keys:
            sorted_accordion[key] = accordion[key]
        key_courses_in_other_subjects = get_key_courses_in_other_subjects(self.language)
        if key_courses_in_other_subjects in sorted_accordion:
            sorted_accordion[key_courses_in_other_subjects] = sorted_accordion.pop(key_courses_in_other_subjects)
        return sorted_accordion

    def group_single_courses_that_are_less_than_one_percent(self, courses, accordions):
        """Fold rare (<=1%) single-subject accordions into the catch-all bucket."""
        logging.debug('group_single_courses_that_are_less_than_one_percent')
        key_courses_in_other_subjects = get_key_courses_in_other_subjects(self.language)
        for key in list(accordions.keys()):
            label = key_courses_in_other_subjects
            if label == key:
                continue
            percentage = len(accordions[key][key_courses]) / len(courses) * 100
            if percentage <= 1:
                move_course(accordions, key, label)
def get_key_courses_in_other_subjects(language):
    """Return the localized 'Courses in other subjects' heading."""
    return key_courses_in_other_subjects[get_language_name(language)]
def get_key_other_combinations(language):
    """Return the localized 'Other combinations' heading."""
    return key_other_combinations[get_language_name(language)]
def get_key_other_combinations_with(language):
    """Return the localized 'Other combinations with' heading prefix."""
    return key_other_combinations_with[get_language_name(language)]
def get_language_name(language):
    """Map a language code to the dictionary key 'welsh' or 'english'."""
    return 'welsh' if is_welsh(language) else 'english'
def wrap_with_course(labels, language):
    """Join subject labels with ' & ' and wrap them as a localized heading."""
    joined = " & ".join(labels)
    return f'Cyrsiau {joined}' if is_welsh(language) else f'{joined} courses'
def build_course(course, institution, language):
    """Project the raw course and institution documents into the response shape."""
    logging.debug('build_course')
    display_name = institution[key_pub_ukprn_welsh_name] if is_welsh(language) else institution[key_pub_ukprn_name]
    institution_body = {
        key_pub_ukprn_name: display_name,
        key_pub_ukprn: institution[key_pub_ukprn],
    }
    locations = [location[key_name] for location in course[key_locations]]
    return {
        "country": course["country"]["label"],
        "distance_learning": course["distance_learning"]["label"],
        "foundation_year": course["foundation_year_availability"]["label"],
        "honours_award": course["honours_award_provision"],
        "kis_course_id": course[key_kis_course_id],
        "length_of_course": course["length_of_course"]["label"],
        "mode": course["mode"]["label"],
        "qualification": course["qualification"]["label"],
        "sandwich_year": course["sandwich_year"]["label"],
        "subjects": course[key_subjects],
        "title": course["title"],
        "year_abroad": course["year_abroad"]["label"],
        key_locations: locations,
        key_institution: institution_body,
    }
def add_institution_to_list(institution, institutions):
    """Record the institution's pub_ukprn if it has not been seen yet."""
    logging.debug('add_institution_to_list')
    identifier = institution[key_pub_ukprn]
    if identifier not in institutions:
        institutions.append(identifier)
def sort_results_into_groups(course, single_courses, multiple_courses):
    """File a course into the single- or multi-subject group, keyed by KIS id."""
    logging.debug('sort_results_into_groups')
    subject_count = len(course[key_subjects])
    if subject_count == 1:
        single_courses[course[key_kis_course_id]] = course
    elif subject_count > 1:
        multiple_courses[course[key_kis_course_id]] = course
def add_single_courses_to_accordions(courses, accordions):
    """Add each single-subject course to the accordion for its subject code."""
    logging.debug('add_single_courses_to_accordions')
    for course in courses.values():
        subject_code = course[key_subjects][0][key_code]
        add_course_to_accordions(course, subject_code, accordions)
def add_course_to_accordions(course, label, accordions):
    """Append course to accordions[label], creating the bucket on first use."""
    bucket = accordions.setdefault(label, {key_courses: []})[key_courses]
    if course not in bucket:
        bucket.append(course)
def add_multiple_courses_to_accordions(courses, accordions):
    """Add each multi-subject course under a space-joined subject-code label."""
    logging.debug('add_multiple_courses_to_accordions')
    for course in courses.values():
        label = " ".join(subject[key_code] for subject in course[key_subjects])
        add_course_to_accordions(course, label, accordions)
def move_course(accordions, key, label):
    """Merge the courses under `key` into `label` and remove the `key` bucket."""
    target = accordions.setdefault(label, {key_courses: []})[key_courses]
    for course in accordions[key][key_courses]:
        if course not in target:
            target.append(course)
    accordions.pop(key)
def group_courses(key, course, title, accordions):
    """Append course under accordions[key][title], creating levels as needed.

    Matches the original's truthiness semantics: a falsy existing entry is
    replaced by a fresh container.
    """
    logging.debug('group_courses')
    subject_group = accordions.get(key)
    if not subject_group:
        subject_group = {}
        accordions[key] = subject_group
    title_group = subject_group.get(title)
    if not title_group:
        title_group = []
        subject_group[title] = title_group
    title_group.append(course)
def sort_alphabetically(accordions):
    """Return a new dict whose keys are in ascending alphabetical order."""
    logging.debug('sort_alphabetically')
    return {label: accordions[label] for label in sorted(accordions)}
def get_first_accordion_subject_code(accordions):
    """Return the subject code of the first course in the first accordion."""
    logging.debug('get_first_accordion_subject_code')
    first_key = next(iter(accordions))
    first_course = accordions.get(first_key)[key_courses][0]
    return first_course[key_subjects][0][key_code]
def combine_most_common_subjects(mapper, most_common_subject_code, most_common_subject_label, accordions):
    """Fold the first accordion whose label equals the most common subject's
    label (but under a different code) into the most common subject's bucket."""
    for key in list(accordions.keys()):
        if key == most_common_subject_code:
            continue
        if mapper.get_label(key) == most_common_subject_label:
            accordions[most_common_subject_code][key_courses].extend(accordions[key][key_courses])
            accordions.pop(key)
            break
def add_number_of_courses(accordions):
    """Annotate each accordion with the count of courses it contains."""
    logging.debug('add_number_of_courses')
    for label, accordion in accordions.items():
        accordion[key_number_of_courses] = len(accordion[key_courses])
def log_accordions(accordions, courses):
    """Log each accordion's course count and its share of all courses."""
    logging.info('---------------------------------------')
    total = len(courses)
    for label, accordion in accordions.items():
        count = len(accordion[key_courses])
        percentage = count / total * 100
        logging.info(f'{label}: {count} ({round(percentage,1)}%)')
# Dictionary key names shared across this module.
key_code = 'code'
key_course = 'course'
key_courses = 'courses'
# Localized accordion headings (English / Welsh).
key_courses_in_other_subjects = {'english': 'Courses in other subjects', 'welsh': 'Cyrsiau mewn pynciau eraill'}
key_institution = 'institution'
key_institutions = 'institutions'
key_kis_course_id = 'kis_course_id'
key_locations = 'locations'
key_name = 'name'
key_number_of_courses = 'number_of_courses'
key_other_combinations = {'english': 'Other combinations', 'welsh': 'Cyfuniadau arall'}
key_other_combinations_with = {'english': 'Other combinations with', 'welsh': 'Cyfuniadau eraill gyda'}
key_pub_ukprn = 'pub_ukprn'
key_pub_ukprn_name = 'pub_ukprn_name'
key_pub_ukprn_welsh_name = 'pub_ukprn_welsh_name'
key_subjects = 'subjects'
key_title = 'title'
|
nilq/baby-python
|
python
|
import copy
import numpy
def convert_to_binary(list_of_digits):
    """Interpret a sequence of 0/1 digits as a base-2 number and return it."""
    return int("".join(map(str, list_of_digits)), 2)
with open("data/day3.txt") as f:
    # The entire input forms a matrix of integers, parse it as such.
    matrix = [[int(num) for num in list(line.rstrip())] for line in f.readlines()]

matrix_transpose = numpy.transpose(matrix)
# `bincount` gives the occurrences of each element in the array.
# `argmax` returns the element having the maximum occurrences.
# `transpose` transposes a given matrix.
max_occurrence = [numpy.bincount(row).argmax() for row in matrix_transpose]
# Digits are always 0 or 1, so the complement is simply 1 - digit.
max_occurrence_complement = [1 - digit for digit in max_occurrence]
gamma = convert_to_binary(max_occurrence)
epsilon = convert_to_binary(max_occurrence_complement)
print("Day 3 - a")
print(gamma * epsilon)

# =======================

# Oxygen generator rating: repeatedly keep rows matching the most common bit
# in the current column (ties resolve to 1).
oxy_matrix = copy.deepcopy(matrix)
for column_index in range(len(matrix_transpose)):
    column = [row[column_index] for row in oxy_matrix]
    bincount = numpy.bincount(column)
    max_element = 1 if len(bincount) > 1 and bincount[1] >= bincount[0] else 0
    # Bug fix: filter() returns a one-shot iterator in Python 3, so the result
    # could not be len()-checked, indexed, or re-iterated on the next pass.
    # Materialize the surviving rows as a list instead.
    oxy_matrix = [row for row in oxy_matrix if row[column_index] == max_element]
    if len(oxy_matrix) == 1:
        break
oxy = convert_to_binary(oxy_matrix[0])

# CO2 scrubber rating: repeatedly keep rows matching the least common bit
# in the current column (ties resolve to 0).
co2_matrix = copy.deepcopy(matrix)
for column_index in range(len(matrix_transpose)):
    column = [row[column_index] for row in co2_matrix]
    bincount = numpy.bincount(column)
    min_element = 0 if len(bincount) <= 1 or bincount[0] <= bincount[1] else 1
    # (A redundant isinstance guard was removed: min_element is always an int.)
    co2_matrix = [row for row in co2_matrix if row[column_index] == min_element]
    if len(co2_matrix) == 1:
        break
co2 = convert_to_binary(co2_matrix[0])

print("Day 3 - b")
print(oxy * co2)
|
nilq/baby-python
|
python
|
# Copyright 2018 The ops Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence
from cirq import ops, circuits, optimizers
from cirq.contrib.paulistring.pauli_string_optimize import (
pauli_string_optimized_circuit)
from cirq.contrib.paulistring.clifford_optimize import (
clifford_optimized_circuit)
def optimized_circuit(circuit: circuits.Circuit,
                      atol: float = 1e-8,
                      repeat: int = 10,
                      merge_interactions: bool = True
                      ) -> circuits.Circuit:
    """Repeatedly apply the pauli-string and clifford optimization passes.

    Runs up to *repeat* passes over a copy of *circuit*, stopping early
    once a pass no longer reduces the moment count or the CZ count.
    """
    current = circuits.Circuit(circuit)  # Work on a copy; never mutate the input.
    for _ in range(repeat):
        moments_before = len(current)
        czs_before = _cz_count(current)
        if merge_interactions:
            # Mutates `current` in place, cleaning up merged groups via
            # the recursive _optimized_ops callback.
            optimizers.MergeInteractions(
                allow_partial_czs=False,
                post_clean_up=_optimized_ops,
            ).optimize_circuit(current)
        candidate = clifford_optimized_circuit(
            pauli_string_optimized_circuit(
                current, move_cliffords=False, atol=atol),
            atol=atol)
        # No progress this pass -> a further pass cannot help either.
        if (len(candidate) == moments_before
                and _cz_count(candidate) == czs_before):
            return candidate
        current = candidate
    return current
def _optimized_ops(ops: Sequence[ops.Operation],
                   atol: float = 1e-8,
                   repeat: int = 10) -> ops.OP_TREE:
    """Clean-up callback: re-optimize a group of operations without CZ merging."""
    # NOTE: the parameter shadows the `ops` module inside this body only;
    # the annotations above are evaluated against the module at def time.
    small_circuit = circuits.Circuit.from_ops(ops)
    optimized = optimized_circuit(small_circuit, atol, repeat,
                                  merge_interactions=False)
    return optimized.all_operations()
def _cz_count(circuit):
    """Count CZ-family operations in *circuit*.

    Bug fix: iterating a Circuit yields Moments, not operations, so the
    original ``for op in circuit`` compared Moments against operation/gate
    types and always returned 0.  Additionally, ``CZPowGate`` is a Gate,
    not an Operation, so the operation's ``gate`` attribute must be tested.
    """
    return sum(isinstance(op, ops.GateOperation)
               and isinstance(op.gate, ops.CZPowGate)
               for moment in circuit
               for op in moment)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
KJ-65X9500G の輝度制限と階調特性の関係調査
=========================================
Description.
"""
# import standard libraries
import os
# import third-party libraries
import numpy as np
from colour import write_image
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# import my libraries
import test_pattern_generator2 as tpg
import transfer_functions as tf
# information
__author__ = 'Toru Yoshihara'
__copyright__ = 'Copyright (C) 2019 - Toru Yoshihara'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Toru Yoshihara'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
FONT_PATH = "/usr/share/fonts/opentype/noto/NotoSansMonoCJKjp-Regular.otf"
def convert_from_pillow_to_numpy(img):
    """Convert an 8-bit Pillow image to a uint16 ndarray scaled to 10-bit range."""
    scale = 2 ** (10 - 8)  # 8 bit --> 10 bit
    return np.uint16(np.asarray(img)) * scale
def merge_text(img, txt_img, pos):
    """
    Final step of text compositing: paste the non-zero pixels of *txt_img*
    onto *img*.  *pos* is the text position as (st_pos_h, st_pos_v).

    Implementation note: a true alpha-blended composite was abandoned
    because Pillow is 8 bit while the rest of the pipeline is 10-16 bit,
    which would shift BG_COLOR.
    """
    pos_h, pos_v = pos
    height = txt_img.shape[0]
    width = txt_img.shape[1]
    # Somewhat crude approach: find the indices of non-zero text pixels and
    # overwrite only those pixels in the destination region.
    region = img[pos_v:pos_v + height, pos_h:pos_h + width]
    mask = txt_img > 0
    region[mask] = txt_img[mask]
    img[pos_v:pos_v + height, pos_h:pos_h + width] = region
def merge_each_spec_text(img, pos, font_size, text_img_size, text):
    """
    Render the explanatory text for one part and composite it onto *img*.

    *pos* is the text position (st_pos_h, st_pos_v);
    *text_img_size* is (size_h, size_v).
    """
    size_h, size_v = text_img_size
    fg_color = (0x00, 0x60, 0x60)
    bg_color = (0x00, 0x00, 0x00)
    # Draw the text on a fresh black RGB canvas with Pillow, then convert
    # to a 10-bit numpy image and merge the non-black pixels.
    canvas = Image.new("RGB", (size_h, size_v), bg_color)
    font = ImageFont.truetype(FONT_PATH, font_size)
    ImageDraw.Draw(canvas).text((0, 0), text, font=font, fill=fg_color)
    merge_text(img, convert_from_pillow_to_numpy(canvas), pos)
def research_recognizable_peak_luminance(target_luminance=1000):
    """
    Make a patch for finding the maximum luminance that is still
    distinguishable from full code value on the display.
    """
    bg_width, bg_height = 1920, 1080
    # Foreground tile pattern covers 10% of the screen area.
    side_ratio = 0.1 ** 0.5
    fg_width = int(bg_width * side_ratio + 0.5)
    fg_height = int(bg_height * side_ratio + 0.5)
    img = np.zeros((bg_height, bg_width, 3), dtype=np.uint16)
    # Target luminance -> 10-bit ST2084 code value.
    code_value = tf.oetf_from_luminance(target_luminance, tf.ST2084)
    code_value = np.uint16(np.round(code_value * 1023))
    print(code_value)
    fg_img = tpg.make_tile_pattern(
        width=fg_width, height=fg_height, h_tile_num=16, v_tile_num=9,
        low_level=(code_value, code_value, code_value),
        high_level=(1023, 1023, 1023))
    tpg.merge(img, fg_img, pos=(0, 0))
    merge_each_spec_text(
        img, pos=(630, 5), font_size=30, text_img_size=(960, 100),
        text="target luminance = {:d} cd/m2".format(target_luminance))
    fname = "./img/{:05d}_peak_lumiance.tiff".format(target_luminance)
    write_image(img / 0x3FF, fname, bit_depth='uint16')
def research_st2084_with_bg_luminance_change(
        target_luminance=1600, bg_luminance=1000):
    """
    Make a patch for checking recognizability of the target luminance
    while the background luminance is varied.
    """
    bg_width, bg_height = 1920, 1080
    side_ratio = 0.1 ** 0.5  # foreground covers 10% of the screen area
    fg_width = int(bg_width * side_ratio + 0.5)
    fg_height = int(bg_height * side_ratio + 0.5)
    # Background filled with the ST2084 code value of bg_luminance.
    bg_code = np.uint16(np.round(
        tf.oetf_from_luminance(bg_luminance, tf.ST2084) * 1023))
    img = np.ones((bg_height, bg_width, 3), dtype=np.uint16) * bg_code
    # Foreground tile pattern alternating target code value and full code.
    fg_code = np.uint16(np.round(
        tf.oetf_from_luminance(target_luminance, tf.ST2084) * 1023))
    fg_img = tpg.make_tile_pattern(
        width=fg_width, height=fg_height, h_tile_num=16, v_tile_num=9,
        low_level=(fg_code, fg_code, fg_code),
        high_level=(1023, 1023, 1023))
    tpg.merge(img, fg_img, pos=(0, 0))
    text_base = "target_luminance = {:d} cd/m2, bg_luminance = {:d} cd/m2"
    merge_each_spec_text(
        img, pos=(630, 5), font_size=30, text_img_size=(960, 100),
        text=text_base.format(target_luminance, bg_luminance))
    fname_base = "./img/target_{:05d}_bg_{:05d}_lumiance.tiff"
    fname = fname_base.format(target_luminance, bg_luminance)
    write_image(img / 0x3FF, fname, bit_depth='uint16')
def main_func():
    """Run the whole KJ-65X9500G gradation investigation."""
    # Probe the display limit from 1000 to 1900 cd/m2 in 100 cd/m2 steps.
    for luminance in range(1000, 2000, 100):
        research_recognizable_peak_luminance(luminance)
    # Check behaviour while sweeping the background luminance.
    bg_luminance_list = [0, 100, 300, 500, 800, 1000, 2000, 4000, 10000]
    for target in (500, 750, 1000, 1500, 2000):
        for bg in bg_luminance_list:
            research_st2084_with_bg_luminance_change(
                target_luminance=target, bg_luminance=bg)
if __name__ == '__main__':
    # Run from the script's own directory so relative "./img/..." paths work.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    main_func()
|
nilq/baby-python
|
python
|
import pytest
from openpecha.utils import download_pecha
@pytest.mark.skip("Downloading github repo")
def test_download_pecha():
    # Smoke test only: fetching the "collections" pecha clones a GitHub
    # repository, so the test is skipped by default to stay offline.
    pecha_path = download_pecha("collections")
|
nilq/baby-python
|
python
|
# Generated by Django 2.2 on 2020-08-11 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the ``Iniciativa`` model.

    Generated by Django 2.2; do not edit by hand -- create a follow-up
    migration for further schema changes.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Iniciativa',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nro_correlativo', models.IntegerField()),
                ('eje_estrategico', models.CharField(max_length=100)),
                ('requerimiento', models.CharField(max_length=100)),
                ('fecha_solicitud', models.DateField()),
                ('fecha_solped', models.DateField()),
                ('nombre', models.CharField(max_length=75)),
                ('descripcion', models.TextField(max_length=240)),
                ('objetivo', models.TextField(max_length=300)),
                ('beneficio', models.TextField(max_length=500)),
                ('impacto', models.CharField(max_length=100)),
                ('inicio', models.DateField()),
                ('fin', models.DateField()),
                ('sponsor', models.CharField(max_length=100)),
                ('solicitante', models.CharField(max_length=100)),
                ('capa_red', models.CharField(max_length=50)),
                ('ubicacion', models.CharField(max_length=100)),
                ('tipo', models.CharField(max_length=50)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
from dsame.trees.BinaryTreeNode import BinaryTreeNode
def inOrder(root: BinaryTreeNode):
    """Print node values with an in-order (left, node, right) traversal.

    Prints each value on its own line; returns None.
    """
    # Guard clause: empty/falsy subtree ends the recursion.
    if not root:
        return
    inOrder(root.left)
    print(root.data)
    inOrder(root.right)
# Demo: root c(1) with left child a(2) and right child b(3); the in-order
# traversal prints 2, 1, 3.  inOrder() returns None, so the outer print
# additionally emits "None".
a = BinaryTreeNode(2)
b = BinaryTreeNode(3)
c = BinaryTreeNode(1, a, b)
print(inOrder(c))
|
nilq/baby-python
|
python
|
import os
from ast import literal_eval
from pathlib import Path
import numpy as np
def read_info(filename: os.PathLike) -> dict:
    """Read volume metadata.

    Lines look like ``KEY = value``; blank lines and lines starting with
    ``!`` are skipped.  Values are parsed as Python literals when possible,
    otherwise kept as strings.

    Parameters
    ----------
    filename : PathLike
        Path to the file.

    Returns
    -------
    dct : dict
        Dictionary with the metadata.
    """
    dct = {}
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line.startswith('!'):
                continue
            # Bug fix: split on the first '=' only so values that themselves
            # contain '=' do not raise an unpacking ValueError.
            key, val = line.split('=', 1)
            key = key.strip()
            val = val.strip()
            try:
                val = literal_eval(val)
            except (ValueError, SyntaxError):
                # Bug fix: literal_eval raises SyntaxError for inputs such
                # as "1.2.3"; the original only caught ValueError and let
                # those crash.  Non-literal values are kept verbatim.
                pass
            dct[key] = val
    return dct
def load_vol(filename: os.PathLike,
             dtype=np.float32,
             mmap_mode: str = None,
             shape: tuple = None) -> np.ndarray:
    """Load data from `.vol` file.

    The image shape is deduced from the `.vol.info` file. If this file is
    not present, the shape can be specified using the `shape` keyword.

    Parameters
    ----------
    filename : os.PathLike
        Path to the file.
    dtype : dtype, optional
        Numpy dtype of the data.
    mmap_mode : None, optional
        If not None, open the file using memory mapping. For more info on
        the modes, see: :func:`numpy.memmap`
    shape : tuple, optional
        Tuple of three ints specifying the shape of the data (order: z, y, x).

    Returns
    -------
    result : numpy.ndarray
        Data stored in the file.
    """
    filename = Path(filename)
    if not filename.exists():
        # Bug fix: the original raised the mangled literal message
        # 'No such file: (unknown)' instead of interpolating the path.
        raise IOError(f'No such file: {filename}')
    filename_info = filename.with_suffix(filename.suffix + '.info')
    if not shape:
        try:
            info = read_info(filename_info)
            shape = info['NUM_Z'], info['NUM_Y'], info['NUM_X']
        except FileNotFoundError:
            raise ValueError(
                f'Info file not found: {filename_info.name}, specify '
                'the volume shape using the `shape` parameter.') from None

    result: np.ndarray

    if mmap_mode:
        # Memory-map to avoid loading the whole volume into RAM.
        result = np.memmap(filename, dtype=dtype, shape=shape,
                           mode=mmap_mode)  # type: ignore
    else:
        result = np.fromfile(filename, dtype=dtype)
        result = result.reshape(shape)

    return result
|
nilq/baby-python
|
python
|
# NOTE: Python 2 script (uses print statements below).
# --- Setup: parse options, build transforms, datasets and model. ---
import time
from options.train_options import TrainOptions
opt = TrainOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch
import pickle
from data.custom_transforms import ToLabelTensor
# with open("opt.obj",'wb') as f:
# pickle.dump(opt,f)
from data.segmentation import SegmentationDataset
from models.models import create_model
from data.unaligned_data_loader import UnalignedDataLoader
import torch.utils.data
import torchvision.transforms as transforms
#from models.models import create_model
from util.visualizer import Visualizer
from pdb import set_trace as st
import numpy as np
import gc
import evaluation.metrics
labels = __import__('data.labels')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from data.custom_transforms import DownSizeLabelTensor
# Labels are downsized twice below, hence the 2*factor here.
ds1= DownSizeLabelTensor(2*opt.factor)
size= ds1.findDecreasedResolution(opt.fineSize)/2
# Image transform: crop, scale down, convert to tensor.
transform = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),
    transforms.Scale(size),
    transforms.ToTensor(),
])
# Label transforms map raw label images to training label ids.
target_transform = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),transforms.ToTensor(),ToLabelTensor(labels.labels.labels)
])
target_transform2 = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),transforms.ToTensor(),ToLabelTensor(labels.labels.labels)
])
opt.continue_train=True
# Segmentation datasets for the two domains (A = source, B = target).
domainAdata= SegmentationDataset(root=opt.dataroot + '/' + opt.domain_A , split_ratio=opt.split_ratio_A,
    transform=transform, target_transform=target_transform, return_paths=True)
domainBdata= SegmentationDataset(root=opt.dataroot + '/' + opt.domain_B , split_ratio=opt.split_ratio_B,
    transform=transform, target_transform=target_transform2, return_paths=True)
domainAdataloader = torch.utils.data.DataLoader(
    domainAdata,
    batch_size=opt.batchSize,
    shuffle=not opt.serial_batches,
    num_workers=int(opt.nThreads))
domainBdataloader = torch.utils.data.DataLoader(
    domainBdata,
    batch_size=opt.batchSize,
    shuffle=not opt.serial_batches,
    num_workers=int(opt.nThreads))
# Unaligned (CycleGAN-style) loader pairing images from both domains.
cycle_data_loader=UnalignedDataLoader()
cycle_data_loader.initialize(opt,transform,transform)
dataset = cycle_data_loader.load_data()
num_train = len(cycle_data_loader)
print('#training images = %d' % num_train)
print ('Finetune:'+str(opt.finetune))
print ('Split Ratio A:'+str(opt.split_ratio_A))
print ('Split Ratio B:'+str(opt.split_ratio_B))
print ('Split Ratio AB:'+str(opt.split_ratio_AB))
print ('Experiment Name:'+opt.name)
print ('Iterations'+str(opt.niter))
print ('Iterations Decay'+str(opt.niter_decay))
opt.switch=0
model = create_model(opt)
visualizer = Visualizer(opt)
print 'Pretraining Done!!'
print 'Starting Combined Training'
avgtimetaken=[]
total_steps=0
# for epoch in range(1,opt.niter + opt.niter_decay + 1): #
# epoch_start_time = time.time()
# domainBdata_iter = domainBdataloader.__iter__()
# iter=0
# print epoch
# for i in range(0,len(domainBdataloader)):
# s=time.time()
# batch_n= next(domainBdata_iter)
# data={}
# data['B_image'] = batch_n[0][0]
# data['B_label'] = ds1.downsize(ds1.downsize(batch_n[1][0]).data).data
# print i
# iter_start_time = time.time()
# total_steps += opt.batchSize
# epoch_iter = total_steps % num_train
# model.set_input(data,'BC')
# model.optimize_parameters()
# e=time.time()
# avgtimetaken.append(e-s)
# if total_steps % opt.display_freq == 0:
# visualizer.display_current_results(model.get_current_visuals(), epoch)
# if total_steps % opt.print_freq == 0:
# errors = model.get_current_errors()
# visualizer.print_current_errors(epoch, total_steps, errors, iter_start_time)
# if opt.display_id > 0:
# visualizer.plot_current_errors(epoch, total_steps, opt, errors)
# if total_steps % opt.save_latest_freq == 0:
# print('saving the latest model (epoch %d, total_steps %d)' %
# (epoch, total_steps))
# model.save('latest')
# if epoch % opt.save_epoch_freq == 0:
# print('saving the model at the end of epoch %d, iters %d' %
# (epoch, total_steps))
# model.save('latest')
# model.save(epoch)
# print('End of epoch %d / %d \t Time Taken: %d sec' %
# (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
# if epoch > opt.niter + opt.niter_decay*0.75:
# model.update_learning_rate()
# print 'Done'
print 'Training Target Domain to Source Domain Adversarially'
# Adversarial phase: feed unaligned image pairs (A, B) each epoch.
for epoch in range(1,opt.niter + opt.niter_decay + 1): #
    epoch_start_time = time.time()
    domainABdata_iter = dataset.__iter__()
    iter=0
    for i in range(0,num_train,opt.batchSize):
        s=time.time()
        batch_n= next(domainABdata_iter)
        data={}
        data['AB_image_1'] = batch_n['A']
        data['AB_image_2'] = batch_n['B']
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter = total_steps % num_train
        # 'AB' selects the adversarial (domain-pair) objective in the model.
        model.set_input(data,'AB')
        model.optimize_parameters()
        e=time.time()
        avgtimetaken.append(e-s)
        # Periodic logging / visualization / checkpointing.
        if total_steps % opt.print_freq == 0:
            errors = model.get_current_errors()
            visualizer.print_current_errors(epoch, total_steps, errors, iter_start_time)
        if total_steps % opt.display_freq == 0:
            visualizer.display_current_results(model.get_current_visuals(), epoch)
        if total_steps % opt.save_latest_freq == 0:
            print('saving the latest model (epoch %d, total_steps %d)' %
                  (epoch, total_steps))
            model.save('latest')
    if epoch % opt.save_epoch_freq == 0:
        print('saving the model at the end of epoch %d, iters %d' %
              (epoch, total_steps))
        model.save('latest')
        model.save(epoch)
    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
# Optional fine-tuning phase on labelled source-domain (A) data.
if opt.finetune>1:
    print 'FineTuning'
    for epoch in range(1,opt.niter + opt.niter_decay + 1): #
        epoch_start_time = time.time()
        domainAdata_iter = domainAdataloader.__iter__()
        iter=0
        for i in range(0,len(domainAdataloader),opt.batchSize):
            s=time.time()
            batch_n= next(domainAdata_iter)
            data={}
            data['A_image'] = batch_n[0][0]
            # Labels are downsized twice to match the network output size.
            data['A_label'] = ds1.downsize(ds1.downsize(batch_n[1][0]).data).data
            iter_start_time = time.time()
            total_steps += opt.batchSize
            epoch_iter = total_steps % num_train
            # 'AC' selects the supervised segmentation objective on domain A.
            model.set_input(data,'AC')
            model.optimize_parameters()
            e=time.time()
            avgtimetaken.append(e-s)
            if total_steps % opt.display_freq == 0:
                visualizer.display_current_results(model.get_current_visuals(), epoch)
            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                visualizer.print_current_errors(epoch, total_steps, errors, iter_start_time)
                if opt.display_id > 0:
                    visualizer.plot_current_errors(epoch, total_steps, opt, errors)
            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %
                      (epoch, total_steps))
                model.save('latest')
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch, total_steps))
            model.save('latest')
            model.save(epoch)
        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        # Decay the learning rate over the last quarter of training.
        if epoch > opt.niter + opt.niter_decay*0.75:
            model.update_learning_rate()
#----------------Begin Testing Now!!---------
print 'Testing Now'
# NOTE(review): options are re-parsed and modules re-imported here, which
# resets opt and duplicates the setup above -- presumably copied from a
# standalone test script; confirm before refactoring.
import time
from options.train_options import TrainOptions
opt = TrainOptions().parse()
#opt.dataroot='/home/sloke/repos/nips2017/left8bit/gtacityscapes/test'
opt.split_ratio_A=1
opt.split_ratio_B=1
# set CUDA_VISIBLE_DEVICES before import torch
import pickle
from data.custom_transforms import ToLabelTensor
# with open("opt.obj",'wb') as f:
# pickle.dump(opt,f)
from data.segmentation import SegmentationDataset
from models.models import create_model
from data.unaligned_data_loader import UnalignedDataLoader
import torch.utils.data
import torchvision.transforms as transforms
#from models.models import create_model
from util.visualizer import Visualizer
from pdb import set_trace as st
import numpy as np
import gc
import evaluation.metrics
labels = __import__('data.labels')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
opt.continue_train=True
from data.custom_transforms import DownSizeLabelTensor
# NOTE(review): uses opt.factor here (training setup used 2*opt.factor).
ds1= DownSizeLabelTensor(opt.factor)
size= ds1.findDecreasedResolution(opt.fineSize)/2
transform = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),
    transforms.Scale(size),
    transforms.ToTensor(),
])
target_transform = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),transforms.ToTensor(),ToLabelTensor(labels.labels.labels)
])
target_transform2 = transforms.Compose([
    transforms.CenterCrop(opt.fineSize),transforms.ToTensor(),ToLabelTensor(labels.labels.labels)
])
#mean_pixel_acc_test_epoch, mean_class_acc_test_epoch, mean_class_iou_test_epoch, per_class_acc_test_epoch, per_class_iou_test_epoch=[],[],[],[],[]
test_epoch_results=[]
mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou=0,0,0,np.zeros((opt.num_classes)),np.zeros((opt.num_classes))
avgcountAC=0
avgcountBC=0
total_steps=0
avgtimetaken=[]
model = create_model(opt)
visualizer = Visualizer(opt)
# Test datasets for both domains (full splits: split_ratio forced to 1).
domainAdata_test= SegmentationDataset(root=opt.dataroot + '/' + opt.domain_A , split_ratio=opt.split_ratio_A,
    transform=transform, target_transform=target_transform, return_paths=True)
domainBdata_test= SegmentationDataset(root=opt.dataroot + '/' + opt.domain_B , split_ratio=opt.split_ratio_B,
    transform=transform, target_transform=target_transform2, return_paths=True)
print 'Dataset A Size:'+str(len(domainAdata_test))
print 'Dataset B Size:'+str(len(domainBdata_test))
domainAdataloader_test = torch.utils.data.DataLoader(
    domainAdata_test,
    batch_size=opt.batchSize,
    shuffle=not opt.serial_batches,
    num_workers=int(opt.nThreads))
domainBdataloader_test = torch.utils.data.DataLoader(
    domainBdata_test,
    batch_size=opt.batchSize,
    shuffle=not opt.serial_batches,
    num_workers=int(opt.nThreads))
domainAdata_iter_test = domainAdataloader_test.__iter__()
domainBdata_iter_test = domainBdataloader_test.__iter__()
# Accumulate metrics over domain A, then average.
mean_pixel_acc_test_A, mean_class_acc_test_A, mean_class_iou_test_A, per_class_acc_test_A, per_class_iou_test_A=0,0,0,np.zeros((opt.num_classes)),np.zeros((opt.num_classes))
for i in range(0,len(domainAdata_test)):
    batch_n= next(domainAdata_iter_test)
    data={}
    data['A_image'] = batch_n[0][0]
    data['A_label'] = ds1.downsize(ds1.downsize(batch_n[1][0]).data).data
    model.set_input(data,'AC')
    a,b,c,d,e=model.test()
    mean_pixel_acc_test_A +=a
    mean_class_acc_test_A +=b
    mean_class_iou_test_A +=c
    per_class_acc_test_A +=d
    per_class_iou_test_A +=e
    print 'Mean Pixel Accuracy (Domain A):'+str(a)
    print 'Mean Class Accuracy (Domain A):'+str(b)
    print 'Mean Class IoU (Domain A):'+str(c)
    print 'Per Class Accuracy (Domain A):'+str(d)
    print 'Per Class IoU (Domain A):'+str(e)
    print 'Iteration:'+str(i)
    print 'Model:'+opt.name
    if total_steps % opt.display_freq == 0:
        visualizer.display_current_results(model.get_current_visuals(), i)
# NOTE(review): only the pixel-accuracy accumulator is normalized here; the
# other four accumulators are left as sums -- confirm whether intentional.
mean_pixel_acc_test_A /= len(domainAdata_test)
cycle_data_loader=UnalignedDataLoader()
cycle_data_loader.initialize(opt,transform,transform)
|
nilq/baby-python
|
python
|
from ...attribute import models as attribute_models
from ...discount import models as discount_models
from ...product import models as product_models
from ...shipping import models as shipping_models
def resolve_translation(instance, _info, language_code):
    """Get translation object from instance based on language code."""
    # .first() returns None when no translation exists for that language.
    return instance.translations.filter(language_code=language_code).first()
def resolve_shipping_methods(info):
    """Return a queryset of all shipping methods."""
    return shipping_models.ShippingMethod.objects.all()
def resolve_attribute_values(info):
    """Return a queryset of all attribute values."""
    return attribute_models.AttributeValue.objects.all()
def resolve_products(_info):
    """Return a queryset of all products."""
    return product_models.Product.objects.all()
def resolve_product_variants(_info):
    """Return a queryset of all product variants."""
    return product_models.ProductVariant.objects.all()
def resolve_sales(_info):
    """Return a queryset of all sales."""
    return discount_models.Sale.objects.all()
def resolve_vouchers(_info):
    """Return a queryset of all vouchers."""
    return discount_models.Voucher.objects.all()
def resolve_collections(_info):
    """Return a queryset of all collections."""
    return product_models.Collection.objects.all()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.13 on 2021-04-01 08:17
from django.db import migrations, models
import ensembl.production.djcore.models
class Migration(migrations.Migration):
    """Auto-generated migration: field metadata changes on ``RequestJob``
    (verbose names, blank/editable flags) plus renaming the ``group``
    model's table to ``host_group``.  Generated by Django 3.0.13; do not
    hand-edit -- create a follow-up migration instead.
    """
    dependencies = [
        ('ensembl_dbcopy', '0005_targethostgroup'),
    ]
    operations = [
        migrations.AlterField(
            model_name='requestjob',
            name='convert_innodb',
            field=models.BooleanField(default=False, verbose_name='Convert Innodb=>MyISAM'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='dry_run',
            field=models.BooleanField(default=False, verbose_name='Dry Run'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='email_list',
            field=models.TextField(blank=True, max_length=2048, null=True, verbose_name='Notify Email(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='end_date',
            field=models.DateTimeField(blank=True, editable=False, null=True, verbose_name='Ended on'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='request_date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Submitted on'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='skip_optimize',
            field=models.BooleanField(default=False, verbose_name='Optimize on target'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='src_host',
            field=models.TextField(max_length=2048, verbose_name='Source Host'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='src_incl_db',
            field=ensembl.production.djcore.models.NullTextField(blank=True, max_length=2048, null=True,
                                                                 verbose_name='Included Db(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='src_incl_tables',
            field=ensembl.production.djcore.models.NullTextField(blank=True, max_length=2048, null=True,
                                                                 verbose_name='Included Table(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='src_skip_db',
            field=ensembl.production.djcore.models.NullTextField(blank=True, max_length=2048, null=True,
                                                                 verbose_name='Skipped Db(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='src_skip_tables',
            field=ensembl.production.djcore.models.NullTextField(blank=True, max_length=2048, null=True,
                                                                 verbose_name='Skipped Table(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='start_date',
            field=models.DateTimeField(blank=True, editable=False, null=True, verbose_name='Started on'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='status',
            field=models.CharField(blank=True, editable=False, max_length=20, null=True, verbose_name='Status'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='tgt_db_name',
            field=ensembl.production.djcore.models.NullTextField(blank=True, max_length=2048, null=True,
                                                                 verbose_name='Target DbName(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='tgt_host',
            field=models.TextField(max_length=2048, verbose_name='Target Host(s)'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='user',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Submitter'),
        ),
        migrations.AlterField(
            model_name='requestjob',
            name='wipe_target',
            field=models.BooleanField(default=False, verbose_name='Wipe target'),
        ),
        migrations.AlterModelTable(
            name='group',
            table='host_group',
        ),
    ]
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
    """ Gather the number of users who have at least one node in each of the stages for an addon
    :param user_settings_list: list of user_settings for a particular addon
    :param has_external_account: whether the addon uses external accounts; determines how node settings are loaded
    :param short_name: short name of addon to get correct node_settings
    :return: dict with number of users that have at least one project at each stage
    """
    from addons.forward.models import NodeSettings as ForwardNodeSettings
    num_enabled = 0  # of users w/ 1+ addon account connected
    num_authorized = 0  # of users w/ 1+ addon account connected to 1+ node
    num_linked = 0  # of users w/ 1+ addon account connected to 1+ node and configured
    # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
    if short_name == 'osfstorage' or short_name == 'wiki':
        # Count every active, confirmed, non-merged registered user.
        num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
            is_registered=True,
            password__isnull=False,
            merged_by__isnull=True,
            date_disabled__isnull=True,
            date_confirmed__isnull=False
        ).count()
    elif short_name == 'forward':
        # Forward has no per-user auth: every node settings row counts as
        # enabled/authorized; "linked" requires a configured URL.
        num_enabled = num_authorized = ForwardNodeSettings.objects.count()
        num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
    else:
        for user_settings in paginated(user_settings_list):
            node_settings_list = []
            if has_external_account:
                # OAuth-style addons record grants keyed by node guid.
                if user_settings.has_auth:
                    num_enabled += 1
                    node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
            else:
                # Non-OAuth addons track authorized nodes directly.
                num_enabled += 1
                node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
            # A user counts once per stage, regardless of how many nodes match.
            if any([ns.has_auth for ns in node_settings_list if ns]):
                num_authorized += 1
                if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
                    num_linked += 1
    return {
        'enabled': num_enabled,
        'authorized': num_authorized,
        'linked': num_linked
    }
class AddonSnapshot(SnapshotAnalytics):
    """Snapshot analytics task counting per-addon usage across users and nodes."""
    @property
    def collection_name(self):
        # Destination collection for the emitted events.
        return 'addon_snapshot'
    def get_events(self, date=None):
        """Build one event per available addon with user- and node-level counts."""
        super(AddonSnapshot, self).get_events(date)
        counts = []
        addons_available = {k: v for k, v in [(addon.short_name, addon) for addon in ADDONS_AVAILABLE]}
        for short_name, addon in addons_available.items():
            has_external_account = hasattr(addon.models.get('nodesettings'), 'external_account')
            connected_count = 0
            deleted_count = 0
            disconnected_count = 0
            node_settings_model = addon.models.get('nodesettings')
            if node_settings_model:
                # "Connected": node settings on a real (non-legacy-collection) node.
                for node_settings in paginated(node_settings_model):
                    if node_settings.owner and not node_settings.owner.all_tags.filter(name='old_node_collection', system=True).exists():
                        connected_count += 1
            deleted_count = addon.models['nodesettings'].objects.filter(deleted=True).count() if addon.models.get('nodesettings') else 0
            if has_external_account:
                # "Disconnected": settings row with no external account attached.
                disconnected_count = addon.models['nodesettings'].objects.filter(external_account__isnull=True, is_deleted=False).count() if addon.models.get('nodesettings') else 0
            else:
                # Without external accounts: configured but incomplete settings.
                if addon.models.get('nodesettings'):
                    for nsm in addon.models['nodesettings'].objects.filter(deleted=False):
                        if nsm.configured and not nsm.complete:
                            disconnected_count += 1
            total = connected_count + deleted_count + disconnected_count
            usage_counts = get_enabled_authorized_linked(addon.models.get('usersettings'), has_external_account, addon.short_name)
            counts.append({
                'provider': {
                    'name': short_name
                },
                'users': usage_counts,
                'nodes': {
                    'total': total,
                    'connected': connected_count,
                    'deleted': deleted_count,
                    'disconnected': disconnected_count
                }
            })
            logger.info(
                '{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
                    addon.short_name,
                    usage_counts['linked'],
                    total
                )
            )
        return counts
def get_class():
    """Return the analytics task class implemented by this module."""
    return AddonSnapshot
if __name__ == '__main__':
    # Standalone invocation: collect and send the addon snapshot events.
    addon_snapshot = AddonSnapshot()
    events = addon_snapshot.get_events()
    addon_snapshot.send_events(events)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#class dedicated to archival functions for dnmt data
import re
import sys
import subprocess,platform,os,time,datetime,zipfile
import difflib
import pickle
import collections
# import pysvn
# import git
#3rd party imports
#local subroutine import
from DNMT.procedure.subroutines import SubRoutines
class Archivist:
    """Archival helper for DNMT data: zips legacy status files into dated
    backups, optionally emails them, and prunes old backup archives."""
    def __init__(self, cmdargs, config):
        """Store CLI args/config and point the log path at today's
        UpgradeCheck log directory."""
        # initialize values
        self.log_array = []
        self.cmdargs = cmdargs
        self.config = config
        self.subs = SubRoutines(cmdargs, config)
        self.config.logpath = os.path.join(os.path.expanduser(self.config.logpath), "logs", "UpgradeCheck",
                                           datetime.date.today().strftime('%Y%m%d'))
    def basic_maintenance(self,maxfiles):
        """Delete the oldest backup zips so at most *maxfiles* remain.

        Relies on the timestamped filename prefix: lexicographic sort order
        equals chronological order, so the head of the sorted list is oldest.
        With cmdargs.check set, only reports what would be removed.
        """
        self.subs.verbose_printer("##### Cleaning up backup files #####")
        #Remove oldest files (listed first on windows
        filelist = os.listdir(os.path.join(self.subs.log_path, "activitycheck", "backups"))
        if len(filelist) > 0 and len(filelist) > maxfiles:
            # self.subs.verbose_printer("##### unsorted list:{} #####".format(filelist))
            sortedfilelist = sorted(filelist)
            # self.subs.verbose_printer("##### sorted list:{} #####".format(testlist))
            filestoremove = sortedfilelist[0:(len(filelist)-maxfiles)]
            self.subs.custom_printer("verbose", "total files:{}\nremoving files:{}".format(len(filelist),len(filestoremove)))
            for file in filestoremove:
                # Only touch files matching the backup naming scheme.
                if file.endswith("-SwitchStatus.Backup.zip"):
                    # process
                    try:
                        self.subs.verbose_printer("##### File to remove:{} #####".format(file))
                        if 'check' in self.cmdargs and self.cmdargs.check is True :
                            self.subs.custom_printer("debug", "## DBG - testing, would have removed {} ##".format(file))
                        else:
                            self.subs.custom_printer("debug", "## Removing file {} ##".format(file))
                            os.remove(os.path.join(self.subs.log_path, "activitycheck", "backups", file))
                    except Exception as err: # currently a catch all to stop linux from having a conniption when reloading
                        print("FILE ERROR {}:{}".format(file, err.args[0]))
        else:
            self.subs.verbose_printer("total files:{} are less than max value:{}".format(len(filelist), maxfiles))
    def basic_archival(self):
        """Zip every legacy raw file into a timestamped backup archive, then
        optionally email it, delete it, and run maintenance (per cmdargs).

        NOTE(review): the outer try swallows every exception and only prints
        it -- failures will not propagate to the caller.
        """
        try:
            working_folder = os.path.join(self.subs.log_path, "activitycheck", "rawfiles", "legacy")
            zipfile_name = os.path.join(self.subs.log_path, "activitycheck", "backups",
                                        "{}-SwitchStatus.Backup.zip".format(
                                            datetime.datetime.now().strftime("%Y%m%d%H%M")))
            files = os.listdir(working_folder)
            files_py = files
            # zipfile_name = "SwitchStatus Backup {}.zip".format(datetime.datetime.now().strftime("%Y%m%d%H%M"))
            #check for existance of the directory (if a first run)
            if not os.path.exists(os.path.join(self.subs.log_path, "activitycheck", "backups")):
                self.subs.custom_printer("debug", "## DBG - Creating activitycheck/backups directory ##")
                os.makedirs(os.path.join(self.subs.log_path, "activitycheck", "backups"))
            ZipFile = zipfile.ZipFile(zipfile_name, "a")
            self.subs.custom_printer("debug", "## DBG - adding files to backup zipfile:{} ##".format(zipfile_name))
            for a in files_py:
                full_file_path = os.path.join(working_folder,a)
                # ZipFile.write(full_file_path, compress_type=zipfile.ZIP_DEFLATED)
                # Archive under the bare filename (second arg) so paths
                # inside the zip are flat.
                ZipFile.write(full_file_path,a, compress_type=zipfile.ZIP_DEFLATED)
            ZipFile.close()
            self.subs.custom_printer("debug", "## DBG - zipfile backup created ##")
            if 'email' in self.cmdargs and self.cmdargs.email is not None:
                msg_subject = "SwitchStatus Backup {}".format(datetime.date.today().strftime('%Y-%m-%d'))
                body = "Attached is the Legacy Backup files"
                self.subs.custom_printer("debug", "## DBG - sending email ##")
                self.subs.email_with_attachment(msg_subject, self.cmdargs.email, body, zipfile_name)
            if 'remove' in self.cmdargs and self.cmdargs.remove:
                if os.path.exists("{}".format(zipfile_name)):
                    os.remove("{}".format(zipfile_name))
                    self.subs.custom_printer("debug", "## DBG - zipfile {} removed ##".format(zipfile_name))
                else:
                    print("The file does not exist")
            if 'maintenance' in self.cmdargs and self.cmdargs.maintenance is not None:
                try:
                    self.basic_maintenance(int(self.cmdargs.maintenance))
                except ValueError:
                    self.subs.custom_printer("debug", "## DBG - maintenance({}) is not a number. maintenance not performed ##".format(self.cmdargs.maintenance))
        except Exception as err:
            print(err)
    def test(self):
        """Placeholder for repository-archival experiments (currently a no-op)."""
        try:
            # write a file foo.txt
            pass
            # repo = Repo(self.rorepo.working_tree_dir)
            # assert not repo.bare
        except Exception as err:
            print(err)
|
nilq/baby-python
|
python
|
# Django
from django.db import models
class UserAbstractModel(models.Model):
    """Abstract base model for the API's User-related models.

    Every concrete User model inherits these bookkeeping fields:

    + created (DateTimeField): timestamp set once when the row is inserted.
    + modified (DateTimeField): timestamp refreshed on every save().
    + is_active (BooleanField): soft-delete / enabled flag (True by default).
    """
    # auto_now_add=True: Django stamps this exactly once, at INSERT time.
    created = models.DateTimeField(
        "created at",
        auto_now_add=True,
        help_text="Date Time de la creacion del objeto"
    )
    # auto_now=True: Django refreshes this on every save().
    modified = models.DateTimeField(
        "modified at",
        auto_now=True,
        help_text="Date Time de la ultima modificacion del objeto"
    )
    # Soft-delete flag; rows are deactivated rather than removed.
    is_active = models.BooleanField(
        "is active",
        default=True,
        blank=True,
        help_text="La fila esta activa o no"
    )
    class Meta:
        """Meta options: abstract base, newest-created-first ordering."""
        abstract = True
        get_latest_by = "created"
        ordering = ["-created", "modified"]
class FunctionAbstractModel(models.Model):
    """Abstract base model for the API's Management models.

    Same bookkeeping fields as UserAbstractModel, plus an owning user:

    + created (DateTimeField): timestamp set once when the row is inserted.
    + modified (DateTimeField): timestamp refreshed on every save().
    + is_active (BooleanField): soft-delete / enabled flag (True by default).
    + user (ForeignKey): the owning ``users.User``; cascade-deleted with it.
    """
    # auto_now_add=True: Django stamps this exactly once, at INSERT time.
    created = models.DateTimeField(
        "created at",
        auto_now_add=True,
        help_text="Date Time de la creacion del objeto"
    )
    # auto_now=True: Django refreshes this on every save().
    modified = models.DateTimeField(
        "modified at",
        auto_now=True,
        help_text="Date Time de la ultima modificacion del objeto"
    )
    # Soft-delete flag; rows are deactivated rather than removed.
    is_active = models.BooleanField(
        "is active",
        default=True,
        blank=True,
        help_text="La fila esta activa o no"
    )
    # Deleting the user deletes all of their rows (CASCADE).
    user = models.ForeignKey("users.User", on_delete=models.CASCADE)
    class Meta:
        """Meta options: abstract base, newest-created-first ordering."""
        abstract = True
        get_latest_by = "created"
        ordering = ["-created", "modified"]
|
nilq/baby-python
|
python
|
from time import perf_counter
def timer(func):
    """Decorator that prints how long each call to *func* takes.

    Returns the wrapped function's own return value unchanged. Uses
    functools.wraps so the wrapper keeps func's __name__/__doc__ —
    without it, introspection and stacked decorators see 'wrapper'
    instead of the real function name.
    """
    from functools import wraps  # local import: keeps block self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        start_t = perf_counter()
        r_val = func(*args, **kwargs)
        end_t = perf_counter()
        elapsed = end_t - start_t
        print(f"{func.__name__} took time: {elapsed} seconds, {elapsed/60} minutes")
        return r_val
    return wrapper
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import MetodPage
def metod(request):
    """Render a title-ordered, 10-per-page list of MetodPage objects."""
    metods = MetodPage.objects.all().order_by('title')
    paginator = Paginator(metods, 10)
    page = request.GET.get('page')
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric (or missing) ?page= parameter: fall back to page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'metodics/metod.html', {'page': page, 'metods': page_obj})
|
nilq/baby-python
|
python
|
import itertools
def subpaths_for_path_range(path_range, hardening_chars="'pH"):
"""
Return an iterator of paths
# examples:
# 0/1H/0-4 => ['0/1H/0', '0/1H/1', '0/1H/2', '0/1H/3', '0/1H/4']
# 0/2,5,9-11 => ['0/2', '0/5', '0/9', '0/10', '0/11']
# 3H/2/5/15-20p => ['3H/2/5/15p', '3H/2/5/16p', '3H/2/5/17p', '3H/2/5/18p',
# '3H/2/5/19p', '3H/2/5/20p']
# 5-6/7-8p,15/1-2 => ['5/7H/1', '5/7H/2', '5/8H/1', '5/8H/2',
# '5/15/1', '5/15/2', '6/7H/1', '6/7H/2', '6/8H/1', '6/8H/2', '6/15/1', '6/15/2']
"""
if path_range == '':
yield ''
return
def range_iterator(the_range):
for r in the_range.split(","):
is_hardened = r[-1] in hardening_chars
hardened_char = hardening_chars[-1] if is_hardened else ''
if is_hardened:
r = r[:-1]
if '-' in r:
low, high = [int(x) for x in r.split("-", 1)]
for t in range(low, high+1):
yield "%d%s" % (t, hardened_char)
else:
yield "%s%s" % (r, hardened_char)
components = path_range.split("/")
iterators = [range_iterator(c) for c in components]
for v in itertools.product(*iterators):
yield '/'.join(v)
"""
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Play Nerdle (https://nerdlegame.com)
"""
import json
import logging
from itertools import product
from os import mkdir
from os.path import dirname, join, realpath
from typing import Any, Iterator, List, Optional, Tuple
from .exceptions import CorrectAnswer, OutOfEquations, OutOfGuesses
class NerdleSolver:
    """
    Encapsulates the logic of Nerdle (https://nerdlegame.com)

    Pattern characters used throughout:
      '!' - character is correct and in the right position
      '?' - character is in the expression but at another position
      '.' - character is not in the expression (or all copies already placed)
      'D' - internal-only marker in calculate_pattern, resolved to '?'/'.'
    """
    # I like using short variable names in my methods. And I really
    # don't care if I burn a few hundred microseconds interpolating a
    # string I'm not going to emit.
    # pylint: disable="invalid-name"
    # pylint: disable="logging-not-lazy"
    # pylint: disable="logging-fstring-interpolation"
    # pylint: disable="too-many-instance-attributes"
    def __init__(
        self,
        answer: str = "",
        debug: bool = False,
        expression_length: int = 8,
        guesses: int = 6,
        initial_guess: str = "",
        top: int = 5,
        expr_file: str = "",
    ): # pylint: disable = "too-many-arguments"
        # pylint: disable = "too-many-statements"
        self.debug = debug
        self.log = logging.getLogger(__name__)
        level = logging.INFO
        if self.debug:
            level = logging.DEBUG
        logging.basicConfig(level=level)
        self.log.setLevel(level)
        self.valid_guess: str = ""
        self.guess: str = initial_guess
        self.answer: str = answer
        self.expr_value: int = -1
        self.expression_length: int = expression_length
        self.current_pattern: str = ""
        self.guesses: int = 1
        self.max_guesses: int = guesses
        self.top: int = top
        self.known_character_positions: str = "X" * self.expression_length
        # One set per board position: the characters still allowed there.
        # (annotation fixed: was the invalid call-syntax `List(set[str])`)
        self.position_could_be: list[set[str]] = []
        self.legal_chars: str = "0123456789+-*/="
        # pylint: disable="consider-using-set-comprehension"
        # pylint: disable="unnecessary-comprehension"
        for _ in range(self.expression_length):
            self.position_could_be.append(set([c for c in self.legal_chars]))
        self.not_in_expr: set[str] = set()
        self.in_expr: set[str] = set()
        self.guesses_tried: list[str] = []
        # in self._expr_by_str, a value of None means the expression does not
        # parse. This lets us cache failed parses as well.
        self._expr_by_str: dict[str, Optional[int]] = {}
        self._expr_by_val: dict[int, List[str]] = {}
        # We only actually use them, though, if we have to build our own
        # list of valid equations
        self._valid_equations: dict[str, int] = {}
        expr_loaded = False
        if not expr_file:
            datadir = realpath(join(dirname(__file__), "static"))
            expr_file = f"{datadir}/equations-{self.expression_length}.json"
        try:
            with open(expr_file, "r", encoding="utf-8") as f:
                _exprs = json.load(f)
            self._valid_equations = _exprs
            expr_loaded = True
        except Exception as exc: # pylint: disable="broad-except"
            self.log.debug(f"Failed to read expr file {expr_file}: {exc}")
            self.log.debug("Calculating legal expressions")
            self.generate_legal_expressions()
        if not expr_loaded:
            try:
                self.log.debug("Writing expression cache")
                try:
                    # NOTE(review): if a caller-supplied expr_file failed to
                    # load, 'datadir' was never assigned and this mkdir raises
                    # NameError (silently caught by the broad except below) —
                    # TODO confirm intended.
                    mkdir(datadir)
                except FileExistsError:
                    pass
                with open(expr_file, "w", encoding="utf-8") as f:
                    json.dump(self._valid_equations, f)
            except Exception as exc: # pylint: disable="broad-except"
                self.log.debug(f"Failed to write expr file {expr_file}: {exc}")
        self.remaining_possibilities: list[str] = list(
            self._valid_equations.keys()
        )
        self.remaining_possibilities.sort() # Initial lexicographic sort
        self._rp_hash = {}
        for rp in self.remaining_possibilities:
            # looking up a hash entry is constant-time. Looking up in a
            # list is linear. Validating our equations relies on that
            # lookup. This actually makes an obvious difference when iterating
            # through all equations.
            self._rp_hash[rp] = True
        self.sort_remainder()
        if not self.guess:
            self.log.debug(
                "Best initial guesses:"
                + f"{self.remaining_possibilities[:self.top]}"
            )
    def play(self) -> None:
        """
        Main loop of the game. If we get the right answer, it will raise
        CorrectAnswer; if we run out of guesses, OutOfGuesses. We catch
        the first, report, and return. We let the second propagate.
        """
        while True:
            self.log.info(f"{self.guesses}/{self.max_guesses}")
            try:
                self.loop_once()
            except CorrectAnswer as exc:
                self.log.info(
                    f"Correct answer: '{exc}' in {self.guesses} guesses"
                )
                return
    def loop_once(self) -> None:
        """
        Single pass through the loop: pick/show a guess, obtain it,
        score it against the answer, then prune and re-rank candidates.
        """
        self.choose_or_show_next_guess()
        self.get_current_guess()
        self.update_pattern()
        self.guess = ""
        self.valid_guess = ""
        self.restrict_possibilities()
        self.sort_remainder()
    def get_current_guess(self) -> None:
        """
        Get and check a guess; loops until a valid one is accepted.
        """
        while not self.valid_guess:
            self.solicit_current_guess()
            self.check_current_guess()
    def solicit_current_guess(self) -> None:
        """
        Interactively get a guess
        """
        if not self.guess: # Initial guess will do, if we have one.
            self.guess = input("Guess expression > ")
    def check_current_guess(self) -> None:
        """
        Is the supplied guess valid? On failure, clear it so the loop asks again.
        """
        try:
            self.validate_guess(self.guess)
            self.valid_guess = self.guess
        except ValueError as exc:
            self.log.warning(f"'{self.guess}' failed: {exc}")
            self.guess = ""
            self.expr_value = -1
    def parse_expr(self, expr: str) -> int:
        """
        This is the central feature. Take a string consisting of digits and
        operators (excluding '='), and try to reduce it to an integer.

        NOTE(review): the docstring below claims successes are cached here,
        but this method only stores failures (None); successful results are
        stored by the caller via store_expr — TODO confirm and reconcile.

        If it succeeds, it stores the result in the self._expr_by_str cache,
        and if it fails, it stores None in that cache.
        """
        # Yes, it's a little complicated.
        #
        # pylint: disable = "too-many-statements"
        # pylint: disable = "too-many-branches"
        if expr in self._expr_by_str:
            if self._expr_by_str[expr] is None:
                raise ValueError(f"'{expr}' is known not to parse")
            return self._expr_by_str[expr]
        # Tokenize: build ttok as alternating numbers and operator strings.
        ttok = []
        curstr = ""
        for c in expr:
            if c.isdigit():
                curstr += c
            else:
                try:
                    # This will catch both leading zeroes and repeated
                    # operators
                    ttok.append(self.check_is_valid_number(curstr))
                except ValueError:
                    # Mark as invalid
                    self.store_expr(expr, None)
                    raise
                curstr = ""
                ttok.append(c)
        if curstr:
            # The last token was a number
            ttok.append(self.check_is_valid_number(curstr))
        # Now ttok contains alternating ints and strings representing
        # operations
        while True:
            if len(ttok) == 1:
                # Fully reduced; enforce Nerdle's non-negative-integer rule.
                if ttok[0] < 0:
                    raise ValueError("Only non-negative results allowed")
                if ttok[0] != int(ttok[0]):
                    raise ValueError("Only integer expressions allowed")
                ttok = [int(ttok[0])]
                break
            for idx, tok in enumerate(ttok):
                if isinstance(tok, int) or isinstance(tok, float):
                    continue
                if tok in ("*", "/"): # high-priority operator
                    if tok == "/":
                        # Division by zero can't happen without constant
                        # zero terms, because subtraction is lower-priority.
                        #
                        # However: we CAN have fractional terms, as long
                        # as they become integers by the time we have finished
                        # computing the expression.
                        result = ttok[idx - 1] / ttok[idx + 1]
                    else:
                        result = ttok[idx - 1] * ttok[idx + 1]
                else:
                    if "*" in ttok or "/" in ttok:
                        # We can't parse low-priority operators until
                        # we have exhausted the high-priority operators.
                        continue
                    if tok == "+":
                        result = ttok[idx - 1] + ttok[idx + 1]
                    else:
                        result = ttok[idx - 1] - ttok[idx + 1]
                # Replace the numbers on either side of the operator,
                # and the operator itself, with the result. Restart
                # parsing ttok.
                first = []
                last = []
                if idx > 2:
                    first = ttok[: idx - 1]
                if len(ttok) > idx + 1:
                    last = ttok[idx + 2 :]
                ttok = first + [result] + last
                break # From the inner for, not the 'while True'
        lhs = ttok[0]
        return lhs
    def validate_guess(self, guess) -> None:
        """
        Only returns if guess is plausible; raises ValueError otherwise
        """
        if guess not in self._rp_hash:
            raise ValueError(f"'{guess}' is not in remaining_possibilities.")
        chars_in_guess = set(guess)
        # NOTE(review): strict-subset test — a guess missing a required char
        # while containing extras is NOT a subset and slips through; the
        # intended check looks like `not (self.in_expr <= chars_in_guess)` —
        # TODO confirm.
        if chars_in_guess < self.in_expr:
            raise ValueError(
                f"{self.in_expr} are all in the expression, but "
                + f"{self.guess} only has {chars_in_guess}"
            )
        for idx, c in enumerate(guess):
            if c in self.not_in_expr:
                raise ValueError(f"'{c}' is known to not be in expression")
            if c not in self.position_could_be[idx]:
                raise ValueError(
                    f"'{c}' cannot be in position {idx}: "
                    + f"not one of {self.position_could_be[idx]}"
                )
        # Well, it *could* be right.
    def update_pattern(self) -> None:
        """
        If we know the answer, figure out the pattern; if not, request it
        from the user (who's presumably getting it from the game)
        """
        if self.answer:
            self.calculate_pattern()
        else:
            self.solicit_pattern()
        if self.current_pattern == "!" * self.expression_length:
            raise CorrectAnswer(self.guess)
        self.guesses += 1
        if self.guesses > self.max_guesses:
            raise OutOfGuesses()
        self.update_positions()
    def calculate_pattern(self) -> None:
        """
        If we know the answer, generate the response pattern
        """
        pattern = ""
        assert self.answer, "Cannot calculate pattern without the answer"
        for idx, c in enumerate(self.valid_guess):
            self.log.debug(f"considering '{c}' in position {idx}")
            p = self.answer[idx]
            if c == p:
                self.log.debug(f"'{c}' is in position {idx}")
                pattern += "!"
            elif c not in self.answer:
                self.log.debug(f"'{c}' does not appear in expression")
                pattern += "."
            else:
                self.log.debug(
                    f"'{c}' appears in expression, but not in position {idx}"
                )
                pattern += "D"
        # Just like update_positions, we do a second pass to catch multiples
        # where we already have them all
        for idx, c in enumerate(self.valid_guess):
            if pattern[idx] != "D":
                continue
            actual_count = self.answer.count(c)
            # How many do we have that we know where they are?
            # There's gotta be a better way to do this, but let's get it
            # working first.
            pattern_count = 0
            for a_idx, a_c in enumerate(self.answer):
                if a_c == c:
                    if pattern[a_idx] == "!":
                        pattern_count += 1
            assert pattern_count <= actual_count, f"Overcount of '{c}'"
            # This might not be stable.
            pattern_char = "?" # Default: we don't know where they all are
            if pattern_count == actual_count:
                self.log.debug(f"Already found all occurrences of '{c}'")
                pattern_char = "."
            else:
                self.log.debug(f"'{c}' appears but position unknown")
            # This should just replace this "D" with a resolved "?" or "."
            pattern = pattern[:idx] + pattern_char + pattern[idx + 1 :]
        self.current_pattern = pattern
    def solicit_pattern(self) -> None:
        """
        Since we don't know the answer, ask about the pattern.
        Only accepts a string of the right length made of '!', '?', '.'.
        """
        while True:
            response = input("Response pattern > ")
            if len(response) != self.expression_length:
                continue
            rchars = set(response)
            if not rchars <= set("!?."):
                self.log.debug(f"rchars {rchars}; {set('!?.')}")
                continue
            self.current_pattern = response
            break
    def update_positions(self) -> None:
        """
        For each position in the expression, update the set of possible
        characters
        """
        self.guesses_tried.append(self.valid_guess)
        for idx, c in enumerate(self.current_pattern):
            g = self.valid_guess[idx]
            setc = set(g)
            if c == "!":
                self.position_could_be[idx] = setc # Fixed in place
                self.in_expr |= setc
                self.log.debug(f"position {idx}: '{g}'")
                continue
            if c == "?":
                # Symmetric difference: removes g from this position's
                # candidate set (it is present on the first exclusion).
                self.position_could_be[idx] ^= setc
                self.in_expr |= setc
                self.log.debug(f"position {idx}: not '{g}'")
                self.log.debug(f"'{g}' in expression")
        # Now we start over. This catches the case of "not in word" that
        # really means "it's a multiple, and you have too many, and it's
        # not here" because by the time we do this, if we have any
        # occurrences, they will be in self.in_expr
        for idx, c in enumerate(self.current_pattern):
            if c == ".":
                g = self.valid_guess[idx]
                setc = set(g)
                self.position_could_be[idx] ^= setc
                self.log.debug(f"position {idx}: not '{g}'")
                if g not in self.in_expr:
                    self.log.debug(f"'{g}' not in expression")
                    self.not_in_expr |= setc
    def generate_legal_expressions(self):
        """
        If we did not have an expression file to load, generate legal
        equations. This takes a while to run.
        """
        # NOTE(review): 'eqn' is initialized as a dict here but is rebound
        # to an f-string inside the loop below; the dict is never used —
        # TODO confirm the annotation/initialization is vestigial.
        eqn: dict[str, bool] = {}
        e_l = self.expression_length
        equals_position = [e_l - 3, e_l - 2] # Two-digit answers, then one.
        if e_l > 6:
            for i in range(e_l - 3, 3, -1): # Then longer answers
                equals_position.append(i)
        # '=' cannot be farther to the left than the fourth character, because
        # the first three (at least) must be a OPR b . Since the string length
        # is even, both sides cannot just be numbers, and the right hand side
        # has to be a non-negative integer without a leading zero (unless it
        # is just zero), so the equal sign can't be at the end.
        #
        # This is dumb, but what we are going to do is brute-force the solution
        # space, with the equals sign in the above place in each place in the
        # sequence based on my intuition that that the given sequence
        # represents the sequence of most likely places for it.
        #
        for eqp in equals_position:
            for exp_tuple in self.generate_expressions(eqp):
                q = "".join(exp_tuple)
                try:
                    _ = int(q)
                    continue
                    # It's an integer constant. It evaluates to itself,
                    # and it is not worth storing.
                except ValueError:
                    pass
                try:
                    lhs = self.parse_expr(q)
                    eqn = f"{q}={lhs}"
                    self.store_expr(q, lhs)
                except ValueError as exc:
                    self.log.debug(f"{q} does not parse: {exc}")
                    self.store_expr(q, None)
                    continue
                # Mark the equation as true
                self.store_expr(eqn, lhs)
                # Well, it's true, buuuuut...not valid by our rules.
                # So we don't store it as a valid equation.
                #
                # The LHS *is* permitted to be a lone zero.
                #
                if len(eqn) == self.expression_length:
                    self._valid_equations[eqn] = lhs
                # I thought about storing all the equations that evaluated
                # to invalid answers, but it takes a lot of memory for
                # not much gain.
    def store_expr(self, expr: str, val: Optional[int]):
        """
        Determining whether an expression has a legal evaluation is
        expensive, so we build a cache so we only evaluate each expression
        once.
        """
        if expr in self._expr_by_str:
            oldval = self._expr_by_str[expr]
            if oldval == val:
                return
            raise ValueError(f"Does '{expr}' evaluate to {oldval} or {val}?")
        try:
            # There's no point in storing integer constants: testing equality
            # is faster than looking up the map and then testing equality.
            _ = int(expr)
            return
        except ValueError:
            pass
        self._expr_by_str[expr] = val
        if val is None:
            return
        self.log.debug(f"Stored '{expr}' -> {val}")
        if val not in self._expr_by_val:
            self._expr_by_val[val] = []
        if expr not in self._expr_by_val[val]:
            self._expr_by_val[val].append(expr)
    def generate_expressions(self, e_l: int) -> Iterator[Tuple[Any, ...]]:
        """
        Generate all expressions of length e_l. Returns an iterator so we
        can chew through, and cache, all the ones that parse to an integer
        value.
        """
        # '=' is excluded on the LHS; operators are excluded from the ends.
        legal_rhs_chars = set("=") ^ set(self.legal_chars)
        digits = set("+-*/") ^ set(legal_rhs_chars)
        assert e_l > 2, "expression length must be at least 3"
        assert (
            e_l < self.expression_length - 1
        ), f"expression length must be at most {self.expression_length - 2}"
        # We know the first and last character are digits
        exp_args = [digits]
        for _ in range(e_l - 2):
            exp_args.append(legal_rhs_chars)
        exp_args.append(digits)
        expr = product(*exp_args) # itertools is awesome
        return expr
    def check_is_valid_number(self, n: str) -> int:
        """
        Check whether a string is a valid-by-Nerdle-rules number: return
        the corresponding int if so.

        Raises ValueError on: empty string, non-digits, leading zeroes,
        or a lone "0".
        """
        # It's prettier this way.
        # pylint: disable="no-self-use"
        if not n:
            raise ValueError("The empty string is not a number")
        for c in n:
            if not c.isdigit():
                raise ValueError("numbers are made of digits")
        if len(n) > 1:
            if n[0] == "0":
                raise ValueError(
                    "Leading zeroes on multi-digit numbers are not allowed"
                )
        if n == "0":
            raise ValueError("Lone zeroes are not allowed")
        i_n = int(n)
        return i_n
    def restrict_possibilities(self) -> None:
        """
        Iterate through our remaining valid equations, eliminating the ones
        that don't fit the observed facts.
        """
        remainder = []
        rl = len(self.remaining_possibilities)
        for s in self.remaining_possibilities:
            try:
                self.validate_guess(s)
                remainder.append(s)
            except ValueError as exc:
                self.log.debug(f"'{s}' is eliminated: '{exc}'")
        rr = len(remainder)
        if rr == 0:
            raise OutOfEquations("No possible valid equations remain")
        self.log.debug(f"{rl - rr} equations eliminated: {rr} remain")
        remainder.sort() # Having a stable order makes testing easier
        self.remaining_possibilities = remainder
        self._rp_hash = {}
        for rp in self.remaining_possibilities:
            self._rp_hash[rp] = True
    def sort_remainder(self) -> None:
        """
        Return the "best" remaining possibilities, for some metric of "best"
        """
        # No idea what the best strategy here is. Let's pick the ones with
        # the most unconfirmed characters? (Eliminated characters were
        # eliminated from remaining_possibilities already)
        #
        # So, in order: most unconfirmed characters, most characters,
        # mixing and matching from operator precedence, and finally we
        # inherit from the lexigraphic sort.
        self.remaining_possibilities.sort(
            key=lambda e: (
                len(set(e) - self.in_expr),
                len(set(e)),
            ),
            reverse=True,
        )
    def choose_or_show_next_guess(self) -> None:
        """
        We have a sorted list of remaining guesses. If we know the answer,
        pick the top one. If we don't, display some to the user to prompt
        the next guess.
        """
        if self.answer:
            if self.guesses == 1:
                if self.guess:
                    self.log.debug(f"Using initial guess '{self.guess}'")
                    if self.guess in self.remaining_possibilities:
                        self.valid_guess = self.guess
                        return
                    self.log.debug(f"Guess '{self.guess}' is invalid")
            self.log.debug("Choosing best guess")
            self.guess = self.remaining_possibilities[0]
            self.valid_guess = self.guess
            return
        if not self.guess:
            best = self.remaining_possibilities[: self.top]
            print(f"Best remaining possibilities: {', '.join(best)}")
|
nilq/baby-python
|
python
|
from dbt.contracts.graph.manifest import Manifest
import os
from test.integration.base import DBTIntegrationTest, use_profile
def get_manifest():
    """Load the msgpack'd partial-parse Manifest from target/, or None if missing."""
    path = './target/partial_parse.msgpack'
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as fp:
        manifest: Manifest = Manifest.from_msgpack(fp.read())
    return manifest
class TestAllExperimentalParser(DBTIntegrationTest):
    """Integration test: run `dbt parse` with the experimental parser and
    verify model_a's extracted refs/sources/config land in the manifest."""
    @property
    def schema(self):
        # Schema name ties this test to its fixture directory.
        return "072_experimental_parser"
    @property
    def models(self):
        # Relative path to the models used by this test.
        return "models"
    @use_profile('postgres')
    def test_postgres_experimental_parser(self):
        """Parse with --use-experimental-parser and check extracted metadata."""
        # Return value unused; a failed parse raises inside run_dbt.
        results = self.run_dbt(['--use-experimental-parser', 'parse'])
        manifest = get_manifest()
        node = manifest.nodes['model.test.model_a']
        self.assertEqual(node.refs, [['model_a']])
        self.assertEqual(node.sources, [['my_src', 'my_tbl']])
        self.assertEqual(node.config._extra, {'x': True})
        self.assertEqual(node.config.tags, ['hello', 'world'])
|
nilq/baby-python
|
python
|
import time
from base.common.skeleton_base import SkeletonBase
from base.constants import DEFAULT_BEFORE_EXPIRES
from base.exceptions import ChannelTemplateNotFound
from base.helpers import validate_channel
from base.utils import format_response
from typing import Dict
from .router import *
from .webhook import *
from .token_refresher import TokenRefresherManager
class SkeletonDevice(SkeletonBase):
    def __init__(self, mqtt=None):
        """Initialize the device skeleton.

        mqtt: optional MQTT client, forwarded to SkeletonBase.
        """
        super(SkeletonDevice, self).__init__(mqtt)
        # self.DEFAULT_BEFORE_EXPIRES = DEFAULT_BEFORE_EXPIRES
        # Safety margin (seconds): tokens are treated as expired this long
        # before their real expiry, configurable via config_refresh.
        self.before_expires = settings.config_refresh.get('before_expires_seconds', DEFAULT_BEFORE_EXPIRES)
@property
def _swap_url(self) -> str:
server = settings.api_server
version = settings.api_version
client_id = settings.client_id
url = '{}/{}/managers/{}/swap-credentials'.format(server, version, client_id)
return url
@staticmethod
def _credentials_dict(credentials, sender):
credentials_dict = {
'key': sender['key'],
'value': credentials
}
return credentials_dict
    def swap_credentials(self, credentials, sender, token_key='access_token') -> Dict:
        """POST the owner's token to the platform's swap-credentials endpoint.

        credentials: raw credentials (normalized via auth_response).
        sender: dict with at least 'client_id'/'owner_id' hints.
        token_key: which token field to forward (default 'access_token').
        Returns the platform's JSON payload on HTTP 200, otherwise {}.
        """
        url = self._swap_url
        credentials = self.auth_response(credentials) or {}
        if credentials:
            payload = {
                "client_id": sender.get('client_id', credentials.get('client_id', '')),
                "owner_id": sender.get('owner_id', ''),
                "credentials": {
                    token_key: credentials.get(token_key, '')
                }
            }
            response = requests.request('POST', url, headers=self.header, json=payload)
        else:
            logger.warning("[swap_credentials] Credentials not sent")
            return {}
        if response and response.status_code == 200:
            return response.json()
        else:
            # Strip the token from the payload before logging it.
            payload.pop('credentials', None)
            self.log(f'Error on request swap credentials. Status code: {response.status_code}; URL: {url}; '
                     f'Payload: {payload}', 3)
            return {}
    def check_manager_client_id(self, owner_id, channel_id, main_credentials, second_credentials=None):
        """
        Check if credentials has manager_client_id. Update credentials calling swap credentials if not

        Tries the main credentials first, then the secondary ones.
        Returns (credentials, has_error): has_error is True only when both
        swap attempts failed to supply a 'client_man_id'.
        """
        second_credentials = second_credentials or {}
        credentials = self.auth_response(main_credentials)
        has_error = False
        if 'client_man_id' not in credentials:
            sender = {
                'client_id': credentials.get('client_id'),
                'owner_id': owner_id,
                'key': f"credential-owners/{owner_id}/channels/{channel_id}"
            }
            logger.debug(f"[check_manager_client_id] Will try to swap credentials for sender: {sender}")
            swap_credentials = self.swap_credentials(credentials, sender)
            if swap_credentials:
                credentials['client_man_id'] = swap_credentials.get('client_id')
            else:
                logger.warning("[check_manager_client_id] Invalid swap credentials return with main credentials")
                # Fall back to the secondary credentials before giving up.
                second_credentials = self.auth_response(second_credentials)
                swap_credentials = self.swap_credentials(second_credentials, sender)
                if swap_credentials:
                    credentials['client_man_id'] = swap_credentials.get('client_id')
                else:
                    logger.warning("[check_manager_client_id] Invalid swap credentials return with secondary credentials")
                    has_error = True
        return credentials, has_error
    def auth_requests(self, sender):
        """
        *** MANDATORY *** — must be overridden by each implementer.

        Receives,
            sender - A dictionary with keys 'channel_template_id', 'owner_id' and 'client_id'.
        Returns a list of dictionaries with the structure,
        [
            {
                "method" : "<get/post>"
                "url" : "<manufacturer's authorize API uri and parameters>"
                "headers" : {}
            },
            ...
        ]
        If the value of headers is {} for empty header, otherwise it follows the structure as of the
        sample given below.
        "headers" : {
            "Accept": "application/json",
            "Authorization": "Bearer {client_secret}"
        }
        Each dictionary in list represent an individual request to be made to manufacturer's API and
        its position denotes the order of request.
        """
        # Deliberate stub: subclasses provide the real implementation.
        return NotImplemented
    def get_devices(self, sender, credentials):
        """
        *** MANDATORY *** — must be overridden by each implementer.

        Receives,
            credentials - All persisted user credentials.
            sender - A dictionary with keys 'channel_template_id', 'owner_id' and 'client_id'.
        Returns a list of dictionaries with the following structure,
        [
            {
                "content" : "<device name>",
                "id" : "<manufacturer's device ID>",
                "photoUrl" : "<url to device's image in cdn.muzzley.com>"
            },
            ...
        ]
        Each dictionary in list denotes a device of user.
        """
        # Deliberate stub: subclasses provide the real implementation.
        return NotImplemented
    def update_channel_template(self, device_id):
        """
        This method is used to return a channel_template other than the one sent in request on select_devices
        :param device_id: Dict of device characteristics
        :return: new_channel_id or None (None keeps the requested template)
        """
        return None
    def did_pair_devices(self, credentials, sender, paired_devices, channels):
        """
        *** MANDATORY *** — must be overridden by each implementer.

        Invoked after successful device pairing.
        Receives,
            credentials - All persisted user credentials.
            sender - A dictionary:
                 {'channel_template_id': xxxx-xxxxx-xxxxx-xxxx,
                  'owner_id': xxxx-xxxxx-xxxxx-xxxx,
                  'client_id': xxxx-xxxxx-xxxxx-xxxx}
            paired_devices - A list of dictionaries with selected device's data
            channels - A list of channels_id from paired_devices
        """
        # Deliberate stub: subclasses provide the real implementation.
        return NotImplemented
    def access_check(self, mode, case, credentials, sender):
        """
        *** MANDATORY ***
        Checks for access to manufacture for a component, replace if requires a different process
        Receives,
            mode - 'r' or 'w'
                r - read from manufacturer's API
                w - write to manufacturer's API
            case - A dictionary with keys 'device_id','channel_id','component' and 'property'.
            credentials - credentials of user from database
            sender - A dictionary with keys 'owner_id' and
                'client_id'.
        Returns updated valid credentials or current one or None if no access

        NOTE(review): when 'key' is missing from sender (or on any exception)
        this falls through to the final log and returns None — confirm that
        "no key" is intended to mean "no access".
        """
        try:
            now = int(time.time())
            expiration_date = credentials['expiration_date']
            if 'key' in sender:
                if now >= expiration_date:  # we should refresh the token
                    self.log('[access_check] token is expired trying to refresh {}'.format(sender['key']), 7)
                    credentials_dict = self._credentials_dict(credentials, sender)
                    credentials = self.refresh_token(credentials_dict)
                return credentials
        except KeyError as e:
            self.log('Error: missing {} key'.format(e), 4)
        except Exception:
            self.log('Unexpected error {}'.format(traceback.format_exc(limit=5)), 3)
        self.log(f'Missing info in access_check: \nsender: {sender} \ncase:{case}', 9)
        return None
    def polling(self, data):
        """
        Invoked by the manager itself when performing a polling request to manufacturer's API
        Receives,
            data - A dictionary with keys 'channel_id', 'credentials' and 'response' where response is a json object

        Raises NotImplementedError unless the implementer overrides it
        (i.e. polling must not be enabled without a handler).
        """
        raise NotImplementedError('No polling handler implemented')
def get_channel_template(self, channel_id):
"""
Input :
channel_id - channel_id of the device.
Returns channel_template_id
"""
channel = validate_channel(channel_id)
return channel['channeltemplate_id'] if (channel and 'channeltemplate_id' in channel) else ''
    def get_channels_by_channeltemplate(self, channeltemplate_id):
        """
        Input :
            channeltemplate_id - channeltemplate_id of the device.
        Returns list of channels_id on success, '' on any failure
        (note: the two return types differ — callers must handle both).
        """
        try:
            if not channeltemplate_id:
                logger.warning(f"[get_channels_by_channeltemplate] Invalid channeltemplate_id")
                return ''
            url = f"{settings.api_server_full}/managers/{settings.client_id}/channels?" \
                  f"page_size=9999&channel.channeltemplate_id={channeltemplate_id}&fields=channel.id"
            resp = requests.get(url, headers=self.header)
            logger.verbose("[get_channels_by_channeltemplate] Received response code[{}]".format(resp.status_code))
            if int(resp.status_code) == 200:
                # Flatten the platform's paging envelope down to channel ids.
                return [client_channel.get('channel', {}).get("id") for client_channel in
                        resp.json().get("elements", [])]
            else:
                raise ChannelTemplateNotFound("Failed to retrieve channel_ids for {}".format(channeltemplate_id))
        except (OSError, ChannelTemplateNotFound) as e:
            logger.warning('[get_channels_by_channeltemplate] Error while making request to platform: {}'.format(e))
        except Exception:
            logger.alert("[get_channels_by_channeltemplate] Unexpected error: {}".format(traceback.format_exc(limit=5)))
        return ''
    def get_channel_by_owner(self, owner_id, channel_id):
        """
        Input :
            owner_id
            channel_id
        Returns channeltemplate_id on success.

        NOTE(review): returns False on HTTP 204 (no content) but '' on
        errors — callers must distinguish the two falsy values; confirm
        this asymmetry is intentional.
        """
        url = "{}/users/{}/channels?channel_id={}".format(settings.api_server_full, owner_id, channel_id)
        try:
            resp = requests.get(url, headers=self.header)
            if int(resp.status_code) == 200:
                return resp.json()['elements'][0]['channel']["channeltemplate_id"]
            elif int(resp.status_code) == 204:  # No content
                logger.verbose("[get_channel_by_owner] Received response code[{}]".format(resp.status_code))
                return False
            else:
                logger.verbose("[get_channel_by_owner] Received response code[{}]".format(resp.status_code))
                raise ChannelTemplateNotFound(f"[get_channel_by_owner] Failed to retrieve channel_template_id "
                                              f"for {channel_id}")
        except (OSError, ChannelTemplateNotFound) as e:
            logger.warning('[get_channel_by_owner] Error while making request to platform: {}'.format(e))
        except Exception:
            logger.alert("[get_channel_by_owner] Unexpected error: {}".format(traceback.format_exc(limit=5)))
        return ''
    def get_device_id(self, channel_id):
        """
        To retrieve device_id using channel_id (delegates to the DB layer).
        """
        return self.db.get_device_id(channel_id)
    def get_channel_id(self, device_id):
        """
        To retrieve channel_id using device_id (delegates to the DB layer).
        """
        return self.db.get_channel_id(device_id)
def get_polling_conf(self):
"""
Required configuration if polling is enabled
Returns a dictionary or a list of dictionaries:
{
url (required): polling manufacturer url
method (required): HTTP method to use: GET / POST
params: URL parameters to append to the URL (used by requests)
data: the body to attach to the request (used by requests)
}
"""
raise NotImplementedError('polling ENABLED but conf NOT DEFINED')
# -------------
# TOKEN REFRESH
# -------------
def get_refresh_token_conf(self):
"""
Required configuration if token refresher is enabled
Returns a dictionary
url - token refresh manufacturer url
headers - if required a dict with necessary headers
"""
raise NotImplementedError('token refresher ENABLED but conf NOT DEFINED')
    def refresh_token(self, credentials_dict):
        """Refresh an (OAuth-style) access token for this implementer.

        :param credentials_dict: stored credentials; the refresh token is
            read from credentials_dict['value']['refresh_token'].
        :return: the refreshed credentials dict on success, otherwise None.
        """
        refresh_token = credentials_dict.get('value', {}).get('refresh_token', '')
        refresher = TokenRefresherManager(implementer=self)
        conf = self.get_refresh_token_conf()
        response = refresher.send_request(refresh_token, credentials_dict, conf)
        self.log('refresh_token response {}'.format(response), 7)
        if type(response) is dict and 'credentials' in response:
            # Let the implementer react to the refresh before returning.
            self.after_refresh(response)
            return response['credentials']
        return None
    def after_refresh(self, data):
        """
        Hook invoked by the manager after a token is successfully refreshed.

        :param data: dict with keys 'channel_id' and 'new_credentials'.

        Overriding is optional; the default implementation does nothing.
        """
        pass
def update_expiration_date(self, credentials):
now = int(time.time())
expires_in = int(credentials['expires_in']) - self.before_expires
expiration_date = now + expires_in
credentials['expiration_date'] = expiration_date
return credentials
    def store_credentials(self, owner_id, client_app_id, channeltemplate_id, credentials):
        """Push refreshed credentials back to the platform manager endpoint.

        :param owner_id: platform id of the owning user
        :param client_app_id: client application id
        :param channeltemplate_id: template the channel was created from
        :param credentials: credentials payload to persist
        :return: True when the platform reports at least one updated record,
            False otherwise (missing input, nothing matched, HTTP error, or
            an unexpected exception -- all logged, never raised).
        """
        try:
            url = f"{settings.api_server_full}/managers/{settings.client_id}/store-credentials"
            payload = {
                'client_id': client_app_id,
                'owner_id': owner_id,
                'channeltemplate_id': channeltemplate_id,
                'credentials': credentials
            }
            # All four fields are mandatory; bail out before hitting the API.
            if not (client_app_id and owner_id and channeltemplate_id and credentials):
                logger.warning(f'[store_credentials] Invalid payload request client_id: {client_app_id}; '
                               f'owner_id: {owner_id}; channeltemplate_id: {channeltemplate_id}')
                return False
            logger.verbose(f"[store_credentials] Try to update credentials for channeltemplate_id {channeltemplate_id}")
            resp = requests.post(url, headers=self.header, json=payload)
            logger.verbose(f"[store_credentials] Received response code[{resp.status_code}]")
            if int(resp.status_code) == 200 and resp.json().get('n_updated'):
                return True
            elif int(resp.status_code) == 200 and resp.json().get('n_updated', 0) == 0:
                # 200 but nothing matched: log the payload without the secret.
                payload.pop('credentials', None)
                logger.warning(f'[store_credentials] credentials not found to patch with requested data: '
                               f'{payload}')
                return False
            else:
                logger.warning(f'[store_credentials] Error while making request to platform: {format_response(resp)}')
                return False
        except Exception:
            logger.alert(f"[store_credentials] Unexpected error store_credentials: {traceback.format_exc(limit=5)}")
            return False
# Register SkeletonDevice against SkeletonBase -- presumably an abc virtual
# subclass registration so isinstance/issubclass checks accept it; confirm.
SkeletonBase.register(SkeletonDevice)
|
nilq/baby-python
|
python
|
from django.forms.widgets import CheckboxSelectMultiple, RadioSelect
class RadioSelectBootstrap(RadioSelect):
    """Radio group rendered with leprikon's Bootstrap-friendly templates."""
    # Override Django's default widget templates with project-local ones.
    template_name = "leprikon/widgets/multiple_input.html"
    option_template_name = "leprikon/widgets/input_option.html"
class CheckboxSelectMultipleBootstrap(CheckboxSelectMultiple):
    """Checkbox group rendered with leprikon's Bootstrap-friendly templates."""
    # Override Django's default widget templates with project-local ones.
    template_name = "leprikon/widgets/multiple_input.html"
    option_template_name = "leprikon/widgets/input_option.html"
|
nilq/baby-python
|
python
|
import pandas as pd
class LeakageInspector:
    """Detects patient-level leakage between two dataset splits."""

    def __init__(self, df1, df2, patient_col):
        """
        Args:
            df1 (dataframe): dataframe describing the first dataset
            df2 (dataframe): dataframe describing the second dataset
            patient_col (str): name of the column holding patient IDs
        """
        self.df1 = df1
        self.df2 = df2
        self.patient_col = patient_col

    def check_for_leakage(self):
        """Return True when at least one patient appears in both datasets.

        A patient present in both splits (e.g. train and validation) leaks
        information between them.
        """
        ids_first = set(self.df1[self.patient_col].unique().tolist())
        ids_second = set(self.df2[self.patient_col].unique().tolist())
        shared_patients = ids_first & ids_second
        return len(shared_patients) >= 1
|
nilq/baby-python
|
python
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Distribution metadata for the Pairtree package (setuptools).
setup(name="Pairtree",
      version="0.7.5",
      description="Pairtree FS implementation.",
      long_description="""\
From http://www.cdlib.org/inside/diglib/pairtree/pairtreespec.html : Pairtree, a filesystem hierarchy for holding objects that are located by mapping identifier strings to object directory (or folder) paths two characters at a time. If an object directory (folder) holds all the files, and nothing but the files, that comprise the object, a "pairtree" can be imported by a system that knows nothing about the nature or structure of the objects but can still deliver any object's files by requested identifier. The mapping is reversible, so the importing system can also walk the pairtree and reliably enumerate all the contained object identifiers. To the extent that object dependencies are stored inside the pairtree (e.g., fast indexes stored outside contain only derivative data), simple or complex collections built on top of pairtrees can recover from index failures and reconstruct a collection view simply by walking the trees. Pairtrees have the advantage that many object operations, including backup and restore, can be performed with native operating system tools.
""",
      author="Ben O'Steen",
      author_email="bosteen@gmail.com",
      url="http://packages.python.org/Pairtree/",
      scripts = ['bin/ppath'],
      license="http://www.apache.org/licenses/LICENSE-2.0",
      packages=find_packages(),
      test_suite = "tests.test.TestPairtree",
      )
|
nilq/baby-python
|
python
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from pages.createprojectpage.questionnaire_creation_options_page import QuestionnaireCreationOptionsPage
from pages.dashboardpage.dashboard_locator import *
from pages.page import Page
class DashboardPage(Page):
    """Page object for the dashboard screen."""

    def __init__(self, driver):
        Page.__init__(self, driver)

    def navigate_to_create_project_page(self):
        """Click the create-project link and return the next page object."""
        create_link = self.driver.find(CREATE_PROJECT_LINK)
        create_link.click()
        return QuestionnaireCreationOptionsPage(self.driver)
|
nilq/baby-python
|
python
|
'''Development settings: local Postgres, DEBUG on, CORS for the dev frontend.'''
from .base import *
ALLOWED_HOSTS += ['127.0.0.1']
DEBUG = True
WSGI_APPLICATION = 'home.wsgi.dev.application'
# SECURITY NOTE(review): a real-looking database password is committed here;
# move it to an environment variable / secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'sarytask',
        'USER': 'postgres',
        'PASSWORD': 'S3d66221@',
        'HOST': 'localhost',
        'PORT': '8081',
    }
}
# Allow the local frontend dev server to call this API.
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3000',
)
# add third party API keys here (masked / not committed)
nilq/baby-python
|
python
|
import re
from jinja2.filters import contextfilter
@contextfilter
def to_dockbarx_items(context, pin_to_launcher_favorites):
    '''
    returns the DockbarX launcher items
    '''
    omit = context.resolve('omit')
    items = []
    for favourite in list(pin_to_launcher_favorites):
        application = favourite.get('application')
        if application in ('', None, omit):
            continue
        if favourite.get('when_desktop') not in ('dockbarx', None):
            continue
        application_id = favourite.get('application_id')
        if application_id is None:
            # Fall back to the .desktop file name without its extension.
            application_id = re.sub('(.*)\\.desktop$', '\\1', application)
        items.append(application_id + ';/usr/share/applications/' + application)
    return items
@contextfilter
def to_gnome_items(context, pin_to_launcher_favorites):
    '''
    returns the Gnome launcher items
    '''
    omit = context.resolve('omit')
    items = []
    for favourite in list(pin_to_launcher_favorites):
        application = favourite.get('application')
        if application in ('', None, omit):
            continue
        if favourite.get('when_desktop') not in ('gnome', None):
            continue
        items.append("'" + application + "'")
    return items
@contextfilter
def to_unity_items(context, pin_to_launcher_favorites):
    '''
    returns the Unity launcher items
    '''
    omit = context.resolve('omit')
    items = []
    for favourite in list(pin_to_launcher_favorites):
        application = favourite.get('application')
        if application in ('', None, omit):
            continue
        if favourite.get('when_desktop') not in ('unity', None):
            continue
        items.append("'application://" + application + "'")
        # A favourite may additionally carry a unity:// scope entry.
        unity = favourite.get('unity')
        if unity not in ('', None, omit):
            items.append("'unity://" + unity + "'")
    return items
class FilterModule(object):
    ''' Launcher filter '''

    def filters(self):
        """Expose the launcher-item filters by name."""
        return {
            'to_dockbarx_items': to_dockbarx_items,
            'to_gnome_items': to_gnome_items,
            'to_unity_items': to_unity_items,
        }
|
nilq/baby-python
|
python
|
import collections
import itertools
import typing
from checkmerge import analysis, report
class AnalysisResultMaxSeverityMetric(report.Metric):
    """Reports the highest severity among analysis results of one type."""
    name = 'Max. severity'
    low = .5
    high = 1.5

    def __init__(self, items: typing.List[analysis.AnalysisResult]):
        """
        :param items: The results of a single analysis-result type.
        """
        highest = max(item.severity for item in items)
        super().__init__(highest)
class AnalysisResultAvgSeverityMetric(report.Metric):
    """Reports the mean severity among analysis results of one type."""
    name = 'Avg. severity'
    low = .5
    high = 1.5

    def __init__(self, items: typing.List[analysis.AnalysisResult]):
        """
        :param items: The results of a single analysis-result type.
        """
        total = sum(item.severity for item in items)
        super().__init__(total / float(len(items)))
class AnalysisResultMetric(report.Metric):
    """Per-type metric: result count, with max/avg severity as children."""
    low = 1
    high = 5

    def __init__(self, cls: typing.Type[analysis.AnalysisResult], items: typing.List[analysis.AnalysisResult]):
        # The metric is named after the concrete analysis-result class.
        self.name = cls.name
        results = list(items)
        children = [
            AnalysisResultMaxSeverityMetric(results),
            AnalysisResultAvgSeverityMetric(results),
        ]
        super().__init__(len(results), children=children)
class AnalysisReport(report.Report):
    """Report built from a stream of analysis results."""
    has_metrics = True
    has_conflicts = True

    def __init__(self, results: typing.Iterable[analysis.AnalysisResult]):
        # Bucket the results by their concrete result class.
        self.results_by_type = collections.defaultdict(list)
        for result in results:
            self.results_by_type[type(result)].append(result)

    def get_metrics(self) -> typing.Iterable[report.Metric]:
        """Yield one per-type metric, ordered by the result class name."""
        ordered = sorted(self.results_by_type.items(), key=lambda pair: pair[0].name)
        for cls, items in ordered:
            yield AnalysisResultMetric(cls, items)

    def get_conflicts(self) -> typing.Iterable[analysis.AnalysisResult]:
        """Return every result, most severe first."""
        all_results = itertools.chain(*self.results_by_type.values())
        return sorted(all_results, key=lambda result: -result.severity)
|
nilq/baby-python
|
python
|
"""
parser.py
products.json의 반찬 상세 페이지 url에 요청을 보내 크롤링하는 메소드 정의
메소들을 crawl.py 파일에서 사용함
"""
import requests
from bs4 import BeautifulSoup
import re
def get_soup(url):
    """Fetch *url* and return its parsed BeautifulSoup document (lxml)."""
    html = requests.get(url).text
    return BeautifulSoup(html, 'lxml')
def parse_name(name):
    """Split a raw product title into (supplier, food name, weight in grams).

    Titles look like "[supplier] food (150g)".  Supplier and weight are both
    optional; expressions such as "(140g*2)" are NOT treated as a weight and
    remain part of the food name.  Weights given in kg become grams.
    """
    # Gate: does the title contain a weight at all?
    has_weight = re.compile('.+?\(?(\d*,?\.?\d+)k?g(?!\*)\)?').match(name)
    if has_weight:
        # supplier, name, weight ('k' suffix kept for kg), trailing text
        groups = re.findall('\[(.*)\]\s?(.+?)\s?\(?(\d*,?\.?\d+k?)g\)?(\s.+)?', name)
    else:
        # supplier and name only
        groups = re.findall('\[(.*)\]\s?(.+)', name)
    if not groups:
        # No "[supplier]" prefix at all: the whole title is the food name.
        return ('', name, 0)
    first = groups[0]
    supplier_name = first[0]
    # Join the name with any trailing text captured after the weight.
    food_name = ''.join(first[1::2])
    raw_weight = first[2] if len(first) >= 4 else '0'
    if 'k' in raw_weight:
        # kg -> grams
        weight = int(float(raw_weight[:-1]) * 1000)
    else:
        weight = int(raw_weight.replace(',', ''))
    return (supplier_name, food_name, weight)
def parse_product(soup):
    """
    Extract the fields needed to build a Product from a detail page.

    :param soup: BeautifulSoup instance of the product detail page
    :return result: dict with name/price/thumbnail/detail fields

    Fix: removed the stray debug ``print(result)`` that dumped every parsed
    product to stdout; also hoisted duplicated selector lookups.
    """
    result = dict()
    product_name = soup.select_one('h1.desc_product_name').text
    result['raw_name'] = product_name
    result['supplier'], result['name'], result['weight'] = parse_name(product_name)
    description = soup.select_one('p.desc_bt_txt')
    result['description'] = description.text if description else ''
    # First thumbnail is the main image; the rest come from the gallery strip.
    result['thumbnail_url1'] = soup.select_one('div.image_top > img').get('src')
    for index, img in enumerate(soup.select('a.top_thumb > img'), start=2):
        result[f'thumbnail_url{index}'] = img.get('src')
    origin_price = soup.select_one('del.origin-price')
    sale_price = int(soup.select_one('strong.sale-price').text[:-1].replace(',', ''))
    result['sale_price'] = sale_price
    if origin_price is not None:
        # Discounted product: keep the original price and a rounded rate (%).
        price = int(origin_price.text[:-1].replace(',', ''))
        result['price'] = price
        result['discount_rate'] = round((1 - sale_price / price) * 100)
    else:
        result['price'] = 0
        result['discount_rate'] = 0
    # Detail table rows: food type, ingredients, allergy warnings.
    details = soup.select('table.table_detail_info > tbody > tr')
    for detail in details:
        header = detail.select_one('th').text
        if header == '식품의 유형':
            result['type'] = detail.select_one('td').text
        elif header == '원재료명 및 함량':
            result['materials'] = detail.select_one('td').text
        elif header == '알레르기 유발물질':
            result['alert_allergy'] = detail.select_one('td').text
    # Stock/availability are not published on the page; use fixed defaults.
    result['stock'] = 10
    result['available'] = True
    # Info list: reward points, delivery type, delivery days.
    details = soup.select('dl.desc_info > dt')
    for i, detail in enumerate(details):
        if detail.text == '적립금':
            result['point_amount'] = soup.select_one(f'dl.desc_info > dd:nth-of-type({i+1})').text[:-1].replace(',', '')
        elif detail.text == '배송타입':
            result['delivery_type'] = soup.select_one(f'dl.desc_info > dd:nth-of-type({i+1})').text.strip()
        elif detail.text == '수령요일':
            result['delivery_days'] = soup.select_one('dl.desc_info > dd > strong').text
    return result
def parse_category(soup):
    """Return {'category': name} taken from the breadcrumb trail."""
    category = soup.select_one('ul.breadcrumb > li:nth-of-type(3) > a').text
    return {'category': category}
def parse_product_image(soup):
    """Collect the src URLs of the detail-section images, in page order."""
    return [img.get('src') for img in soup.select('div.product_detail_img_box > img')]
|
nilq/baby-python
|
python
|
import pandas as pd
import os
import sys
import subprocess
import numpy as np
import streamlit as st
import time
import random
from random import randint
from streamlit_player import st_player
from streamlit_autorefresh import st_autorefresh
import altair as alt
import back as fl
import usessss as fh
#from pytransform import _load_library
#m = _load_library(path='kryp/dist/pytransform')
#m
#
#[browser]
#serverAddress = "Nikolai"
#from dist.tost import pyarmor_runtime
#import dist.tost as fl
#import dist.tost as fl
#fl.pyarmor_runtime()
#st.set_page_config(
page_title="Really cool app",
page_icon="random",
#page_icon="🧊",
layout="centered",
initial_sidebar_state="collapsed",
#)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
footer {
footer:after {
content:'goodbye';
visibility: visible;
display: block;
position: relative;
#background-color: red;
padding: 5px;
top: 2px;
}
"""
padding = 0
st.markdown(f""" <style>
.reportview-container .main .block-container{{
padding-top: {padding}rem;
padding-right: {padding}rem;
padding-left: {padding}rem;
padding-bottom: {padding}rem;
}} </style> """, unsafe_allow_html=True)
#st.button(f"Click Me {st.session_state.emoji}", on_click=random_emoji)
option1, option2, option3, option4, option5, usertext1 = False, False, False, False, False, "default_text"
st.title('Welcome!')
emojis = ["🐶", "🐱", "🐭", "🐹", "🐰", "🦊", "🐻", "🐼"]
#st.session_state.emoji = random.choice(emojis)
# initialize emoji as a Session State variable
#if "emoji" not in st.session_state:
#st.session_state.emoji = "👈"
#font_size = st.sidebar.number_input(
#"emoji", min_value=0.5, max_value=4.0, value=2.0, step=0.1
#)
# Run the autorefresh about every 2000 milliseconds (2 seconds) and stop
# after it's been refreshed 100 times.
def hello():
    """FizzBuzz demo driven by Streamlit's auto-refresh counter."""
    # st_autorefresh returns how many refreshes have happened so far,
    # letting us emit a different message on each interval.
    count = st_autorefresh(interval=2000, limit=2, key="fizzbuzzcounter")
    if count == 0:
        message = "Count is zero"
    elif count % 3 == 0 and count % 5 == 0:
        message = "FizzBuzz"
    elif count % 3 == 0:
        message = "Fizz"
    elif count % 5 == 0:
        message = "Buzz"
    else:
        message = f"Count: {count}"
    st.write(message)
# Landing page: byline plus three emoji-decorated buttons in columns.
st.text('Made by Nikolai Berthelsen')
b = True
c1, c2, c3 = st.columns(3)
with c1:
    # NOTE(review): st.write(...) is passed as the button's second positional
    # argument; it appears to execute immediately and return None rather than
    # decorate the button -- confirm intent.
    if st.button("Contact me", st.write(random.choice(emojis))):
        option4 = True
        code ='''Nikolai2002b@gmail.com'''
        st.code(code, language='python')
        b = False
with c2:
    if st.button("Social media", st.write(random.choice(emojis))):
        option3 = True
        b = False
        st.write("Here")
with c3:
    # "Back to front page" button; its state (o) is checked further below.
    o=st.button("Tilbage til forsiden", st.write(random.choice(emojis)))
cont1, cont2, _, _, _, _ = st.columns(6)
# Scratch variables used by the database/quiz sections below.
key = (np.arange(9) * 2)
x3 = ('')
x2 = ('')
x1 = ('')
x5 = ()
# https://www.delftstack.com/howto/python/python-clear-console/
def clearConsole():
    """Clear the terminal: ``cls`` on Windows, ``clear`` elsewhere.

    Side effect only (shells out via os.system); returns None.
    """
    # os.name is 'nt' on Windows ('dos' on legacy builds); the original
    # check tested for 'cls', which os.name can never be.
    command = 'cls' if os.name in ('nt', 'dos') else 'clear'
    os.system(command)
# Load the persisted database (CSV stored inside a zip archive).
df = pd.read_csv('out.zip')
total_rows2= len(df.index)+1
total_rows= len(df.index)-1
# Hard-coded demo rows; NOTE(review): 'Pris' mostly holds placeholder text.
df4 = pd.DataFrame({'Produkt': 'Cheeseburger nuggets chilicheesetops milkshake bigmac apple cola water bigTastyBacon'.split(),
                    'Butik': 'Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds '.split(),
                    'Pris': 'Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds Macdonalds 20'.split(),
                    'iD': (total_rows)})
u = ()
# Placeholder form/submit handles referenced later in the page.
submit1 = ()
submit = ()
submit2 = ()
submit3 = ()
so = ()
c1, c2, c3 = st.columns([50,60,70])
# Project 1: slide deck plus a short text about recursion.
if st.sidebar.checkbox(f"Projekt 1 - Database"):
    fh.hej()
    input = ""
    option2 = True
    st.write(fl.g())
    # One slider picks a slide; the button below plays through all of them.
    slider_ph = st.empty()
    info_ph = st.empty()
    b = False
    value = slider_ph.slider("slider", 1, 10, 1, 1)
    info_ph.info(value)
    st.image(str(value) + ".png",)
    if st.button('Vis alle slides'):
        b = False
        for x in range(10):
            value = int(value)
            value = slider_ph.slider("slider", 0, 10, value + 1, 1)
            info_ph.info(value)
            time.sleep(4)
            value = str(value)
            st.image(str(value) + ".png",)
    st.title("What is Recursion?")
    st.write(f"Recursion Defined What is recursion? Sometimes a problem is too difficult or too complex to solve because it is too big. If the problem can be broken down into smaller versions of itself, we may be able to find a way to solve one of these smaller versions and then be able to build up to a solution to the entire problem. This is the idea behind recursion; recursive algorithms break down a problem into smaller pieces which you either already know the answer to, or can solve by applying the same algorithm to each piece, and then combining the results. Stated more concisely, a recursive definition is defined in terms of itself. Recursion is a computer programming technique involving the use of a procedure, subroutine, function, or algorithm that calls itself in a step having a termination condition so that successive repetitions are processed up to the critical step where the condition is met at which time the rest of each repetition is processed from the last one called to the first. \n Don't worry about the details of that definition. The main point of it is that it is defined in terms of itself: Recursion: ... for more information, see Recursion. \n")
    st.write("https://www.sparknotes.com/cs/recursion/whatisrecursion/section1/")
# Project 2: two-question JavaScript for-loop quiz; answers are written
# back to the CSV database when the form is submitted.
if st.sidebar.checkbox("Project 2 - programming quiz"):
    option1 = True
    b = False
    st.write("Hvad skal der i opg.1 for at køre loopet 10 gange?")
    with c1:
        st.header("Opg. 1 \n for (let i = 0; i < ")
        st.write("text += cars[i]; }")
        new_title = '<p style="font-family:sans-serif; color:Green; font-size: 42px;">New image</p>'
    with c2:
        st.header("")
        # The user types the loop bound; expected answer is "10".
        so = (st.text_input((""), key = '91' ))
        st.write({so})
        if so != ("10"):
            st.write("False")
            x1 = int(1)
    with c3:
        st.header(".\n ;i++) {")
        if so == ("10"):
            original_title = '<p style="font-family:Courier; color:Blue; font-size: 20px;">True</p>'
            st.markdown(original_title, unsafe_allow_html=True)
            x2 = int(2)
    c4, c5, c6 = st.columns([50,60,70])
    st.write("Hvad skal der i opg.2 for at køre loopet 20 gange?")
    with c4:
        st.header("Opg. 2 \n for (let i = 0; i < ")
        st.write("text += cars[i]; }")
        new_title = '<p style="font-family:sans-serif; color:Green; font-size: 42px;">New image</p>'
    with c5:
        st.header("")
        # Second question; expected answer is "20".
        su = (st.text_input((""), key = '910' ))
        st.write({su})
        if su != ("20"):
            st.write("False")
            x1 = int(1)
    with c6:
        st.header(".\n ;i++) {")
        if su == ("20"):
            original_title3 = '<p style="font-family:Courier; color:Blue; font-size: 20px;">True</p>'
            st.markdown(original_title3, unsafe_allow_html=True)
            x2 = int(2)
    form = st.form(key='my-form')
    submit = form.form_submit_button('Tilføj til Databasen')
    if submit:
        df2 = pd.DataFrame({'Antal forkerte svar': [x1],
                            'Antal rigtige svar': [x2]})
        # NOTE(review): the row is appended twice -- looks accidental; confirm.
        df = df.append((df2), ignore_index=False)
        df = df.append((df2), ignore_index=False)
        # Persist back to the zipped CSV.
        compression_opts = dict(method = 'zip',
                                archive_name='out.csv')
        df.to_csv('out.zip', index=False,
                  compression=compression_opts)
        st.write(df)
# Reset all section flags when "back to front page" is ticked.
if st.sidebar.checkbox("Tilbage til forsiden"):
    option5 = True
    # NOTE(review): this is a bare annotation, not an assignment -- it has
    # no effect at runtime.
    initial_sidebar_state: "expanded"
    option1, option2, option3, option4, option5, usertext1 = False, False, False, False, False, "default_text"
if o:
    # NOTE(review): submit3 is always () here, so this branch is dead code.
    if submit3:
        df2 = pd.DataFrame({'Antal forkerte svar': [x1],
                            'Antal rigtige svar': [x2]})
        df = df.append((df2), ignore_index=False)
        compression_opts = dict(method = 'zip',
                                archive_name='out.csv')
        df.to_csv('out.zip', index=False,
                  compression=compression_opts)
submit4 = ()
# Delete-row branch. NOTE(review): u is always () in this file, so the
# u == "4"/"6"/"5"/"1" branches below can never fire -- confirm intent.
if u == str("4"):
    print("Hvilket række vil du slette?")
    form4 = st.form(key='my-form4')
    x40 = form4.text_input('')
    submit4 = form4.form_submit_button('Submit')
    st.write(x40)
    x5 = x40
    if submit4:
        x5 = int(x5)
        st.write(x5)
        df = df.drop([x5], axis=0)
        compression_opts = dict(method = 'zip',
                                archive_name='out.csv')
        df.to_csv('out.zip', index=False,
                  compression=compression_opts)
x10 = ()
x11 = ()
options = [x10]
search = [x11]
x9 = ()
x8 = ()
# Edit-value branch: replace an old value with a new one for a product.
if u == str("6"):
    st.write("Hvilken burger vil du ændre på?")
    x10 = st.text_input((''), key = '60')
    options = [x10]
    st.write("Indtast gamle værdi")
    x8 = st.text_input((''), key = '61')
    st.write("Indtast nye værdi")
    x9 = st.text_input((''), key = '62')
    if st.button('Click func too'):
        df[df['Produkt'].isin(options)] = df[df['Produkt'].isin(options)].replace(x8,x9)
        compression_opts = dict(method = 'zip',
                                archive_name='out.csv')
        df.to_csv('out.zip', index=False,
                  compression=compression_opts)
line2 = ()
def søg(l):
    """Search the global df's 'Produkt' column for *l* and display matches.

    Sorts df in place (case-insensitively) first, then writes each matching
    row with st.write.  Mutates the module-level df.
    """
    df.sort_values(by='Produkt', inplace=True, key=lambda col: col.str.lower())
    for line in df['Produkt']:
        if line == '':
            pass
        elif l in str(line).lower():
            # Case-insensitive hit: echo the product name and its row.
            st.write(line)
            st.write(df.loc[df['Produkt'] == (line)])
        elif l in str(line):
            # Case-sensitive hit only (l contains uppercase characters).
            st.write(df.loc[df['Produkt'] == (line)])
# "5": clear the console (dead branch -- u is always (), see note above).
if u == str("5"):
    clearConsole()
ur2=()
ur=5000
# "1": free-text search through the database.
if u == str("1"):
    st.title("Hvad vil ud søge på?")
    ur = st.text_input(("").lower(), key = '70' )
    søg(str(ur))
# Default front-page content when no button was pressed.
if b == True:
    code = '''def hello():
    print("Hello, and welcome to my portefolio website!!")'''
    st.code(code, language='python')
    st_player("https://www.youtube.com/watch?v=r5kfkpYtOiw")
nilq/baby-python
|
python
|
# RGB colours (0-1 floats) used to draw each infection state.
GREY = (0.78, 0.78, 0.78)  # uninfected
RED = (0.96, 0.15, 0.15)   # infected
GREEN = (0, 0.86, 0.03)    # recovered
BLACK = (0, 0, 0)          # dead

# Epidemiological parameters for the simulation; time values are
# presumably in days -- confirm against the simulation code.
COVID19_PARAMS = {
    "r0": 2.28,                 # basic reproduction number
    "incubation": 5,            # incubation period
    "percent_mild": 0.8,        # fraction of mild cases
    "mild_recovery": (7, 14),   # recovery window for mild cases
    "percent_severe": 0.2,      # fraction of severe cases
    "severe_recovery": (21, 42),  # recovery window for severe cases
    "severe_death": (14, 56),   # death window for fatal severe cases
    "fatality_rate": 0.034,     # overall case fatality rate
    "serial_interval": 7,       # time between successive infections
    "population": 10000         # simulated population size
}
|
nilq/baby-python
|
python
|
from troposphere import FindInMap, GetAtt, Join, Output, Parameter, Ref, Template
from troposphere.awslambda import MINIMUM_MEMORY, MAXIMUM_MEMORY, Code, Function
from troposphere.cloudformation import CustomResource
from troposphere.constants import NUMBER
from troposphere.ec2 import Instance, SecurityGroup
from troposphere.iam import Policy, Role
# Template: a Lambda-backed custom resource appends a newly created security
# group to a caller-supplied list of existing groups, and an EC2 instance is
# launched with the combined list.
t = Template()
t.set_version("2010-09-09")
ExistingVPC = t.add_parameter(
    Parameter(
        "ExistingVPC",
        Type="AWS::EC2::VPC::Id",
        Description=(
            "The VPC ID that includes the security groups in the "
            "ExistingSecurityGroups parameter."
        ),
    )
)
InstanceType = t.add_parameter(
    Parameter(
        "InstanceType",
        Default="t2.micro",
        Type="String",
        AllowedValues=["t2.micro", "m1.small"],
    )
)
ExistingSecurityGroups = t.add_parameter(
    Parameter(
        "ExistingSecurityGroups",
        Type="List<AWS::EC2::SecurityGroup::Id>",
    )
)
# Lambda sizing knobs, bounded by the service limits from troposphere.
MemorySize = t.add_parameter(
    Parameter(
        "LambdaMemorySize",
        Type=NUMBER,
        Description="Amount of memory to allocate to the Lambda Function",
        Default="128",
        MinValue=MINIMUM_MEMORY,
        MaxValue=MAXIMUM_MEMORY,
    )
)
Timeout = t.add_parameter(
    Parameter(
        "LambdaTimeout",
        Type=NUMBER,
        Description="Timeout in seconds for the Lambda function",
        Default="60",
    )
)
# AMI lookup tables: instance type -> architecture, then region ->
# architecture-specific AMI id (consumed by the FindInMap calls below).
t.add_mapping(
    "AWSInstanceType2Arch",
    {"m1.small": {"Arch": "PV64"}, "t2.micro": {"Arch": "HVM64"}},
)
t.add_mapping(
    "AWSRegionArch2AMI",
    {
        "ap-northeast-1": {"HVM64": "ami-cbf90ecb", "PV64": "ami-27f90e27"},
        "ap-southeast-1": {"HVM64": "ami-68d8e93a", "PV64": "ami-acd9e8fe"},
        "ap-southeast-2": {"HVM64": "ami-fd9cecc7", "PV64": "ami-ff9cecc5"},
        "cn-north-1": {"HVM64": "ami-f239abcb", "PV64": "ami-fa39abc3"},
        "eu-central-1": {"HVM64": "ami-a8221fb5", "PV64": "ami-ac221fb1"},
        "eu-west-1": {"HVM64": "ami-a10897d6", "PV64": "ami-bf0897c8"},
        "sa-east-1": {"HVM64": "ami-b52890a8", "PV64": "ami-bb2890a6"},
        "us-east-1": {"HVM64": "ami-1ecae776", "PV64": "ami-1ccae774"},
        "us-west-1": {"HVM64": "ami-d114f295", "PV64": "ami-d514f291"},
        "us-west-2": {"HVM64": "ami-e7527ed7", "PV64": "ami-ff527ecf"},
    },
)
code = [
"var response = require('cfn-response');",
"exports.handler = function(event, context) {",
" var responseData = {Value: event.ResourceProperties.List};",
" responseData.Value.push(event.ResourceProperties.AppendedItem);",
" response.send(event, context, response.SUCCESS, responseData);",
"};",
]
AppendItemToListFunction = t.add_resource(
Function(
"AppendItemToListFunction",
Code=Code(ZipFile=Join("", code)),
Handler="index.handler",
Role=GetAtt("LambdaExecutionRole", "Arn"),
Runtime="nodejs",
MemorySize=Ref(MemorySize),
Timeout=Ref(Timeout),
)
)
# Execution role for the Lambda function: assumable by lambda.amazonaws.com
# and allowed to write CloudWatch logs.
LambdaExecutionRole = t.add_resource(
    Role(
        "LambdaExecutionRole",
        Path="/",
        Policies=[
            Policy(
                PolicyName="root",
                PolicyDocument={
                    "Version": "2012-10-17",
                    "Statement": [
                        {
                            "Action": ["logs:*"],
                            "Resource": "arn:aws:logs:*:*:*",
                            "Effect": "Allow",
                        }
                    ],
                },
            )
        ],
        AssumeRolePolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {"Service": ["lambda.amazonaws.com"]},
                }
            ],
        },
    )
)
# EC2 instance whose security groups are the combined list produced by the
# "AllSecurityGroups" custom resource (GetAtt by title -> "Value").
MyEC2Instance = t.add_resource(
    Instance(
        "MyEC2Instance",
        SecurityGroupIds=GetAtt("AllSecurityGroups", "Value"),
        InstanceType=Ref(InstanceType),
        # AMI resolved via the two mappings: type -> arch, region+arch -> AMI.
        ImageId=FindInMap(
            "AWSRegionArch2AMI",
            Ref("AWS::Region"),
            FindInMap("AWSInstanceType2Arch", Ref(InstanceType), "Arch"),
        ),
    )
)
# Custom resource invoking the Lambda: combines the existing groups with the
# new "SecurityGroup" resource (referenced by its template title string).
AllSecurityGroups = t.add_resource(
    CustomResource(
        "AllSecurityGroups",
        List=Ref(ExistingSecurityGroups),
        AppendedItem=Ref("SecurityGroup"),
        ServiceToken=GetAtt(AppendItemToListFunction, "Arn"),
    )
)
# FIX: bind the resource to a distinct Python name — the original assignment
# (``SecurityGroup = t.add_resource(SecurityGroup(...))``) shadowed the
# troposphere ``SecurityGroup`` class imported above, making the class
# unusable afterwards. The CloudFormation resource keeps its "SecurityGroup"
# title, so ``Ref("SecurityGroup")`` elsewhere in the template is unaffected.
security_group = t.add_resource(
    SecurityGroup(
        "SecurityGroup",
        # Allow HTTP in and out from anywhere.
        SecurityGroupIngress=[
            {
                "ToPort": "80",
                "IpProtocol": "tcp",
                "CidrIp": "0.0.0.0/0",
                "FromPort": "80",
            }
        ],
        VpcId=Ref(ExistingVPC),
        GroupDescription="Allow HTTP traffic to the host",
        SecurityGroupEgress=[
            {
                "ToPort": "80",
                "IpProtocol": "tcp",
                "CidrIp": "0.0.0.0/0",
                "FromPort": "80",
            }
        ],
    )
)
# NOTE(review): this rebinds ``AllSecurityGroups`` from the CustomResource to
# the Output object. The ``GetAtt`` argument is evaluated before the
# rebinding, so it still refers to the custom resource — confirm the name
# reuse is intentional.
AllSecurityGroups = t.add_output(
    Output(
        "AllSecurityGroups",
        Description="Security Groups that are associated with the EC2 instance",
        Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
    )
)
# Emit the finished template as JSON on stdout.
print(t.to_json())
|
nilq/baby-python
|
python
|
from flask import Flask, render_template, redirect
app = Flask(__name__)
#--- routes start ---
# Imported for its side effects (registering the user module's routes).
# Placed after ``app`` is created — presumably to avoid a circular import;
# TODO confirm.
from user import routes
@app.route("/")
def home():
    """Render the site landing page."""
    rendered = render_template('home.html')
    return rendered
@app.route("/login/")
def login_page():
    """Render the login form."""
    rendered = render_template('login.html')
    return rendered
@app.route("/about/")
def about():
    """Render the about-us page."""
    rendered = render_template('aboutus.html')
    return rendered
@app.route("/dashboard/")
def dashboard():
    """Redirect the user to the externally hosted ML dashboard."""
    target = "http://ml-hub.herokuapp.com"
    return redirect(target)
|
nilq/baby-python
|
python
|
# qmpy/materials/element
"""
Django models representing elements and species.
"""
from django.db import models
from qmpy.db.custom import DictField
from qmpy.utils import *
class Element(models.Model):
    """
    Core model for an element.
    Relationships:
        | :mod:`~qmpy.Atom` via atom_set
        | :mod:`~qmpy.Species` via species_set
        | :mod:`~qmpy.Structure` via structure_set
        | :mod:`~qmpy.Entry` via entry_set
        | :mod:`~qmpy.Composition` via composition_set
        | :mod:`~qmpy.Calculation` via calculation_set
        | :mod:`~qmpy.Potential` via potential_set
        | :mod:`~qmpy.Hubbard` via hubbards
        | :mod:`~qmpy.HubbardCorrection` via hubbardcorrection_set
        | :mod:`~qmpy.ReferenceEnergy` via referenceenergy_set
    Attributes:
        | **Identification**
        | z: atomic number
        | name: full atomic name
        | symbol: atomic symbol
        | group: group in the periodic table
        | period: period in the periodic table
        |
        | **Physical properties**
        | mass: Atomic mass, in AMU (float)
        | density: Density at STP, in g/cm^3 (float)
        | volume: Atomic volume at STP, in A^3/atom (float)
        | atomic_radii: in A (float)
        | van_der_waals radii: in A (float)
        | covalent_radii: in A (float)
        | scattering_factors: A dictionary of scattering factor coeffs.
        |
        | **Thermodynamic properties**
        | melt: melting point in K
        | boil: boiling point in K
        | specific_heat: C_p in J/K
        |
        | **Electronic properties**
        | electronegativity: Pauling electronegativity
        | first_ionization_energy: First ionization energy. (eV)
        | s_elec: # of s electrons
        | p_elec: # of p electrons
        | d_elec: # of d electrons
        | f_elec: # of f electrons
        |
        | **Additional information**
        | production: Annual tons of element produced.
        | abundance: Amount in earths crust (ppm)
        | radioactive: Are all isotopes unstable?
        | HHI_P: Herfindahl-Hirschman Index for production.
        | HHI_R: Herfindahl-Hirschman Index for reserve
    Note:
        HHI values from Gaultois, M. et al. Chem. Mater. 25, 2911-2920 (2013).
    """
    ### Identification
    z = models.IntegerField()
    name = models.CharField(max_length=20)
    symbol = models.CharField(max_length=9, primary_key=True)
    ### Periodic table
    group = models.IntegerField()
    period = models.IntegerField()
    ### Phyical characteristics
    mass = models.FloatField()
    density = models.FloatField()
    volume = models.FloatField()
    # NOTE(review): the radii are stored as IntegerFields although the
    # docstring says Angstrom — presumably picometers; confirm the units.
    atomic_radii = models.IntegerField()
    van_der_waals_radii = models.IntegerField()
    covalent_radii = models.IntegerField()
    scattering_factors = DictField()
    ### Thermodynamics
    melt = models.FloatField()
    boil = models.FloatField()
    specific_heat = models.FloatField()
    ### Electonic structure
    electronegativity = models.FloatField()
    first_ionization_energy = models.FloatField()
    s_elec = models.IntegerField()
    p_elec = models.IntegerField()
    d_elec = models.IntegerField()
    f_elec = models.IntegerField()
    ### misc
    HHI_P = models.FloatField(default=0)
    HHI_R = models.FloatField(default=0)
    production = models.FloatField(default=0)
    radioactive = models.BooleanField(default=False)
    class Meta:
        app_label = "qmpy"
        db_table = "elements"
    # builtins
    def __str__(self):
        return self.symbol
    # accessor
    @classmethod
    def get(cls, value):
        """
        Return an element object. Accepts symbols and atomic numbers, or a list
        of symbols/atomic numbers.
        Examples::
            >>> Element.get('Fe')
            >>> Element.get(26)
            >>> Element.get(['Fe', 'O'])
        """
        if isinstance(value, cls):
            return value
        elif isinstance(value, list):
            return [cls.get(v) for v in value]
        elif isinstance(value, int):
            return cls.objects.get(z=value)
        elif isinstance(value, str):
            return cls.objects.get(symbol=value)
    # methods
    def species_distribution(self):
        """Map each oxidation state of this element to the number of
        structures containing that species."""
        counts = {}
        for s in self.species_set.all():
            counts[s.ox] = s.structure_set.count()
        return counts
class Species(models.Model):
    """
    Base model for an atomic species. (Element + charge state).
    Relationships:
        | :mod:`~qmpy.Element` via element
        | :mod:`~qmpy.Entry` via entry_set
        | :mod:`~qmpy.Structure` via structure_set
    Attributes:
        | name: Species name. e.g. Fe3+, O2-
        | ox: Oxidation state (float)
    """
    name = models.CharField(max_length=8, primary_key=True)
    element = models.ForeignKey(
        Element, blank=True, null=True, on_delete=models.CASCADE
    )
    ox = models.FloatField(blank=True, null=True)
    class Meta:
        app_label = "qmpy"
        db_table = "species"
    # builtins
    def __str__(self):
        return str(self.name)
    # accessor
    @classmethod
    def get(cls, value):
        """
        Gets or creates the specified species.
        Arguments:
            value:
                Accepts multiple input types. Can be a string, e.g. Fe3+
                or a tuple of (symbol, oxidation state) pairs, e.g. (Fe, 3).
        Return:
            A :mod:`~qmpy.Species` or list of :mod:`~qmpy.Species`.
        Examples::
            >>> Species.get('Fe3+')
            >>> Species.get('Fe3')
            >>> Species.get(('Fe', 3))
            >>> Species.get([ 'Fe3+', 'O2-', 'Li1+'])
        """
        if isinstance(value, cls):
            return value
        elif isinstance(value, str):
            spec, new = cls.objects.get_or_create(name=value)
            if new:
                # Newly created row: parse e.g. "Fe3+" into element symbol
                # and oxidation state before returning it.
                elt, ox = parse_species(value)
                spec.element_id = elt
                spec.ox = ox
                spec.save()
            return spec
        elif isinstance(value, list):
            # BUG FIX: the original iterated over the builtin ``list`` type
            # (``for value in list``), which raised TypeError at runtime.
            return [cls.get(v) for v in value]
    @property
    def ox_format(self):
        """Oxidation state as int when integral, rounded float otherwise
        (0 when unset)."""
        if self.ox is None:
            return 0
        elif is_integer(self.ox):
            return int(self.ox)
        else:
            return float(round(self.ox, 3))
|
nilq/baby-python
|
python
|
# # coding=utf-8
import unittest
import uuid
from google.appengine.ext import testbed
from application import create_app
from application.routes import create_routes
class FlaskClient(unittest.TestCase):
    """Smoke tests for app creation and URL routing under the GAE testbed."""
    def setUp(self):
        # Stand up an App Engine testbed with the stubs the app touches.
        self.testbed_env = testbed.Testbed()
        self.testbed_env.activate()
        self.testbed_env.init_memcache_stub()
        self.testbed_env.init_urlfetch_stub()
        self.app = create_app('testing')
        self.client = self.app.test_client(use_cookies=True)
    def tearDown(self):
        self.testbed_env.deactivate()
    def test_flask(self):
        self.assertIsNotNone(self.app)
    def test_routes(self):
        create_routes(self.app)
        self.assertIsNotNone(self.app.url_map)
        expected_static_endpoints = 5
        # Count the registered rules and require at least the static ones.
        registered = sum(1 for _ in self.app.url_map.iter_rules())
        self.assertGreaterEqual(registered, expected_static_endpoints)
    def test_404(self):
        # A random UUID path should never match a route.
        missing_path = '/%s' % uuid.uuid4()
        response = self.client.get(missing_path)
        self.assertEqual(response.status_code, 404)
    def test_static_route_for_404(self):
        response = self.client.get('/404')
        self.assertEqual(response.status_code, 404)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import date
from django.core.validators import MinValueValidator
from django.urls import reverse_lazy
from decimal import Decimal
from djangosige.apps.vendas.models import TIPOS_DESCONTO_ESCOLHAS, MOD_FRETE_ESCOLHAS, STATUS_ORCAMENTO_ESCOLHAS
from djangosige.apps.estoque.models import DEFAULT_LOCAL_ID
import locale
# Apply the system default locale globally at import time; the
# locale.format-based helpers in the models below depend on it.
locale.setlocale(locale.LC_ALL, '')
# Lifecycle states for a purchase order (PedidoCompra.status):
# open / fulfilled / cancelled / imported via XML / received.
STATUS_PEDIDO_COMPRA_ESCOLHAS = (
    (u'0', u'Aberto'),
    (u'1', u'Realizado'),
    (u'2', u'Cancelado'),
    (u'3', u'Importado por XML'),
    (u'4', u'Recebido')
)
class ItensCompra(models.Model):
    """Line item of a purchase (Compra): one product with its quantity,
    unit price, discount and per-item tax (ICMS/IPI) values and options."""
    produto = models.ForeignKey('cadastro.Produto', related_name="compra_produto",
                                on_delete=models.CASCADE, null=True, blank=True)
    compra_id = models.ForeignKey(
        'compras.Compra', related_name="itens_compra", on_delete=models.CASCADE)
    quantidade = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    valor_unit = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    # tipo_desconto: '0' means ``desconto`` is an absolute amount, any other
    # value means it is a percentage (see the discount helpers below).
    tipo_desconto = models.CharField(
        max_length=1, choices=TIPOS_DESCONTO_ESCOLHAS, null=True, blank=True)
    desconto = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                   MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    subtotal = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                   MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    inf_ad_prod = models.CharField(max_length=500, null=True, blank=True)
    # Tax amounts (vicms/vipi) and rates in percent (p_icms/p_ipi).
    vicms = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    vipi = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                               MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    p_icms = models.DecimalField(max_digits=5, decimal_places=2, validators=[
                                 MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    p_ipi = models.DecimalField(max_digits=5, decimal_places=2, validators=[
                                MinValueValidator(Decimal('0.00'))], null=True, blank=True)
    # Options
    icms_incluido_preco = models.BooleanField(default=False)
    ipi_incluido_preco = models.BooleanField(default=False)
    incluir_bc_icms = models.BooleanField(
        default=False)  # include IPI in the ICMS calculation base
    auto_calcular_impostos = models.BooleanField(default=True)
    @property
    def vprod(self):
        # Gross product value: quantity x unit price, rounded to 2 places.
        return round(self.quantidade * self.valor_unit, 2)
    def get_total_sem_desconto(self):
        # Total before discount. NOTE(review): a percentage discount of 100
        # makes the divisor zero here — confirm upstream validation.
        if self.tipo_desconto == '0':
            return self.subtotal + self.desconto
        else:
            tot_sem_desc = (self.subtotal * 100) / (100 - self.desconto)
            return tot_sem_desc
    def get_valor_desconto(self):
        # Discount expressed as an absolute amount.
        if self.tipo_desconto == '0':
            return self.desconto
        else:
            tot_sem_desc = self.get_total_sem_desconto()
            v_desconto = tot_sem_desc * (self.desconto / 100)
            return v_desconto
    def get_total_impostos(self):
        # Sum of the tax amounts that are set (None values are skipped).
        return sum(filter(None, [self.vicms, self.vipi]))
    def get_total_com_impostos(self):
        total_com_impostos = self.subtotal + self.get_total_impostos()
        return total_com_impostos
    # NOTE(review): ``locale.format`` is deprecated and removed in Python
    # 3.12 — ``locale.format_string`` is the drop-in replacement for these
    # helpers (third argument is ``grouping``).
    def format_total_impostos(self):
        return locale.format(u'%.2f', self.get_total_impostos(), 1)
    def format_total_com_imposto(self):
        return locale.format(u'%.2f', self.get_total_com_impostos(), 1)
    def format_desconto(self):
        return '{0}'.format(locale.format(u'%.2f', self.get_valor_desconto(), 1))
    def format_quantidade(self):
        return locale.format(u'%.2f', self.quantidade, 1)
    def format_valor_unit(self):
        return locale.format(u'%.2f', self.valor_unit, 1)
    def format_total(self):
        return locale.format(u'%.2f', self.subtotal, 1)
    def format_vprod(self):
        return locale.format(u'%.2f', self.vprod, 1)
    def format_valor_attr(self, nome_attr):
        # Generic formatter for any numeric attribute; returns None when the
        # attribute is unset (implicit fall-through).
        valor = getattr(self, nome_attr)
        if valor is not None:
            return locale.format(u'%.2f', valor, 1)
class Compra(models.Model):
    """Base purchase model: supplier, freight, stock destination, totals,
    discounts and taxes. Concrete children are OrcamentoCompra (quotation)
    and PedidoCompra (order) — see ``get_child``."""
    # Supplier
    fornecedor = models.ForeignKey(
        'cadastro.Fornecedor', related_name="compra_fornecedor", on_delete=models.CASCADE)
    # Freight
    mod_frete = models.CharField(
        max_length=1, choices=MOD_FRETE_ESCOLHAS, default='9')
    # Stock
    local_dest = models.ForeignKey(
        'estoque.LocalEstoque', related_name="compra_local_estoque", default=DEFAULT_LOCAL_ID, on_delete=models.PROTECT)
    movimentar_estoque = models.BooleanField(default=True)
    # Info
    data_emissao = models.DateField(null=True, blank=True)
    valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                      MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    # tipo_desconto: '0' = absolute amount, otherwise percentage.
    tipo_desconto = models.CharField(
        max_length=1, choices=TIPOS_DESCONTO_ESCOLHAS, default='0')
    desconto = models.DecimalField(max_digits=15, decimal_places=4, validators=[
                                   MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    despesas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                   MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    frete = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    seguro = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                 MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    total_icms = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                     MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    total_ipi = models.DecimalField(max_digits=13, decimal_places=2, validators=[
                                    MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
    cond_pagamento = models.ForeignKey(
        'vendas.CondicaoPagamento', related_name="compra_pagamento", on_delete=models.SET_NULL, null=True, blank=True)
    observacoes = models.CharField(max_length=1055, null=True, blank=True)
    def get_total_sem_imposto(self):
        # Grand total minus the combined ICMS+IPI taxes.
        total_sem_imposto = self.valor_total - self.impostos
        return total_sem_imposto
    def get_total_produtos(self):
        # Sum of gross item values (quantity x unit price) for all items.
        itens = ItensCompra.objects.filter(compra_id=self.id)
        tot = 0
        for it in itens:
            tot += it.vprod
        return tot
    def get_total_produtos_estoque(self):
        # As above, but only items whose product is stock-controlled.
        itens = self.itens_compra.all()
        tot = 0
        for it in itens:
            if it.produto.controlar_estoque:
                tot += it.vprod
        return tot
    # NOTE(review): ``locale.format`` is deprecated and removed in Python
    # 3.12 — ``locale.format_string`` is the drop-in replacement used the
    # same way throughout these format_* helpers.
    def format_total_produtos(self):
        return locale.format(u'%.2f', self.get_total_produtos(), 1)
    @property
    def impostos(self):
        # Combined tax total.
        return (self.total_icms + self.total_ipi)
    @property
    def format_data_emissao(self):
        # Issue date rendered as dd/mm/YYYY.
        return '%s' % date(self.data_emissao, "d/m/Y")
    def format_valor_total(self):
        return locale.format(u'%.2f', self.valor_total, 1)
    def format_frete(self):
        return locale.format(u'%.2f', self.frete, 1)
    def format_impostos(self):
        return locale.format(u'%.2f', self.impostos, 1)
    def format_vicms(self):
        return locale.format(u'%.2f', self.total_icms, 1)
    def format_vipi(self):
        return locale.format(u'%.2f', self.total_ipi, 1)
    def format_total_sem_imposto(self):
        return locale.format(u'%.2f', self.get_total_sem_imposto(), 1)
    def format_desconto(self):
        # Absolute discount, or the percentage applied to the pre-discount
        # total of all items.
        if self.tipo_desconto == '0':
            return locale.format(u'%.2f', self.desconto, 1)
        else:
            itens = ItensCompra.objects.filter(compra_id=self.id)
            tot = 0
            for it in itens:
                tot += it.get_total_sem_desconto()
            v_desconto = tot * (self.desconto / 100)
            return locale.format(u'%.2f', v_desconto, 1)
    def format_seguro(self):
        return locale.format(u'%.2f', self.seguro, 1)
    def format_despesas(self):
        return locale.format(u'%.2f', self.despesas, 1)
    def format_total_sem_desconto(self):
        total_sem_desconto = self.valor_total - self.desconto
        return locale.format(u'%.2f', total_sem_desconto, 1)
    def get_forma_pagamento(self):
        if self.cond_pagamento:
            return self.cond_pagamento.get_forma_display()
        else:
            return ""
    def get_local_dest_id(self):
        if self.local_dest:
            return self.local_dest.id
        else:
            return ""
    def get_child(self):
        # Resolve this row to its concrete subclass: try PedidoCompra first,
        # fall back to OrcamentoCompra (multi-table inheritance shares pk).
        try:
            return PedidoCompra.objects.get(id=self.id)
        except PedidoCompra.DoesNotExist:
            return OrcamentoCompra.objects.get(id=self.id)
    def __unicode__(self):
        s = u'Compra nº %s' % (self.id)
        return s
    def __str__(self):
        s = u'Compra nº %s' % (self.id)
        return s
class OrcamentoCompra(Compra):
    """Purchase quotation: a Compra with an expiry date and its own status."""
    data_vencimento = models.DateField(null=True, blank=True)
    status = models.CharField(
        max_length=1, choices=STATUS_ORCAMENTO_ESCOLHAS, default='0')
    class Meta:
        verbose_name = "Orçamento de Compra"
    @property
    def format_data_vencimento(self):
        # Expiry date rendered as dd/mm/YYYY.
        return '%s' % date(self.data_vencimento, "d/m/Y")
    @property
    def tipo_compra(self):
        return 'Orçamento'
    def edit_url(self):
        return reverse_lazy('compras:editarorcamentocompraview', kwargs={'pk': self.id})
    def __unicode__(self):
        s = u'Orçamento nº %s' % (self.id)
        return s
    def __str__(self):
        s = u'Orçamento nº %s' % (self.id)
        return s
class PedidoCompra(Compra):
    """Purchase order: a Compra with a delivery date, a status, and an
    optional link back to the quotation it originated from."""
    orcamento = models.ForeignKey(
        'compras.OrcamentoCompra', related_name="orcamento_pedido", on_delete=models.SET_NULL, null=True, blank=True)
    data_entrega = models.DateField(null=True, blank=True)
    status = models.CharField(
        max_length=1, choices=STATUS_PEDIDO_COMPRA_ESCOLHAS, default='0')
    class Meta:
        verbose_name = "Pedido de Compra"
        permissions = (
            ("faturar_pedidocompra", "Pode faturar Pedidos de Compra"),
        )
    @property
    def format_data_entrega(self):
        # Delivery date rendered as dd/mm/YYYY.
        return '%s' % date(self.data_entrega, "d/m/Y")
    @property
    def tipo_compra(self):
        return 'Pedido'
    def edit_url(self):
        return reverse_lazy('compras:editarpedidocompraview', kwargs={'pk': self.id})
    def __unicode__(self):
        s = u'Pedido de compra nº %s (%s)' % (
            self.id, self.get_status_display())
        return s
    def __str__(self):
        s = u'Pedido de compra nº %s (%s)' % (
            self.id, self.get_status_display())
        return s
|
nilq/baby-python
|
python
|
class Stack(object):
    """A LIFO stack backed by a Python list (top of stack = end of list)."""
    def __init__(self):
        self.items = []
    def is_empty(self):
        """Return True if the stack holds no items."""
        # An empty list is falsy — no need to build a throwaway [] to compare.
        return not self.items
    def push(self, item):
        """Push *item* onto the top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item (raises IndexError when empty)."""
        return self.items.pop()
    def peek(self):
        """Return the top item without removing it (raises IndexError when
        empty)."""
        # Negative indexing replaces the original items[len(items)-1].
        return self.items[-1]
    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
|
nilq/baby-python
|
python
|
# Implementacion generica de Arboles de Decisiones.
from math import log, inf
class Node:
    """A decision-tree node: holds the training subset that reaches it and,
    for internal nodes, the split conditions linking it to its children."""
    def __init__(self, parent, X, Y, atr_types, default):
        self.parent = parent
        # Training examples that belong to this node.
        self.X = X
        # Labels of the examples.
        self.Y = Y
        # Attribute types of the examples ("Catg"/"Cont").
        self.atr_types = atr_types
        # Mode of the labels (fallback prediction).
        self.default = default
        self.childs = []
        # The i-th condition corresponds to the i-th child.
        self.cond = []
        self.leaf = True
        # Label assigned to a pattern reaching this node when it is a leaf.
        self.value = None
class DecisionTree:
    """Generic decision-tree classifier supporting categorical ("Catg") and
    continuous ("Cont") attributes, trained with either the Gini impurity or
    the entropy splitting criterion."""
    def __init__(self, X, Y, atr_types, atr_name, atr_avail):
        # Training examples.
        self.X = X
        # Labels of the examples (each a one-element list).
        self.Y = Y
        # Attribute types: "Catg" (categorical) or "Cont" (continuous).
        self.atr_types = atr_types
        # Human-readable attribute names (used by print_tree).
        self.atr_name = atr_name
        # Availability flags; an attribute with 0 can no longer be split on.
        self.atr_avail = atr_avail
    def gini(self, *P):
        """Gini impurity of a probability distribution."""
        return 1 - sum(p**2 for p in P)
    def entropy(self, *P):
        """Shannon entropy (base 2) of a probability distribution."""
        r = 0
        for p in P:
            if p == 1: return 0
            elif p > 0: r -= p*log(p, 2)
        return r
    def mayoria(self, Y):
        """Return the mode of a list of one-element label lists, e.g.
        [[0], [1], [0]] -> 0. Returns None for an empty list."""
        counts = {}
        for y in Y:
            if y[0] in counts: counts[y[0]] += 1
            else: counts[y[0]] = 1
        best = None
        max_c = 0
        for label in counts:
            if counts[label] > max_c:
                max_c = counts[label]
                best = label
        # BUG FIX: the original returned the loop variable (the *last* key
        # iterated) instead of the accumulated mode ``best``.
        return best
    def get_values(self, X, a):
        """Distinct values taken by attribute *a* across the examples *X*."""
        values = []
        for x in X:
            if not x[a] in values: values.append(x[a])
        return values
    def gain_catg(self, a, values, X, Y):
        """Information gain of splitting on categorical attribute *a*."""
        # Empirical probability of each label at this node.
        N = len(Y)
        dic = {}
        for y in Y:
            if y[0] in dic: dic[y[0]] += 1/N
            else: dic[y[0]] = 1/N
        # Impurity of the current node.
        r = self.crit(*[dic[k] for k in dic])
        # Subtract the weighted impurity of each child after the split.
        for v in values:
            # Label counts restricted to examples where attribute a == v.
            dic = {}
            N_i = 0
            for i, y in enumerate(Y):
                if y[0] in dic and X[i][a] == v:
                    dic[y[0]] += 1
                    N_i += 1
                elif X[i][a] == v:
                    dic[y[0]] = 1
                    N_i += 1
            r -= N_i*self.crit(*[dic[k]/N_i for k in dic])/N
        return r
    def gain_cont(self, a, values, X, Y):
        """Best binary split for continuous attribute *a*.
        Returns (information gain, threshold)."""
        # Empirical probability of each label at this node.
        N = len(Y)
        dic = {}
        for y in Y:
            if y[0] in dic: dic[y[0]] += 1/N
            else: dic[y[0]] = 1/N
        # Impurity of the current node.
        r = self.crit(*[dic[k] for k in dic])
        # Candidate thresholds: midpoints between consecutive sorted values.
        values.sort()
        divs = [(values[i]+values[i+1])/2 for i in range(len(values)-1)]
        # Choose the threshold with minimum weighted impurity.
        min_e = inf
        best_d = -1
        for d in divs:
            # Label counts for examples with attribute >= d.
            dic = {}
            N_i = 0
            for i, y in enumerate(Y):
                if y[0] in dic and X[i][a] >= d:
                    dic[y[0]] += 1
                    N_i += 1
                elif X[i][a] >= d:
                    dic[y[0]] = 1
                    N_i += 1
            e = N_i*self.crit(*[dic[k]/N_i for k in dic])/N
            # Label counts for examples with attribute < d.
            dic = {}
            N_i = 0
            for i, y in enumerate(Y):
                if y[0] in dic and X[i][a] < d:
                    dic[y[0]] += 1
                    N_i += 1
                elif X[i][a] < d:
                    dic[y[0]] = 1
                    N_i += 1
            e += N_i*self.crit(*[dic[k]/N_i for k in dic])/N
            if e < min_e:
                min_e = e
                best_d = d
        # Gain = impurity of this node minus the best split's impurity.
        return r - min_e, best_d
    def train(self, splits = -1, criterio="Gini"):
        """Build the tree breadth-first, limited to *splits* divisions
        (-1 = unlimited) using criterion "Gini" or "Entropy"."""
        if criterio == "Entropy": self.crit = self.entropy
        elif criterio == "Gini": self.crit = self.gini
        root = Node(None, self.X, self.Y, self.atr_types, self.mayoria(self.Y))
        queue = [root]
        self.tree = root
        atr_avail = self.atr_avail
        # BFS over nodes to be resolved (leaf label or further split).
        while len(queue) > 0:
            node = queue.pop(0)
            # No examples left: inherit the parent's default label.
            if len(node.X) == 0: node.value = node.parent.default
            # All examples share one label: that label becomes the leaf value.
            elif all(node.Y[0] == y for y in node.Y): node.value = node.Y[0][0]
            # No attributes left (or split budget spent): use the mode.
            elif all(atr == 0 for atr in atr_avail) or splits == 0: node.value = self.mayoria(node.Y)
            # Otherwise perform a split.
            else:
                node.leaf = False
                splits -= 1
                # Pick the attribute with the highest information gain.
                best = -1
                best_g = -1
                div = -1
                for a in range(len(node.X[0])):
                    if atr_avail[a] != 0:
                        values = self.get_values(node.X, a)
                        if node.atr_types[a] == "Catg":
                            g = self.gain_catg(a, values, node.X, node.Y)
                            if g > best_g:
                                best_g = g
                                best = a
                        else:
                            g, div = self.gain_cont(a, values, node.X, node.Y)
                            if g > best_g:
                                best_g = g
                                best = a
                                best_d = div
                # Categorical split: one child per attribute value.
                if node.atr_types[best] == "Catg":
                    # A categorical attribute is consumed once used.
                    atr_avail[best] = 0
                    for v in self.get_values(node.X, best):
                        X_i, Y_i = [], []
                        for i in range(len(node.X)):
                            if node.X[i][best] == v:
                                x = node.X[i].copy()
                                x[best] = None
                                X_i.append(x)
                                Y_i.append(node.Y[i])
                        # Child node gets the partition block for value v.
                        atr_types_i = node.atr_types.copy()
                        atr_types_i[best] = None
                        child = Node(node, X_i, Y_i, atr_types_i, self.mayoria(Y_i))
                        node.childs.append(child)
                        node.cond.append((best, v))
                        queue.append(child)
                else:
                    # Continuous split: two children, < threshold and >=.
                    X_M, X_m, Y_M, Y_m = [], [], [], []
                    for i in range(len(node.X)):
                        x = node.X[i].copy()
                        if node.X[i][best] < best_d:
                            X_m.append(x)
                            Y_m.append(node.Y[i])
                        else:
                            X_M.append(x)
                            Y_M.append(node.Y[i])
                    atr_types_i = node.atr_types.copy()
                    child_m = Node(node, X_m, Y_m, atr_types_i, self.mayoria(Y_m))
                    child_M = Node(node, X_M, Y_M, atr_types_i, self.mayoria(Y_M))
                    node.childs.append(child_m)
                    node.childs.append(child_M)
                    node.cond.append((best, "<", best_d))
                    node.cond.append((best, ">=", best_d))
                    queue.append(child_m)
                    queue.append(child_M)
    def predict(self, x):
        """Predict the label of pattern *x* by walking the trained tree."""
        # Start at the root.
        node_i = self.tree
        x_i = x.copy()
        # Descend until a leaf is reached.
        while not node_i.leaf:
            # Find the child whose condition the pattern satisfies.
            cond = False
            for i, c in enumerate(node_i.cond):
                if len(c) == 2:
                    # Categorical condition: (attribute, value).
                    if x_i[c[0]] == c[1]:
                        node_i = node_i.childs[i]
                        cond = True
                        break
                elif (c[1] == "<" and x_i[c[0]] < c[2]) or \
                     (c[1] == ">=" and x_i[c[0]] >= c[2]):
                    node_i = node_i.childs[i]
                    cond = True
                    break
            # Unseen categorical value: fall back to the node's default.
            if not cond: return node_i.default
        return node_i.value
    def print_tree(self, node_i = None, level = 0, atr = None):
        """Return a textual representation of the (sub)tree."""
        if node_i == None: node_i = self.tree
        if atr == None: atr_i = self.atr_name.copy()
        else: atr_i = atr.copy()
        if node_i.leaf:
            text = " -> " + str(node_i.value)
        else:
            best = node_i.cond[0][0]
            # (The original had an if/else here with two identical branches;
            # collapsed into a single assignment.)
            text = "\n" + level*"| " + "_ " + atr_i[best]
            for i, c in enumerate(node_i.cond):
                text += "\n" + (level+1)*"| " + " * " + str(c[1])
                if len(c) == 3: text += str(c[2])
                text += self.print_tree(node_i.childs[i], level+1, atr_i)
            text += "\n" + (level)*"| " + "|_"
        return text
if __name__ == "__main__":
    # Demo: classify into one of three classes from a continuous unit count
    # ("UNIDADES") and a categorical destination ("DESTINO").
    X = [
        [5,"Esp"], [9,"Esp"], [0,"Eur"], [3,"Esp"], [8,"Eur"], [7,"Esp"],
        [11,"Esp"], [45,"Esp"], [24,"Eur"], [30,"Eur"], [25,"Eur"],
        [58,"Esp"], [60,"Esp"], [65,"Eur"], [78,"Esp"], [52,"Eur"],
        [40,"Esp"],
    ]
    Y = [[0],[0],[0],[0],[0],[0],[1],[1],[0],[0],[0],[2],[2],[1],[2],[1],[1]]
    IA = DecisionTree(X, Y,
        ["Cont", "Catg"],
        ["UNIDADES", "DESTINO"],
        [1,1]
    )
    # Train with at most 5 splits using the Gini criterion, then print.
    IA.train(5, "Gini")
    print(IA.print_tree())
|
nilq/baby-python
|
python
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class ISCSIDataIn(Base):
__slots__ = ()
_SDM_NAME = 'iSCSI_Data_In'
_SDM_ATT_MAP = {
'HeaderOpcode': 'iSCSI_Data_In.header.Opcode-1',
'HeaderFlags': 'iSCSI_Data_In.header.Flags-2',
'HeaderTotalAHSLength': 'iSCSI_Data_In.header.TotalAHSLength-3',
'HeaderUnknown': 'iSCSI_Data_In.header.Unknown-4',
'HeaderDataSegmentLength': 'iSCSI_Data_In.header.DataSegmentLength-5',
'HeaderLUN': 'iSCSI_Data_In.header.LUN-6',
'HeaderInitiatorTaskTag': 'iSCSI_Data_In.header.InitiatorTaskTag-7',
'HeaderTargetTransferTag': 'iSCSI_Data_In.header.TargetTransferTag-8',
'HeaderStatSN': 'iSCSI_Data_In.header.StatSN-9',
'HeaderExpCmdSN': 'iSCSI_Data_In.header.ExpCmdSN-10',
'HeaderMaxCmdSN': 'iSCSI_Data_In.header.MaxCmdSN-11',
'HeaderDataSN': 'iSCSI_Data_In.header.DataSN-12',
'HeaderBufferoffset': 'iSCSI_Data_In.header.Bufferoffset-13',
'HeaderResidualCount': 'iSCSI_Data_In.header.ResidualCount-14',
'HeaderHeaderDigest': 'iSCSI_Data_In.header.HeaderDigest-15',
}
def __init__(self, parent, list_op=False):
super(ISCSIDataIn, self).__init__(parent, list_op)
@property
def HeaderOpcode(self):
"""
Display Name: Opcode
Default Value: 0x25
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderOpcode']))
@property
def HeaderFlags(self):
"""
Display Name: Flags
Default Value: 0x80
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFlags']))
@property
def HeaderTotalAHSLength(self):
"""
Display Name: TotalAHSLength
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderTotalAHSLength']))
@property
def HeaderUnknown(self):
"""
Display Name: Unknown
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderUnknown']))
@property
def HeaderDataSegmentLength(self):
"""
Display Name: DataSegmentLength
Default Value: 0x000016
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderDataSegmentLength']))
@property
def HeaderLUN(self):
"""
Display Name: LUN
Default Value: 0x0000000000000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLUN']))
@property
def HeaderInitiatorTaskTag(self):
"""
Display Name: InitiatorTaskTag
Default Value: 0x00000010
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderInitiatorTaskTag']))
@property
def HeaderTargetTransferTag(self):
"""
Display Name: TargetTransferTag
Default Value: 0xFFFFFFFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderTargetTransferTag']))
@property
def HeaderStatSN(self):
"""
Display Name: StatSN
Default Value: 0x00000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderStatSN']))
@property
def HeaderExpCmdSN(self):
    """Accessor for the ExpCmdSN header field.

    Display Name: ExpCmdSN
    Default Value: 0x00000010
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderExpCmdSN'])
    return Multivalue(self, raw)
@property
def HeaderMaxCmdSN(self):
    """Accessor for the MaxCmdSN header field.

    Display Name: MaxCmdSN
    Default Value: 0x00000051
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderMaxCmdSN'])
    return Multivalue(self, raw)
@property
def HeaderDataSN(self):
    """Accessor for the DataSN header field.

    Display Name: DataSN
    Default Value: 0x00000001
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderDataSN'])
    return Multivalue(self, raw)
@property
def HeaderBufferoffset(self):
    """Accessor for the Bufferoffset header field.

    Display Name: Bufferoffset
    Default Value: 0x00000000
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderBufferoffset'])
    return Multivalue(self, raw)
@property
def HeaderResidualCount(self):
    """Accessor for the ResidualCount header field.

    Display Name: ResidualCount
    Default Value: 0x00000000
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderResidualCount'])
    return Multivalue(self, raw)
@property
def HeaderHeaderDigest(self):
    """Accessor for the HeaderDigest header field.

    Display Name: HeaderDigest
    Default Value: 0x5F7F4CAE
    Value Format: hex
    """
    from ixnetwork_restpy.multivalue import Multivalue
    raw = self._get_attribute(self._SDM_ATT_MAP['HeaderHeaderDigest'])
    return Multivalue(self, raw)
def add(self):
    """Create a new instance of this template on the server.

    The attribute payload is derived from this method's local namespace
    mapped through the SDM attribute map (same mechanism as the original
    generated code).
    """
    attrs = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._create(attrs)
|
nilq/baby-python
|
python
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is more akin to a .pyl/JSON file, so it's expected to be long.
# pylint: disable=too-many-lines
from gpu_tests import common_browser_args as cba
from gpu_tests import skia_gold_matching_algorithms as algo
# Process-type key used in PixelTestPage.expected_per_process_crashes maps.
CRASH_TYPE_GPU = 'gpu'

# Meant to be used when we know a test is going to be noisy, and we want any
# images it generates to be auto-triaged until we have enough data to calculate
# more suitable/less permissive parameters.
VERY_PERMISSIVE_SOBEL_ALGO = algo.SobelMatchingAlgorithm(
    max_different_pixels=100000000,
    pixel_delta_threshold=255,
    edge_threshold=0,
    ignored_border_thickness=1)
class PixelTestPage(object):
  """A wrapper class mimicking the functionality of the PixelTestsStorySet
  from the old-style GPU tests.
  """

  def __init__(  # pylint: disable=too-many-arguments
      self,
      url,
      name,
      test_rect,
      tolerance=2,
      browser_args=None,
      expected_colors=None,
      gpu_process_disabled=False,
      optional_action=None,
      restart_browser_after_test=False,
      other_args=None,
      grace_period_end=None,
      expected_per_process_crashes=None,
      matching_algorithm=None):
    """Describes one pixel test: the page to load, the region to capture,
    and how the captured image should be compared/triaged. See the per-
    attribute comments below for the meaning of each argument."""
    super(PixelTestPage, self).__init__()
    self.url = url
    self.name = name
    self.test_rect = test_rect
    # The tolerance when comparing against the reference image.
    self.tolerance = tolerance
    self.browser_args = browser_args
    # The expected colors can be specified as a list of dictionaries,
    # in which case these specific pixels will be sampled instead of
    # comparing the entire image snapshot. The format is only defined
    # by contract with _CompareScreenshotSamples in
    # cloud_storage_integration_test_base.py.
    self.expected_colors = expected_colors
    # Only a couple of tests run with the GPU process completely
    # disabled. To prevent regressions, only allow the GPU information
    # to be incomplete in these cases.
    self.gpu_process_disabled = gpu_process_disabled
    # Some of the tests require custom actions to be run. These are
    # specified as a string which is the name of a method to call in
    # PixelIntegrationTest. For example if the action here is
    # "CrashGpuProcess" then it would be defined in a
    # "_CrashGpuProcess" method in PixelIntegrationTest.
    self.optional_action = optional_action
    # Whether the browser should be forcibly restarted after the test
    # runs. The browser is always restarted after running tests with
    # optional_actions.
    self.restart_browser_after_test = restart_browser_after_test
    # These are used to pass additional arguments to the test harness.
    # VideoPathTraceTest and OverlayModeTest support the following boolean
    # arguments: expect_yuy2, zero_copy, video_is_rotated, and no_overlay.
    self.other_args = other_args
    # This allows a newly added test to be exempted from failures for a
    # (hopefully) short period after being added. This is so that any slightly
    # different but valid images that get produced by the waterfall bots can
    # be triaged without turning the bots red.
    # This should be a datetime.date object.
    self.grace_period_end = grace_period_end
    # This lets the test runner know that one or more crashes are expected as
    # part of the test. Should be a map of process type (str) to expected number
    # of crashes (int).
    self.expected_per_process_crashes = expected_per_process_crashes or {}
    # This should be a child of
    # skia_gold_matching_algorithms.SkiaGoldMatchingAlgorithm. This specifies
    # which matching algorithm Skia Gold should use for the test.
    self.matching_algorithm = (matching_algorithm
                               or algo.ExactMatchingAlgorithm())

  def CopyWithNewBrowserArgsAndSuffix(self, browser_args, suffix):
    """Returns a copy with browser_args replaced and suffix appended to name.

    NOTE(review): only url/name/test_rect/tolerance/browser_args/
    expected_colors are forwarded to the copy; all newer attributes
    (optional_action, other_args, matching_algorithm, ...) silently revert
    to their defaults. Confirm this is intended before relying on it.
    """
    return PixelTestPage(self.url, self.name + suffix, self.test_rect,
                         self.tolerance, browser_args, self.expected_colors)

  def CopyWithNewBrowserArgsAndPrefix(self, browser_args, prefix):
    """Returns a copy with browser_args replaced and prefix inserted after
    the first underscore-delimited token of the name.

    NOTE(review): same attribute-dropping caveat as
    CopyWithNewBrowserArgsAndSuffix above.
    """
    # Assuming the test name is 'Pixel'.
    split = self.name.split('_', 1)
    return PixelTestPage(self.url, split[0] + '_' + prefix + split[1],
                         self.test_rect, self.tolerance, browser_args,
                         self.expected_colors)
def CopyPagesWithNewBrowserArgsAndSuffix(pages, browser_args, suffix):
  """Copies every page in |pages|, swapping in |browser_args| and appending
  |suffix| to each copy's name."""
  copies = []
  for page in pages:
    copies.append(page.CopyWithNewBrowserArgsAndSuffix(browser_args, suffix))
  return copies
def CopyPagesWithNewBrowserArgsAndPrefix(pages, browser_args, prefix):
  """Copies every page in |pages|, swapping in |browser_args| and inserting
  |prefix| into each copy's name."""
  copies = []
  for page in pages:
    copies.append(page.CopyWithNewBrowserArgsAndPrefix(browser_args, prefix))
  return copies
# TODO(kbr): consider refactoring this into pixel_integration_test.py.
# NOTE(review): consumed outside this file — appears to map specific Android
# device types to a screenshot scale factor; the "comment" keys are inert
# documentation entries inside the data. Confirm against the harness code.
SCALE_FACTOR_OVERRIDES = {
    "comment":
        "scale factor overrides",
    "scale_factor_overrides": [
        {
            "device_type": "Nexus 5",
            "scale_factor": 1.105
        },
        {
            "device_type": "Nexus 5X",
            "scale_factor": 1.105
        },
        {
            "device_type": "Nexus 6",
            "scale_factor": 1.47436
        },
        {
            "device_type": "Nexus 6P",
            "scale_factor": 1.472
        },
        {
            "device_type": "Nexus 9",
            "scale_factor": 1.566
        },
        {
            "comment": "NVIDIA Shield",
            "device_type": "sb_na_wf",
            "scale_factor": 1.226
        },
        {
            "device_type": "Pixel 2",
            "scale_factor": 1.1067
        },
    ]
}
class PixelTestPages(object):
  """Namespace of factory methods; each returns the list of PixelTestPage
  objects to run under one particular browser/platform configuration."""

  @staticmethod
  def DefaultPages(base_name):
    """Pages run with the default browser configuration."""
    sw_compositing_args = [cba.DISABLE_GPU_COMPOSITING]

    # The optimizer script spat out pretty similar values for most MP4 tests, so
    # combine into a single set of parameters.
    general_mp4_algo = algo.SobelMatchingAlgorithm(max_different_pixels=56300,
                                                   pixel_delta_threshold=35,
                                                   edge_threshold=80)
    return [
        PixelTestPage('pixel_background_image.html',
                      base_name + '_BackgroundImage',
                      test_rect=[20, 20, 370, 370]),
        PixelTestPage('pixel_reflected_div.html',
                      base_name + '_ReflectedDiv',
                      test_rect=[0, 0, 100, 300]),
        PixelTestPage('pixel_canvas2d.html',
                      base_name + '_Canvas2DRedBox',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_canvas2d_untagged.html',
                      base_name + '_Canvas2DUntagged',
                      test_rect=[0, 0, 257, 257]),
        PixelTestPage('pixel_css3d.html',
                      base_name + '_CSS3DBlueBox',
                      test_rect=[0, 0, 300, 300],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=0,
                          pixel_delta_threshold=0,
                          edge_threshold=100)),
        PixelTestPage('pixel_webgl_aa_alpha.html',
                      base_name + '_WebGLGreenTriangle_AA_Alpha',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_webgl_noaa_alpha.html',
                      base_name + '_WebGLGreenTriangle_NoAA_Alpha',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_webgl_aa_noalpha.html',
                      base_name + '_WebGLGreenTriangle_AA_NoAlpha',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_webgl_noaa_noalpha.html',
                      base_name + '_WebGLGreenTriangle_NoAA_NoAlpha',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_webgl_noalpha_implicit_clear.html',
                      base_name +
                      '_WebGLTransparentGreenTriangle_NoAlpha_ImplicitClear',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_webgl_sad_canvas.html',
                      base_name + '_WebGLSadCanvas',
                      test_rect=[0, 0, 300, 300],
                      optional_action='CrashGpuProcess'),
        PixelTestPage('pixel_scissor.html',
                      base_name + '_ScissorTestWithPreserveDrawingBuffer',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_canvas2d_webgl.html',
                      base_name + '_2DCanvasWebGL',
                      test_rect=[0, 0, 300, 300]),
        PixelTestPage('pixel_background.html',
                      base_name + '_SolidColorBackground',
                      test_rect=[500, 500, 100, 100]),
        PixelTestPage(
            'pixel_video_mp4.html',
            base_name + '_Video_MP4',
            test_rect=[0, 0, 240, 135],
            # Most images are actually very similar, but Pixel 2
            # tends to produce images with all colors shifted by a
            # small amount.
            matching_algorithm=general_mp4_algo),
        # Surprisingly stable, does not appear to require inexact matching.
        PixelTestPage('pixel_video_mp4.html',
                      base_name + '_Video_MP4_DXVA',
                      browser_args=[cba.DISABLE_FEATURES_D3D11_VIDEO_DECODER],
                      test_rect=[0, 0, 240, 135]),
        PixelTestPage('pixel_video_mp4_four_colors_aspect_4x3.html',
                      base_name + '_Video_MP4_FourColors_Aspect_4x3',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=41700,
                          pixel_delta_threshold=15,
                          edge_threshold=40)),
        PixelTestPage('pixel_video_mp4_four_colors_rot_90.html',
                      base_name + '_Video_MP4_FourColors_Rot_90',
                      test_rect=[0, 0, 270, 240],
                      matching_algorithm=general_mp4_algo),
        PixelTestPage('pixel_video_mp4_four_colors_rot_180.html',
                      base_name + '_Video_MP4_FourColors_Rot_180',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=general_mp4_algo),
        PixelTestPage('pixel_video_mp4_four_colors_rot_270.html',
                      base_name + '_Video_MP4_FourColors_Rot_270',
                      test_rect=[0, 0, 270, 240],
                      matching_algorithm=general_mp4_algo),
        PixelTestPage('pixel_video_mp4_rounded_corner.html',
                      base_name + '_Video_MP4_Rounded_Corner',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=30500,
                          pixel_delta_threshold=15,
                          edge_threshold=70)),
        PixelTestPage('pixel_video_vp9.html',
                      base_name + '_Video_VP9',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=114000,
                          pixel_delta_threshold=30,
                          edge_threshold=20)),
        PixelTestPage('pixel_video_vp9.html',
                      base_name + '_Video_VP9_DXVA',
                      browser_args=[cba.DISABLE_FEATURES_D3D11_VIDEO_DECODER],
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=31100,
                          pixel_delta_threshold=30,
                          edge_threshold=250)),
        # The MP4 contains H.264 which is primarily hardware decoded on bots.
        PixelTestPage(
            'pixel_video_context_loss.html?src='
            '/media/test/data/four-colors.mp4',
            base_name + '_Video_Context_Loss_MP4',
            test_rect=[0, 0, 240, 135],
            # Optimizer script spat out a value of 255 for the Sobel edge
            # threshold, so use fuzzy for now since it's slightly more
            # efficient.
            matching_algorithm=algo.FuzzyMatchingAlgorithm(
                max_different_pixels=31700, pixel_delta_threshold=20),
            expected_per_process_crashes={
                CRASH_TYPE_GPU: 1,
            }),
        # The VP9 test clip is primarily software decoded on bots.
        PixelTestPage(('pixel_video_context_loss.html'
                       '?src=/media/test/data/four-colors-vp9.webm'),
                      base_name + '_Video_Context_Loss_VP9',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=54400,
                          pixel_delta_threshold=30,
                          edge_threshold=250),
                      expected_per_process_crashes={
                          CRASH_TYPE_GPU: 1,
                      }),
        PixelTestPage('pixel_video_backdrop_filter.html',
                      base_name + '_Video_BackdropFilter',
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=1000,
                          pixel_delta_threshold=20,
                          edge_threshold=40,
                          ignored_border_thickness=1)),
        PixelTestPage('pixel_webgl_premultiplied_alpha_false.html',
                      base_name + '_WebGL_PremultipliedAlpha_False',
                      test_rect=[0, 0, 150, 150]),
        PixelTestPage('pixel_webgl2_blitframebuffer_result_displayed.html',
                      base_name + '_WebGL2_BlitFramebuffer_Result_Displayed',
                      test_rect=[0, 0, 200, 200]),
        PixelTestPage('pixel_webgl2_clearbufferfv_result_displayed.html',
                      base_name + '_WebGL2_ClearBufferfv_Result_Displayed',
                      test_rect=[0, 0, 200, 200]),
        PixelTestPage('pixel_repeated_webgl_to_2d.html',
                      base_name + '_RepeatedWebGLTo2D',
                      test_rect=[0, 0, 256, 256]),
        PixelTestPage('pixel_repeated_webgl_to_2d.html',
                      base_name + '_RepeatedWebGLTo2D_SoftwareCompositing',
                      test_rect=[0, 0, 256, 256],
                      browser_args=sw_compositing_args),
        PixelTestPage('pixel_canvas2d_tab_switch.html',
                      base_name + '_Canvas2DTabSwitch',
                      test_rect=[0, 0, 100, 100],
                      optional_action='SwitchTabs'),
        PixelTestPage('pixel_canvas2d_tab_switch.html',
                      base_name + '_Canvas2DTabSwitch_SoftwareCompositing',
                      test_rect=[0, 0, 100, 100],
                      browser_args=sw_compositing_args,
                      optional_action='SwitchTabs'),
        PixelTestPage('pixel_webgl_copy_image.html',
                      base_name + '_WebGLCopyImage',
                      test_rect=[0, 0, 200, 100]),
        PixelTestPage('pixel_webgl_read_pixels_tab_switch.html',
                      base_name + '_WebGLReadPixelsTabSwitch',
                      test_rect=[0, 0, 100, 100],
                      optional_action='SwitchTabs'),
        PixelTestPage('pixel_webgl_read_pixels_tab_switch.html',
                      base_name +
                      '_WebGLReadPixelsTabSwitch_SoftwareCompositing',
                      test_rect=[0, 0, 100, 100],
                      browser_args=sw_compositing_args,
                      optional_action='SwitchTabs'),
        PixelTestPage('pixel_offscreen_canvas_ibrc_webgl_main.html',
                      base_name + '_OffscreenCanvasIBRCWebGLMain',
                      test_rect=[0, 0, 300, 300],
                      optional_action='RunOffscreenCanvasIBRCWebGLTest'),
        PixelTestPage('pixel_offscreen_canvas_ibrc_webgl_worker.html',
                      base_name + '_OffscreenCanvasIBRCWebGLWorker',
                      test_rect=[0, 0, 300, 300],
                      optional_action='RunOffscreenCanvasIBRCWebGLTest'),
    ]

  # Pages that should be run with GPU rasterization enabled.
  @staticmethod
  def GpuRasterizationPages(base_name):
    """Pages run with forced GPU rasterization."""
    browser_args = [
        cba.FORCE_GPU_RASTERIZATION,
        cba.DISABLE_SOFTWARE_COMPOSITING_FALLBACK,
    ]
    return [
        PixelTestPage('pixel_background.html',
                      base_name + '_GpuRasterization_BlueBox',
                      test_rect=[0, 0, 220, 220],
                      browser_args=browser_args),
        PixelTestPage('concave_paths.html',
                      base_name + '_GpuRasterization_ConcavePaths',
                      test_rect=[0, 0, 100, 100],
                      browser_args=browser_args),
        PixelTestPage('pixel_precision_rounded_corner.html',
                      base_name + '_PrecisionRoundedCorner',
                      test_rect=[0, 0, 400, 400],
                      browser_args=browser_args,
                      matching_algorithm=algo.SobelMatchingAlgorithm(
                          max_different_pixels=10,
                          pixel_delta_threshold=30,
                          edge_threshold=100)),
    ]

  # Pages that should be run with off-thread paint worklet flags.
  @staticmethod
  def PaintWorkletPages(base_name):
    """Pages run with off-main-thread CSS Paint enabled."""
    browser_args = [
        '--enable-blink-features=OffMainThreadCSSPaint',
        '--enable-gpu-rasterization', '--enable-oop-rasterization'
    ]
    return [
        PixelTestPage(
            'pixel_paintWorklet_transform.html',
            base_name + '_PaintWorkletTransform',
            test_rect=[0, 0, 200, 200],
            browser_args=browser_args),
    ]

  # Pages that should be run with experimental canvas features.
  @staticmethod
  def ExperimentalCanvasFeaturesPages(base_name):
    """OffscreenCanvas and canvas color-space pages, run with experimental
    web platform features enabled, in accelerated/unaccelerated variants."""
    browser_args = [
        cba.ENABLE_EXPERIMENTAL_WEB_PLATFORM_FEATURES,
    ]
    accelerated_args = [
        cba.DISABLE_SOFTWARE_COMPOSITING_FALLBACK,
    ]
    unaccelerated_args = [
        cba.DISABLE_ACCELERATED_2D_CANVAS,
        cba.DISABLE_GPU_COMPOSITING,
    ]
    return [
        PixelTestPage('pixel_offscreenCanvas_transfer_after_style_resize.html',
                      base_name + '_OffscreenCanvasTransferAfterStyleResize',
                      test_rect=[0, 0, 350, 350],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_transfer_before_style_resize.html',
                      base_name + '_OffscreenCanvasTransferBeforeStyleResize',
                      test_rect=[0, 0, 350, 350],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_webgl_paint_after_resize.html',
                      base_name + '_OffscreenCanvasWebGLPaintAfterResize',
                      test_rect=[0, 0, 200, 200],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_transferToImageBitmap_main.html',
                      base_name + '_OffscreenCanvasTransferToImageBitmap',
                      test_rect=[0, 0, 300, 300],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_transferToImageBitmap_worker.html',
                      base_name + '_OffscreenCanvasTransferToImageBitmapWorker',
                      test_rect=[0, 0, 300, 300],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_webgl_commit_main.html',
                      base_name + '_OffscreenCanvasWebGLDefault',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_webgl_commit_worker.html',
                      base_name + '_OffscreenCanvasWebGLDefaultWorker',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_webgl_commit_main.html',
                      base_name + '_OffscreenCanvasWebGLSoftwareCompositing',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args +
                      [cba.DISABLE_GPU_COMPOSITING]),
        PixelTestPage(
            'pixel_offscreenCanvas_webgl_commit_worker.html',
            base_name + '_OffscreenCanvasWebGLSoftwareCompositingWorker',
            test_rect=[0, 0, 360, 200],
            browser_args=browser_args + [cba.DISABLE_GPU_COMPOSITING]),
        PixelTestPage('pixel_offscreenCanvas_2d_commit_main.html',
                      base_name + '_OffscreenCanvasAccelerated2D',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args + accelerated_args),
        PixelTestPage('pixel_offscreenCanvas_2d_commit_worker.html',
                      base_name + '_OffscreenCanvasAccelerated2DWorker',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args + accelerated_args),
        PixelTestPage('pixel_offscreenCanvas_2d_commit_main.html',
                      base_name + '_OffscreenCanvasUnaccelerated2D',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args + unaccelerated_args),
        PixelTestPage('pixel_offscreenCanvas_2d_commit_worker.html',
                      base_name + '_OffscreenCanvasUnaccelerated2DWorker',
                      test_rect=[0, 0, 360, 200],
                      browser_args=browser_args + unaccelerated_args),
        PixelTestPage(
            'pixel_offscreenCanvas_2d_commit_main.html',
            base_name + '_OffscreenCanvasUnaccelerated2DGPUCompositing',
            test_rect=[0, 0, 360, 200],
            browser_args=browser_args + [cba.DISABLE_ACCELERATED_2D_CANVAS]),
        PixelTestPage(
            'pixel_offscreenCanvas_2d_commit_worker.html',
            base_name + '_OffscreenCanvasUnaccelerated2DGPUCompositingWorker',
            test_rect=[0, 0, 360, 200],
            browser_args=browser_args + [cba.DISABLE_ACCELERATED_2D_CANVAS]),
        PixelTestPage('pixel_offscreenCanvas_2d_resize_on_worker.html',
                      base_name + '_OffscreenCanvas2DResizeOnWorker',
                      test_rect=[0, 0, 200, 200],
                      browser_args=browser_args),
        PixelTestPage('pixel_offscreenCanvas_webgl_resize_on_worker.html',
                      base_name + '_OffscreenCanvasWebglResizeOnWorker',
                      test_rect=[0, 0, 200, 200],
                      browser_args=browser_args),
        PixelTestPage('pixel_canvas_display_srgb.html',
                      base_name + '_CanvasDisplaySRGBAccelerated2D',
                      test_rect=[0, 0, 140, 140],
                      browser_args=browser_args + accelerated_args),
        PixelTestPage('pixel_canvas_display_srgb.html',
                      base_name + '_CanvasDisplaySRGBUnaccelerated2D',
                      test_rect=[0, 0, 140, 140],
                      browser_args=browser_args + unaccelerated_args),
        PixelTestPage(
            'pixel_canvas_display_srgb.html',
            base_name + '_CanvasDisplaySRGBUnaccelerated2DGPUCompositing',
            test_rect=[0, 0, 140, 140],
            browser_args=browser_args + [cba.DISABLE_ACCELERATED_2D_CANVAS]),
    ]

  @staticmethod
  def LowLatencyPages(base_name):
    """Low-latency (desynchronized) canvas pages."""
    unaccelerated_args = [
        cba.DISABLE_ACCELERATED_2D_CANVAS,
        cba.DISABLE_GPU_COMPOSITING,
    ]
    return [
        PixelTestPage('pixel_canvas_low_latency_2d.html',
                      base_name + '_CanvasLowLatency2D',
                      test_rect=[0, 0, 100, 100]),
        PixelTestPage('pixel_canvas_low_latency_2d.html',
                      base_name + '_CanvasUnacceleratedLowLatency2D',
                      test_rect=[0, 0, 100, 100],
                      browser_args=unaccelerated_args),
        PixelTestPage('pixel_canvas_low_latency_webgl.html',
                      base_name + '_CanvasLowLatencyWebGL',
                      test_rect=[0, 0, 200, 200]),
        PixelTestPage('pixel_canvas_low_latency_webgl_alpha_false.html',
                      base_name + '_CanvasLowLatencyWebGLAlphaFalse',
                      test_rect=[0, 0, 200, 200]),
        PixelTestPage('pixel_canvas_low_latency_2d_draw_image.html',
                      base_name + '_CanvasLowLatency2DDrawImage',
                      test_rect=[0, 0, 200, 100]),
        PixelTestPage('pixel_canvas_low_latency_webgl_draw_image.html',
                      base_name + '_CanvasLowLatencyWebGLDrawImage',
                      test_rect=[0, 0, 200, 100]),
        PixelTestPage('pixel_canvas_low_latency_2d_image_data.html',
                      base_name + '_CanvasLowLatency2DImageData',
                      test_rect=[0, 0, 200, 100]),
    ]

  # Only add these tests on platforms where SwiftShader is enabled.
  # Currently this is Windows and Linux.
  @staticmethod
  def SwiftShaderPages(base_name):
    """Pages run with the GPU disabled so rendering falls back to
    SwiftShader."""
    browser_args = [cba.DISABLE_GPU]
    suffix = "_SwiftShader"
    return [
        PixelTestPage('pixel_canvas2d.html',
                      base_name + '_Canvas2DRedBox' + suffix,
                      test_rect=[0, 0, 300, 300],
                      browser_args=browser_args),
        PixelTestPage('pixel_css3d.html',
                      base_name + '_CSS3DBlueBox' + suffix,
                      test_rect=[0, 0, 300, 300],
                      browser_args=browser_args),
        PixelTestPage('pixel_webgl_aa_alpha.html',
                      base_name + '_WebGLGreenTriangle_AA_Alpha' + suffix,
                      test_rect=[0, 0, 300, 300],
                      browser_args=browser_args),
        PixelTestPage('pixel_repeated_webgl_to_2d.html',
                      base_name + '_RepeatedWebGLTo2D' + suffix,
                      test_rect=[0, 0, 256, 256],
                      browser_args=browser_args),
    ]

  # Test rendering where GPU process is blocked.
  @staticmethod
  def NoGpuProcessPages(base_name):
    """Pages run with both the GPU and the software rasterizer disabled."""
    browser_args = [cba.DISABLE_GPU, cba.DISABLE_SOFTWARE_RASTERIZER]
    suffix = "_NoGpuProcess"
    return [
        PixelTestPage(
            'pixel_canvas2d.html',
            base_name + '_Canvas2DRedBox' + suffix,
            test_rect=[0, 0, 300, 300],
            browser_args=browser_args,
            gpu_process_disabled=True),
        PixelTestPage(
            'pixel_css3d.html',
            base_name + '_CSS3DBlueBox' + suffix,
            test_rect=[0, 0, 300, 300],
            browser_args=browser_args,
            gpu_process_disabled=True),
    ]

  # Pages that should be run with various macOS specific command line
  # arguments.
  @staticmethod
  def MacSpecificPages(base_name):
    """Pages exercising macOS-only compositing paths."""
    iosurface_2d_canvas_args = ['--enable-accelerated-2d-canvas']
    non_chromium_image_args = ['--disable-webgl-image-chromium']
    # This disables the Core Animation compositor, falling back to the
    # old GLRenderer path, but continuing to allocate IOSurfaces for
    # WebGL's back buffer.
    no_overlays_args = ['--disable-mac-overlays']
    # The filter effect tests produce images with lots of gradients and blurs
    # which don't play nicely with Sobel filters, so a fuzzy algorithm instead
    # of Sobel. The images are also relatively large (360k pixels), and large
    # portions of the image are prone to noise, hence the large max different
    # pixels value.
    filter_effect_fuzzy_algo = algo.FuzzyMatchingAlgorithm(
        max_different_pixels=57500, pixel_delta_threshold=10)
    return [
        # On macOS, test the IOSurface 2D Canvas compositing path.
        PixelTestPage('pixel_canvas2d_accelerated.html',
                      base_name + '_IOSurface2DCanvas',
                      test_rect=[0, 0, 400, 400],
                      browser_args=iosurface_2d_canvas_args),
        PixelTestPage('pixel_canvas2d_webgl.html',
                      base_name + '_IOSurface2DCanvasWebGL',
                      test_rect=[0, 0, 300, 300],
                      browser_args=iosurface_2d_canvas_args),
        # On macOS, test WebGL non-Chromium Image compositing path.
        PixelTestPage('pixel_webgl_aa_alpha.html',
                      base_name +
                      '_WebGLGreenTriangle_NonChromiumImage_AA_Alpha',
                      test_rect=[0, 0, 300, 300],
                      browser_args=non_chromium_image_args),
        PixelTestPage('pixel_webgl_noaa_alpha.html',
                      base_name +
                      '_WebGLGreenTriangle_NonChromiumImage_NoAA_Alpha',
                      test_rect=[0, 0, 300, 300],
                      browser_args=non_chromium_image_args),
        PixelTestPage('pixel_webgl_aa_noalpha.html',
                      base_name +
                      '_WebGLGreenTriangle_NonChromiumImage_AA_NoAlpha',
                      test_rect=[0, 0, 300, 300],
                      browser_args=non_chromium_image_args),
        PixelTestPage('pixel_webgl_noaa_noalpha.html',
                      base_name +
                      '_WebGLGreenTriangle_NonChromiumImage_NoAA_NoAlpha',
                      test_rect=[0, 0, 300, 300],
                      browser_args=non_chromium_image_args),
        # On macOS, test CSS filter effects with and without the CA compositor.
        PixelTestPage('filter_effects.html',
                      base_name + '_CSSFilterEffects',
                      test_rect=[0, 0, 300, 300],
                      matching_algorithm=filter_effect_fuzzy_algo),
        PixelTestPage('filter_effects.html',
                      base_name + '_CSSFilterEffects_NoOverlays',
                      test_rect=[0, 0, 300, 300],
                      tolerance=10,
                      browser_args=no_overlays_args,
                      matching_algorithm=filter_effect_fuzzy_algo),
        # Test WebGL's premultipliedAlpha:false without the CA compositor.
        PixelTestPage('pixel_webgl_premultiplied_alpha_false.html',
                      base_name + '_WebGL_PremultipliedAlpha_False_NoOverlays',
                      test_rect=[0, 0, 150, 150],
                      browser_args=no_overlays_args),
    ]

  # Pages that should be run only on dual-GPU MacBook Pros (at the
  # present time, anyway).
  @staticmethod
  def DualGPUMacSpecificPages(base_name):
    """Pages exercising the discrete/integrated GPU power transitions."""
    return [
        PixelTestPage('pixel_webgl_high_to_low_power.html',
                      base_name + '_WebGLHighToLowPower',
                      test_rect=[0, 0, 300, 300],
                      optional_action='RunTestWithHighPerformanceTab'),
        PixelTestPage('pixel_webgl_low_to_high_power.html',
                      base_name + '_WebGLLowToHighPower',
                      test_rect=[0, 0, 300, 300],
                      optional_action='RunLowToHighPowerTest'),
        PixelTestPage('pixel_webgl_low_to_high_power_alpha_false.html',
                      base_name + '_WebGLLowToHighPowerAlphaFalse',
                      test_rect=[0, 0, 300, 300],
                      optional_action='RunLowToHighPowerTest'),
        PixelTestPage(
            'pixel_offscreen_canvas_ibrc_webgl_main.html',
            base_name + '_OffscreenCanvasIBRCWebGLHighPerfMain',
            test_rect=[0, 0, 300, 300],
            optional_action='RunOffscreenCanvasIBRCWebGLHighPerfTest'),
        PixelTestPage(
            'pixel_offscreen_canvas_ibrc_webgl_worker.html',
            base_name + '_OffscreenCanvasIBRCWebGLHighPerfWorker',
            test_rect=[0, 0, 300, 300],
            optional_action='RunOffscreenCanvasIBRCWebGLHighPerfTest'),
    ]

  @staticmethod
  def DirectCompositionPages(base_name):
    """Windows direct-composition video overlay pages."""
    browser_args = [
        '--enable-direct-composition-video-overlays',
        # All bots are connected with a power source, however, we want to to
        # test with the code path that's enabled with battery power.
        cba.DISABLE_VP_SCALING,
    ]
    browser_args_YUY2 = browser_args + [
        '--disable-features=DirectCompositionPreferNV12Overlays'
    ]
    browser_args_DXVA = browser_args + [
        cba.DISABLE_FEATURES_D3D11_VIDEO_DECODER
    ]

    # Most tests fall roughly into 3 tiers of noisiness.
    # Parameter values were determined using the automated optimization script,
    # and similar values combined into a single set using the most permissive
    # value for each parameter in that tier.
    strict_dc_sobel_algorithm = algo.SobelMatchingAlgorithm(
        max_different_pixels=1000,
        pixel_delta_threshold=5,
        edge_threshold=250,
        ignored_border_thickness=1)
    permissive_dc_sobel_algorithm = algo.SobelMatchingAlgorithm(
        max_different_pixels=16800,
        pixel_delta_threshold=20,
        edge_threshold=30,
        ignored_border_thickness=1)
    very_permissive_dc_sobel_algorithm = algo.SobelMatchingAlgorithm(
        max_different_pixels=30400,
        pixel_delta_threshold=45,
        edge_threshold=10,
        ignored_border_thickness=1,
    )
    return [
        PixelTestPage('pixel_video_mp4.html',
                      base_name + '_DirectComposition_Video_MP4',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4.html',
                      base_name + '_DirectComposition_Video_MP4_DXVA',
                      browser_args=browser_args_DXVA,
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_fullsize.html',
                      base_name + '_DirectComposition_Video_MP4_Fullsize',
                      browser_args=browser_args,
                      test_rect=[0, 0, 960, 540],
                      other_args={'zero_copy': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4.html',
                      base_name + '_DirectComposition_Video_MP4_YUY2',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args_YUY2,
                      other_args={'expect_yuy2': True},
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_four_colors_aspect_4x3.html',
                      base_name +
                      '_DirectComposition_Video_MP4_FourColors_Aspect_4x3',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_four_colors_rot_90.html',
                      base_name +
                      '_DirectComposition_Video_MP4_FourColors_Rot_90',
                      test_rect=[0, 0, 270, 240],
                      browser_args=browser_args,
                      other_args={'video_is_rotated': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_four_colors_rot_180.html',
                      base_name +
                      '_DirectComposition_Video_MP4_FourColors_Rot_180',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      other_args={'video_is_rotated': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_four_colors_rot_270.html',
                      base_name +
                      '_DirectComposition_Video_MP4_FourColors_Rot_270',
                      test_rect=[0, 0, 270, 240],
                      browser_args=browser_args,
                      other_args={'video_is_rotated': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_vp9.html',
                      base_name + '_DirectComposition_Video_VP9',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      matching_algorithm=very_permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_vp9.html',
                      base_name + '_DirectComposition_Video_VP9_DXVA',
                      browser_args=browser_args_DXVA,
                      test_rect=[0, 0, 240, 135],
                      matching_algorithm=very_permissive_dc_sobel_algorithm),
        PixelTestPage(
            'pixel_video_vp9_fullsize.html',
            base_name + '_DirectComposition_Video_VP9_Fullsize',
            test_rect=[0, 0, 960, 540],
            browser_args=browser_args,
            other_args={'zero_copy': True},
            # Much larger image than other VP9 tests.
            matching_algorithm=algo.SobelMatchingAlgorithm(
                max_different_pixels=504000,
                pixel_delta_threshold=10,
                edge_threshold=10,
                ignored_border_thickness=1,
            )),
        PixelTestPage('pixel_video_vp9.html',
                      base_name + '_DirectComposition_Video_VP9_YUY2',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args_YUY2,
                      other_args={'expect_yuy2': True},
                      matching_algorithm=very_permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_vp9_i420a.html',
                      base_name + '_DirectComposition_Video_VP9_I420A',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      other_args={'no_overlay': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_underlay.html',
                      base_name + '_DirectComposition_Underlay',
                      test_rect=[0, 0, 240, 136],
                      browser_args=browser_args,
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_underlay.html',
                      base_name + '_DirectComposition_Underlay_DXVA',
                      test_rect=[0, 0, 240, 136],
                      browser_args=browser_args_DXVA,
                      matching_algorithm=permissive_dc_sobel_algorithm),
        PixelTestPage('pixel_video_underlay_fullsize.html',
                      base_name + '_DirectComposition_Underlay_Fullsize',
                      test_rect=[0, 0, 960, 540],
                      browser_args=browser_args,
                      other_args={'zero_copy': True},
                      matching_algorithm=strict_dc_sobel_algorithm),
        PixelTestPage('pixel_video_mp4_rounded_corner.html',
                      base_name + '_DirectComposition_Video_MP4_Rounded_Corner',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      other_args={'no_overlay': True}),
        PixelTestPage('pixel_video_backdrop_filter.html',
                      base_name + '_DirectComposition_Video_BackdropFilter',
                      test_rect=[0, 0, 240, 135],
                      browser_args=browser_args,
                      other_args={'no_overlay': True}),
        PixelTestPage(
            'pixel_video_mp4.html',
            base_name + '_DirectComposition_Video_Disable_Overlays',
            test_rect=[0, 0, 240, 135],
            browser_args=['--disable-direct-composition-video-overlays'],
            other_args={'no_overlay': True},
            matching_algorithm=very_permissive_dc_sobel_algorithm),
    ]

  @staticmethod
  def HdrTestPages(base_name):
    """Pages run with forced HDR-capable color profiles."""
    return [
        PixelTestPage(
            'pixel_canvas2d.html',
            base_name + '_Canvas2DRedBoxScrgbLinear',
            test_rect=[0, 0, 300, 300],
            browser_args=['--force-color-profile=scrgb-linear']),
        PixelTestPage(
            'pixel_canvas2d.html',
            base_name + '_Canvas2DRedBoxHdr10',
            test_rect=[0, 0, 300, 300],
            browser_args=['--force-color-profile=hdr10']),
    ]
|
nilq/baby-python
|
python
|
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# transform to [[x,y,z]...] [[x...],[y...],[z...]]
# plot
def plot_sample_list(sample_list, lim_val=10):
    """Scatter-plot a list of 3-D points.

    Args:
        sample_list: sequence of [x, y, z] triples (anything np.array accepts
            as an (N, 3) array).
        lim_val: half-width of the symmetric axis limits applied to x, y
            and z (default 10).
    """
    sample_mat = np.array(sample_list)
    fig = plt.figure()
    # The '3d' projection is registered by the mpl_toolkits.mplot3d.Axes3D
    # import at the top of the file.
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(sample_mat[:, 0], sample_mat[:, 1], sample_mat[:, 2],
               c='r', marker='o')
    ax.set_xlim(-lim_val, lim_val)
    ax.set_ylim(-lim_val, lim_val)
    ax.set_zlim(-lim_val, lim_val)
    plt.show()

# Removed the stray module-level `Axes3D.plot()` call: it invoked an unbound
# method with no arguments and raised TypeError as soon as this module was
# imported. The Axes3D import itself is still needed (see above).
|
nilq/baby-python
|
python
|
### ---------------------------------------------------------------------------
from .core.startup import initialize
from .sublime import commands, events
# Package version, kept both as a tuple (for programmatic comparison) and as
# the conventional dotted string.
__version_tuple = (1, 0, 0)
__version__ = ".".join(str(part) for part in __version_tuple)

### ---------------------------------------------------------------------------

__all__ = [
    "initialize",
    "commands",
    "events",
    "version"
]
### ---------------------------------------------------------------------------
def version() -> tuple:
    """
    Get the version of the installed dependency package as a tuple of
    ints (major, minor, patch). This is used during the bootstrap check
    to see if the version of the dependency has changed.
    """
    return __version_tuple
### ---------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
# coding:utf-8
import redis
SCRIPT_PUSH = '''
local q = KEYS[1]
local q_set = KEYS[1] .. "_set"
local v = redis.call("SADD", q_set, ARGV[1])
if v == 1
then
return redis.call("RPUSH", q, ARGV[1]) and 1
else
return 0
end
'''
SCRIPT_POP = '''
local q = KEYS[1]
local q_set = KEYS[1] .. "_set"
local v = redis.call("LPOP", q)
if v then
redis.call("SREM", q_set, v)
end
return v
'''
r = redis.Redis()
queue_push = r.register_script(SCRIPT_PUSH)
queue_pop = r.register_script(SCRIPT_POP)
if __name__ == "__main__":
mq_key = "ct:kafka:mq"
count = 10
for i in range(count):
for i in range(count):
queue_push(keys=[mq_key], args=['Test_key_{}'.format(i)])
for i in range(count + 10):
print queue_pop(keys=[mq_key])
|
nilq/baby-python
|
python
|
import os
import datetime
os.system("powercfg /batteryreport")
d = datetime.datetime.now()
try:
os.rename('battery-report.html', f'battery-health/reports/battery-report-{d.month}-{d.day}-{d.year}.html')
os.startfile(f'battery-health\\reports\\battery-report-{d.month}-{d.day}-{d.year}.html')
except WindowsError:
print(' > Data already collected today')
files = os.listdir('battery-health/reports')
for i in range(len(files)):
f = open(f'battery-health/reports/{files[i]}', 'r')
lines = f.readlines()
for line in lines:
if 'FULL CHARGE CAPACITY' in line and 'mWh' in line:
l = line.split('<td>')
l = l[2].replace('\n', '')
name = files[i].split('-')
files[i] = f'{name[2]}/{name[3]}/{name[4][:4]}: {l}\n'
log = open(f'battery-health/log.txt', 'w')
log.writelines(files)
log.close()
print(' > Done')
|
nilq/baby-python
|
python
|
import aspose.words as aw
from docs_examples_base import DocsExamplesBase, MY_DIR, ARTIFACTS_DIR
class WorkingWithPclSaveOptions(DocsExamplesBase):
    """Aspose.Words doc example: saving a document to PCL via PclSaveOptions."""

    def test_rasterize_transformed_elements(self):
        #ExStart:RasterizeTransformedElements
        # Load the sample document and configure PCL output so transformed
        # elements are written as-is rather than rasterized.
        doc = aw.Document(MY_DIR + "Rendering.docx")

        save_options = aw.saving.PclSaveOptions()
        save_options.save_format = aw.SaveFormat.PCL
        save_options.rasterize_transformed_elements = False

        doc.save(ARTIFACTS_DIR + "WorkingWithPclSaveOptions.rasterize_transformed_elements.pcl", save_options)
        #ExEnd:RasterizeTransformedElements
|
nilq/baby-python
|
python
|
# MINLP written by GAMS Convert at 04/21/18 13:54:00
#
# Equation counts
# Total E G L N X C B
# 750 619 34 97 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 785 725 60 0 0 0 0 0
# FX 10 10 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2445 2433 12 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x14 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x15 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x16 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x17 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x18 = Var(within=Reals,bounds=(0.1,39.6),initialize=1)
m.x19 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x20 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x21 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x22 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x23 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x24 = Var(within=Reals,bounds=(0.1,49.5),initialize=1)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,85),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,35),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x279 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x294 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x295 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x296 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x297 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x298 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x299 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x300 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x301 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x302 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x303 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x304 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x305 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x306 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x307 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x308 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x309 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x310 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x311 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x312 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x313 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x314 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x315 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x316 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x317 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x318 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x319 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x320 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x321 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x322 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x323 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x324 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x325 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x326 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x327 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x328 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x329 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x330 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x331 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x332 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x333 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x334 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x335 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x336 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x337 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x338 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x339 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x340 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x341 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x342 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x343 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x344 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x345 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x346 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x347 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x348 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x349 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x350 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x351 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x352 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x353 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x354 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x355 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x356 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x357 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x358 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x359 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x360 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x361 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x362 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x363 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x364 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x365 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x366 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x367 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x368 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x369 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x370 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x371 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x372 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x373 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x374 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x375 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x376 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x377 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x378 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x379 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x380 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x381 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x382 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x383 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x384 = Var(within=Reals,bounds=(0,65),initialize=0)
m.x385 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x386 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x387 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x388 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x389 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x390 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x391 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x392 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x393 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x394 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x395 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x396 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x397 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x398 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x399 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x400 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x401 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x402 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x403 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x404 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x405 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x406 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x407 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x408 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x409 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x410 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x411 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x412 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x413 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x414 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x415 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x416 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x417 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x418 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x419 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x420 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x421 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x422 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x423 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x424 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x425 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x426 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x427 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x428 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x429 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x430 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x431 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x432 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x433 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x434 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x435 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x436 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x437 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x438 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x439 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x440 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x441 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x442 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x443 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x444 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0)
# Bulk declaration of the continuous variables x495..x725.
# Four contiguous index ranges share identical bounds; x625 is not
# declared in this span (presumably declared elsewhere in the file —
# the generated code jumps from x624 to x626).
#   x495..x564 : bounds (0, None)
#   x565..x574 : bounds (0, 0)   (fixed at zero by bounds)
#   x575..x624 : bounds (0, 6)
#   x626..x725 : bounds (0, None)
for _first, _last, _bnds in [
    (495, 564, (0, None)),
    (565, 574, (0, 0)),
    (575, 624, (0, 6)),
    (626, 725, (0, None)),
]:
    for _i in range(_first, _last + 1):
        # setattr on the model is equivalent to m.xNNN = Var(...)
        setattr(m, 'x%d' % _i,
                Var(within=Reals, bounds=_bnds, initialize=0))
# Bulk declaration of the binary variables b726..b785, all with the
# same (redundant, since Binary already implies 0/1) bounds and a
# zero initial point.
for _i in range(726, 786):
    setattr(m, 'b%d' % _i,
            Var(within=Binary, bounds=(0, 1), initialize=0))
# Maximization objective, rebuilt from its regular structure.
#
# It is an equally weighted (0.1 each) sum over ten scenarios.  Each
# scenario contributes, for x13..x18, the nonlinear term
#     (2.85714285714286 + 1.42857142857143*log((40 - x)/x)) * x
# plus a scenario-specific linear coefficient times x, and for
# x19..x24 the nonlinear term
#     (3 + log((50 - x)/x)) * x
# plus a second scenario-specific linear coefficient times x.
# After the scenario sum come seven runs of plain linear costs.
# Terms are accumulated in the original left-to-right order so the
# resulting expression is numerically identical to the generated one.

# (coef for x13..x18, coef for x19..x24), one pair per scenario.
_scenario_coefs = [
    (-0.17779,   -0.1063176),
    (-0.1699941,  0.510392),
    (-1.3723019,  3.411928),
    (-0.1737872,  2.0030265),
    ( 0.8502323, -0.9911669),
    ( 0.6976087,  0.7111006),
    ( 0.5499974, -2.2692161),
    (-0.402732,   1.7564073),
    (-0.1915938,  1.9458335),
    (-1.1945279,  4.2422342),
]

_e = 0
for _c_lo, _c_hi in _scenario_coefs:
    _g = 0
    for _j in range(13, 19):
        _x = getattr(m, 'x%d' % _j)
        _g = _g + (2.85714285714286
                   + 1.42857142857143*log((40 - _x)/_x))*_x + _c_lo*_x
    for _j in range(19, 25):
        _x = getattr(m, 'x%d' % _j)
        _g = _g + (3 + log((50 - _x)/_x))*_x + _c_hi*_x
    _e = _e + 0.1*_g

# Linear cost runs: (first index, last index, cost per unit).
for _first, _last, _cost in [
    (85, 114, 0.026),
    (115, 144, 0.014),
    (205, 234, 0.016),
    (235, 264, 0.013),
    (325, 384, 0.032),
    (505, 564, 0.1),
    (565, 624, 0.003),
]:
    for _j in range(_first, _last + 1):
        _e = _e - _cost*getattr(m, 'x%d' % _j)

m.obj = Objective(expr=_e, sense=maximize)
# Constraints c1..c135: three families of affine coupling equations,
# each tying one variable to a scaled partner with a fixed index
# offset.  Index arithmetic (verified at both endpoints of every
# family against the generated code):
#   c1..c60   : x(i+24)  = 0.83 * x(i+84)    (c1: x25=0.83*x85,   c60:  x84=0.83*x144)
#   c61..c120 : x(i+84)  = 0.95 * x(i+144)   (c61: x145=0.95*x205, c120: x204=0.95*x264)
#   c121..c135: x(i+144) = 1.11 * x(i+204)   (c121: x265=1.11*x325, c135: x279=1.11*x339)
for _lo, _hi, _off_a, _off_b, _ratio in [
    (1, 60, 24, 84, 0.83),
    (61, 120, 84, 144, 0.95),
    (121, 135, 144, 204, 1.11),
]:
    for _i in range(_lo, _hi + 1):
        _lhs = getattr(m, 'x%d' % (_i + _off_a))
        _rhs = getattr(m, 'x%d' % (_i + _off_b))
        setattr(m, 'c%d' % _i,
                Constraint(expr=_lhs - _ratio*_rhs == 0))
m.c136 = Constraint(expr= m.x280 - 1.11*m.x340 == 0)
m.c137 = Constraint(expr= m.x281 - 1.11*m.x341 == 0)
m.c138 = Constraint(expr= m.x282 - 1.11*m.x342 == 0)
m.c139 = Constraint(expr= m.x283 - 1.11*m.x343 == 0)
m.c140 = Constraint(expr= m.x284 - 1.11*m.x344 == 0)
m.c141 = Constraint(expr= m.x285 - 1.11*m.x345 == 0)
m.c142 = Constraint(expr= m.x286 - 1.11*m.x346 == 0)
m.c143 = Constraint(expr= m.x287 - 1.11*m.x347 == 0)
m.c144 = Constraint(expr= m.x288 - 1.11*m.x348 == 0)
m.c145 = Constraint(expr= m.x289 - 1.11*m.x349 == 0)
m.c146 = Constraint(expr= m.x290 - 1.11*m.x350 == 0)
m.c147 = Constraint(expr= m.x291 - 1.11*m.x351 == 0)
m.c148 = Constraint(expr= m.x292 - 1.11*m.x352 == 0)
m.c149 = Constraint(expr= m.x293 - 1.11*m.x353 == 0)
m.c150 = Constraint(expr= m.x294 - 1.11*m.x354 == 0)
m.c151 = Constraint(expr= m.x295 - 1.11*m.x355 == 0)
m.c152 = Constraint(expr= m.x296 - 1.11*m.x356 == 0)
m.c153 = Constraint(expr= m.x297 - 1.11*m.x357 == 0)
m.c154 = Constraint(expr= m.x298 - 1.11*m.x358 == 0)
m.c155 = Constraint(expr= m.x299 - 1.11*m.x359 == 0)
m.c156 = Constraint(expr= m.x300 - 1.11*m.x360 == 0)
m.c157 = Constraint(expr= m.x301 - 1.11*m.x361 == 0)
m.c158 = Constraint(expr= m.x302 - 1.11*m.x362 == 0)
m.c159 = Constraint(expr= m.x303 - 1.11*m.x363 == 0)
m.c160 = Constraint(expr= m.x304 - 1.11*m.x364 == 0)
m.c161 = Constraint(expr= m.x305 - 1.11*m.x365 == 0)
m.c162 = Constraint(expr= m.x306 - 1.11*m.x366 == 0)
m.c163 = Constraint(expr= m.x307 - 1.11*m.x367 == 0)
m.c164 = Constraint(expr= m.x308 - 1.11*m.x368 == 0)
m.c165 = Constraint(expr= m.x309 - 1.11*m.x369 == 0)
m.c166 = Constraint(expr= m.x310 - 1.11*m.x370 == 0)
m.c167 = Constraint(expr= m.x311 - 1.11*m.x371 == 0)
m.c168 = Constraint(expr= m.x312 - 1.11*m.x372 == 0)
m.c169 = Constraint(expr= m.x313 - 1.11*m.x373 == 0)
m.c170 = Constraint(expr= m.x314 - 1.11*m.x374 == 0)
m.c171 = Constraint(expr= m.x315 - 1.11*m.x375 == 0)
m.c172 = Constraint(expr= m.x316 - 1.11*m.x376 == 0)
m.c173 = Constraint(expr= m.x317 - 1.11*m.x377 == 0)
m.c174 = Constraint(expr= m.x318 - 1.11*m.x378 == 0)
m.c175 = Constraint(expr= m.x319 - 1.11*m.x379 == 0)
m.c176 = Constraint(expr= m.x320 - 1.11*m.x380 == 0)
m.c177 = Constraint(expr= m.x321 - 1.11*m.x381 == 0)
m.c178 = Constraint(expr= m.x322 - 1.11*m.x382 == 0)
m.c179 = Constraint(expr= m.x323 - 1.11*m.x383 == 0)
m.c180 = Constraint(expr= m.x324 - 1.11*m.x384 == 0)
m.c181 = Constraint(expr= - m.x25 + m.x385 == 0)
m.c182 = Constraint(expr= - m.x26 + m.x386 == 0)
m.c183 = Constraint(expr= - m.x27 + m.x387 == 0)
m.c184 = Constraint(expr= - m.x28 + m.x388 == 0)
m.c185 = Constraint(expr= - m.x29 + m.x389 == 0)
m.c186 = Constraint(expr= - m.x30 + m.x390 == 0)
m.c187 = Constraint(expr= - m.x31 + m.x391 == 0)
m.c188 = Constraint(expr= - m.x32 + m.x392 == 0)
m.c189 = Constraint(expr= - m.x33 + m.x393 == 0)
m.c190 = Constraint(expr= - m.x34 + m.x394 == 0)
m.c191 = Constraint(expr= - m.x35 + m.x395 == 0)
m.c192 = Constraint(expr= - m.x36 + m.x396 == 0)
m.c193 = Constraint(expr= - m.x37 + m.x397 == 0)
m.c194 = Constraint(expr= - m.x38 + m.x398 == 0)
m.c195 = Constraint(expr= - m.x39 + m.x399 == 0)
m.c196 = Constraint(expr= - m.x40 + m.x400 == 0)
m.c197 = Constraint(expr= - m.x41 + m.x401 == 0)
m.c198 = Constraint(expr= - m.x42 + m.x402 == 0)
m.c199 = Constraint(expr= - m.x43 + m.x403 == 0)
m.c200 = Constraint(expr= - m.x44 + m.x404 == 0)
m.c201 = Constraint(expr= - m.x45 + m.x405 == 0)
m.c202 = Constraint(expr= - m.x46 + m.x406 == 0)
m.c203 = Constraint(expr= - m.x47 + m.x407 == 0)
m.c204 = Constraint(expr= - m.x48 + m.x408 == 0)
m.c205 = Constraint(expr= - m.x49 + m.x409 == 0)
m.c206 = Constraint(expr= - m.x50 + m.x410 == 0)
m.c207 = Constraint(expr= - m.x51 + m.x411 == 0)
m.c208 = Constraint(expr= - m.x52 + m.x412 == 0)
m.c209 = Constraint(expr= - m.x53 + m.x413 == 0)
m.c210 = Constraint(expr= - m.x54 + m.x414 == 0)
m.c211 = Constraint(expr= - m.x55 + m.x415 == 0)
m.c212 = Constraint(expr= - m.x56 + m.x416 == 0)
m.c213 = Constraint(expr= - m.x57 + m.x417 == 0)
m.c214 = Constraint(expr= - m.x58 + m.x418 == 0)
m.c215 = Constraint(expr= - m.x59 + m.x419 == 0)
m.c216 = Constraint(expr= - m.x60 + m.x420 == 0)
m.c217 = Constraint(expr= - m.x61 + m.x421 == 0)
m.c218 = Constraint(expr= - m.x62 + m.x422 == 0)
m.c219 = Constraint(expr= - m.x63 + m.x423 == 0)
m.c220 = Constraint(expr= - m.x64 + m.x424 == 0)
m.c221 = Constraint(expr= - m.x65 + m.x425 == 0)
m.c222 = Constraint(expr= - m.x66 + m.x426 == 0)
m.c223 = Constraint(expr= - m.x67 + m.x427 == 0)
m.c224 = Constraint(expr= - m.x68 + m.x428 == 0)
m.c225 = Constraint(expr= - m.x69 + m.x429 == 0)
m.c226 = Constraint(expr= - m.x70 + m.x430 == 0)
m.c227 = Constraint(expr= - m.x71 + m.x431 == 0)
m.c228 = Constraint(expr= - m.x72 + m.x432 == 0)
m.c229 = Constraint(expr= - m.x73 + m.x433 == 0)
m.c230 = Constraint(expr= - m.x74 + m.x434 == 0)
m.c231 = Constraint(expr= - m.x75 + m.x435 == 0)
m.c232 = Constraint(expr= - m.x76 + m.x436 == 0)
m.c233 = Constraint(expr= - m.x77 + m.x437 == 0)
m.c234 = Constraint(expr= - m.x78 + m.x438 == 0)
m.c235 = Constraint(expr= - m.x79 + m.x439 == 0)
m.c236 = Constraint(expr= - m.x80 + m.x440 == 0)
m.c237 = Constraint(expr= - m.x81 + m.x441 == 0)
m.c238 = Constraint(expr= - m.x82 + m.x442 == 0)
m.c239 = Constraint(expr= - m.x83 + m.x443 == 0)
m.c240 = Constraint(expr= - m.x84 + m.x444 == 0)
m.c241 = Constraint(expr= m.x85 - m.x145 - m.x265 == 0)
m.c242 = Constraint(expr= m.x86 - m.x146 - m.x266 == 0)
m.c243 = Constraint(expr= m.x87 - m.x147 - m.x267 == 0)
m.c244 = Constraint(expr= m.x88 - m.x148 - m.x268 == 0)
m.c245 = Constraint(expr= m.x89 - m.x149 - m.x269 == 0)
m.c246 = Constraint(expr= m.x90 - m.x150 - m.x270 == 0)
m.c247 = Constraint(expr= m.x91 - m.x151 - m.x271 == 0)
m.c248 = Constraint(expr= m.x92 - m.x152 - m.x272 == 0)
m.c249 = Constraint(expr= m.x93 - m.x153 - m.x273 == 0)
m.c250 = Constraint(expr= m.x94 - m.x154 - m.x274 == 0)
m.c251 = Constraint(expr= m.x95 - m.x155 - m.x275 == 0)
m.c252 = Constraint(expr= m.x96 - m.x156 - m.x276 == 0)
m.c253 = Constraint(expr= m.x97 - m.x157 - m.x277 == 0)
m.c254 = Constraint(expr= m.x98 - m.x158 - m.x278 == 0)
m.c255 = Constraint(expr= m.x99 - m.x159 - m.x279 == 0)
m.c256 = Constraint(expr= m.x100 - m.x160 - m.x280 == 0)
m.c257 = Constraint(expr= m.x101 - m.x161 - m.x281 == 0)
m.c258 = Constraint(expr= m.x102 - m.x162 - m.x282 == 0)
m.c259 = Constraint(expr= m.x103 - m.x163 - m.x283 == 0)
m.c260 = Constraint(expr= m.x104 - m.x164 - m.x284 == 0)
m.c261 = Constraint(expr= m.x105 - m.x165 - m.x285 == 0)
m.c262 = Constraint(expr= m.x106 - m.x166 - m.x286 == 0)
m.c263 = Constraint(expr= m.x107 - m.x167 - m.x287 == 0)
m.c264 = Constraint(expr= m.x108 - m.x168 - m.x288 == 0)
m.c265 = Constraint(expr= m.x109 - m.x169 - m.x289 == 0)
m.c266 = Constraint(expr= m.x110 - m.x170 - m.x290 == 0)
m.c267 = Constraint(expr= m.x111 - m.x171 - m.x291 == 0)
m.c268 = Constraint(expr= m.x112 - m.x172 - m.x292 == 0)
m.c269 = Constraint(expr= m.x113 - m.x173 - m.x293 == 0)
m.c270 = Constraint(expr= m.x114 - m.x174 - m.x294 == 0)
m.c271 = Constraint(expr= m.x115 - m.x175 - m.x295 == 0)
m.c272 = Constraint(expr= m.x116 - m.x176 - m.x296 == 0)
m.c273 = Constraint(expr= m.x117 - m.x177 - m.x297 == 0)
m.c274 = Constraint(expr= m.x118 - m.x178 - m.x298 == 0)
m.c275 = Constraint(expr= m.x119 - m.x179 - m.x299 == 0)
m.c276 = Constraint(expr= m.x120 - m.x180 - m.x300 == 0)
m.c277 = Constraint(expr= m.x121 - m.x181 - m.x301 == 0)
m.c278 = Constraint(expr= m.x122 - m.x182 - m.x302 == 0)
m.c279 = Constraint(expr= m.x123 - m.x183 - m.x303 == 0)
m.c280 = Constraint(expr= m.x124 - m.x184 - m.x304 == 0)
m.c281 = Constraint(expr= m.x125 - m.x185 - m.x305 == 0)
m.c282 = Constraint(expr= m.x126 - m.x186 - m.x306 == 0)
m.c283 = Constraint(expr= m.x127 - m.x187 - m.x307 == 0)
m.c284 = Constraint(expr= m.x128 - m.x188 - m.x308 == 0)
m.c285 = Constraint(expr= m.x129 - m.x189 - m.x309 == 0)
m.c286 = Constraint(expr= m.x130 - m.x190 - m.x310 == 0)
m.c287 = Constraint(expr= m.x131 - m.x191 - m.x311 == 0)
m.c288 = Constraint(expr= m.x132 - m.x192 - m.x312 == 0)
m.c289 = Constraint(expr= m.x133 - m.x193 - m.x313 == 0)
m.c290 = Constraint(expr= m.x134 - m.x194 - m.x314 == 0)
m.c291 = Constraint(expr= m.x135 - m.x195 - m.x315 == 0)
m.c292 = Constraint(expr= m.x136 - m.x196 - m.x316 == 0)
m.c293 = Constraint(expr= m.x137 - m.x197 - m.x317 == 0)
m.c294 = Constraint(expr= m.x138 - m.x198 - m.x318 == 0)
m.c295 = Constraint(expr= m.x139 - m.x199 - m.x319 == 0)
m.c296 = Constraint(expr= m.x140 - m.x200 - m.x320 == 0)
m.c297 = Constraint(expr= m.x141 - m.x201 - m.x321 == 0)
m.c298 = Constraint(expr= m.x142 - m.x202 - m.x322 == 0)
m.c299 = Constraint(expr= m.x143 - m.x203 - m.x323 == 0)
m.c300 = Constraint(expr= m.x144 - m.x204 - m.x324 == 0)
m.c301 = Constraint(expr= - m.x1 + m.x205 == 0)
m.c302 = Constraint(expr= - m.x1 + m.x206 == 0)
m.c303 = Constraint(expr= - m.x1 + m.x207 == 0)
m.c304 = Constraint(expr= - m.x1 + m.x208 == 0)
m.c305 = Constraint(expr= - m.x1 + m.x209 == 0)
m.c306 = Constraint(expr= - m.x1 + m.x210 == 0)
m.c307 = Constraint(expr= - m.x1 + m.x211 == 0)
m.c308 = Constraint(expr= - m.x1 + m.x212 == 0)
m.c309 = Constraint(expr= - m.x1 + m.x213 == 0)
m.c310 = Constraint(expr= - m.x1 + m.x214 == 0)
m.c311 = Constraint(expr= - m.x2 + m.x215 == 0)
m.c312 = Constraint(expr= - m.x2 + m.x216 == 0)
m.c313 = Constraint(expr= - m.x2 + m.x217 == 0)
m.c314 = Constraint(expr= - m.x2 + m.x218 == 0)
m.c315 = Constraint(expr= - m.x2 + m.x219 == 0)
m.c316 = Constraint(expr= - m.x2 + m.x220 == 0)
m.c317 = Constraint(expr= - m.x2 + m.x221 == 0)
m.c318 = Constraint(expr= - m.x2 + m.x222 == 0)
m.c319 = Constraint(expr= - m.x2 + m.x223 == 0)
m.c320 = Constraint(expr= - m.x2 + m.x224 == 0)
m.c321 = Constraint(expr= - m.x3 + m.x225 == 0)
m.c322 = Constraint(expr= - m.x3 + m.x226 == 0)
m.c323 = Constraint(expr= - m.x3 + m.x227 == 0)
m.c324 = Constraint(expr= - m.x3 + m.x228 == 0)
m.c325 = Constraint(expr= - m.x3 + m.x229 == 0)
m.c326 = Constraint(expr= - m.x3 + m.x230 == 0)
m.c327 = Constraint(expr= - m.x3 + m.x231 == 0)
m.c328 = Constraint(expr= - m.x3 + m.x232 == 0)
m.c329 = Constraint(expr= - m.x3 + m.x233 == 0)
m.c330 = Constraint(expr= - m.x3 + m.x234 == 0)
m.c331 = Constraint(expr= - m.x4 + m.x235 == 0)
m.c332 = Constraint(expr= - m.x4 + m.x236 == 0)
m.c333 = Constraint(expr= - m.x4 + m.x237 == 0)
m.c334 = Constraint(expr= - m.x4 + m.x238 == 0)
m.c335 = Constraint(expr= - m.x4 + m.x239 == 0)
m.c336 = Constraint(expr= - m.x4 + m.x240 == 0)
m.c337 = Constraint(expr= - m.x4 + m.x241 == 0)
m.c338 = Constraint(expr= - m.x4 + m.x242 == 0)
m.c339 = Constraint(expr= - m.x4 + m.x243 == 0)
m.c340 = Constraint(expr= - m.x4 + m.x244 == 0)
m.c341 = Constraint(expr= - m.x5 + m.x245 == 0)
m.c342 = Constraint(expr= - m.x5 + m.x246 == 0)
m.c343 = Constraint(expr= - m.x5 + m.x247 == 0)
m.c344 = Constraint(expr= - m.x5 + m.x248 == 0)
m.c345 = Constraint(expr= - m.x5 + m.x249 == 0)
m.c346 = Constraint(expr= - m.x5 + m.x250 == 0)
m.c347 = Constraint(expr= - m.x5 + m.x251 == 0)
m.c348 = Constraint(expr= - m.x5 + m.x252 == 0)
m.c349 = Constraint(expr= - m.x5 + m.x253 == 0)
m.c350 = Constraint(expr= - m.x5 + m.x254 == 0)
m.c351 = Constraint(expr= - m.x6 + m.x255 == 0)
m.c352 = Constraint(expr= - m.x6 + m.x256 == 0)
m.c353 = Constraint(expr= - m.x6 + m.x257 == 0)
m.c354 = Constraint(expr= - m.x6 + m.x258 == 0)
m.c355 = Constraint(expr= - m.x6 + m.x259 == 0)
m.c356 = Constraint(expr= - m.x6 + m.x260 == 0)
m.c357 = Constraint(expr= - m.x6 + m.x261 == 0)
m.c358 = Constraint(expr= - m.x6 + m.x262 == 0)
m.c359 = Constraint(expr= - m.x6 + m.x263 == 0)
m.c360 = Constraint(expr= - m.x6 + m.x264 == 0)
m.c361 = Constraint(expr= - m.x7 + m.x325 - m.x565 == 0)
m.c362 = Constraint(expr= - m.x7 + m.x326 - m.x566 == 0)
m.c363 = Constraint(expr= - m.x7 + m.x327 - m.x567 == 0)
m.c364 = Constraint(expr= - m.x7 + m.x328 - m.x568 == 0)
m.c365 = Constraint(expr= - m.x7 + m.x329 - m.x569 == 0)
m.c366 = Constraint(expr= - m.x7 + m.x330 - m.x570 == 0)
m.c367 = Constraint(expr= - m.x7 + m.x331 - m.x571 == 0)
m.c368 = Constraint(expr= - m.x7 + m.x332 - m.x572 == 0)
m.c369 = Constraint(expr= - m.x7 + m.x333 - m.x573 == 0)
m.c370 = Constraint(expr= - m.x7 + m.x334 - m.x574 == 0)
m.c371 = Constraint(expr= - m.x8 + m.x335 + m.x565 - m.x575 == 0)
m.c372 = Constraint(expr= - m.x8 + m.x336 + m.x566 - m.x576 == 0)
m.c373 = Constraint(expr= - m.x8 + m.x337 + m.x567 - m.x577 == 0)
m.c374 = Constraint(expr= - m.x8 + m.x338 + m.x568 - m.x578 == 0)
m.c375 = Constraint(expr= - m.x8 + m.x339 + m.x569 - m.x579 == 0)
m.c376 = Constraint(expr= - m.x8 + m.x340 + m.x570 - m.x580 == 0)
m.c377 = Constraint(expr= - m.x8 + m.x341 + m.x571 - m.x581 == 0)
m.c378 = Constraint(expr= - m.x8 + m.x342 + m.x572 - m.x582 == 0)
m.c379 = Constraint(expr= - m.x8 + m.x343 + m.x573 - m.x583 == 0)
m.c380 = Constraint(expr= - m.x8 + m.x344 + m.x574 - m.x584 == 0)
m.c381 = Constraint(expr= - m.x9 + m.x345 + m.x575 - m.x585 == 0)
m.c382 = Constraint(expr= - m.x9 + m.x346 + m.x576 - m.x586 == 0)
m.c383 = Constraint(expr= - m.x9 + m.x347 + m.x577 - m.x587 == 0)
m.c384 = Constraint(expr= - m.x9 + m.x348 + m.x578 - m.x588 == 0)
m.c385 = Constraint(expr= - m.x9 + m.x349 + m.x579 - m.x589 == 0)
m.c386 = Constraint(expr= - m.x9 + m.x350 + m.x580 - m.x590 == 0)
m.c387 = Constraint(expr= - m.x9 + m.x351 + m.x581 - m.x591 == 0)
m.c388 = Constraint(expr= - m.x9 + m.x352 + m.x582 - m.x592 == 0)
m.c389 = Constraint(expr= - m.x9 + m.x353 + m.x583 - m.x593 == 0)
m.c390 = Constraint(expr= - m.x9 + m.x354 + m.x584 - m.x594 == 0)
m.c391 = Constraint(expr= - m.x10 + m.x355 + m.x585 - m.x595 == 0)
m.c392 = Constraint(expr= - m.x10 + m.x356 + m.x586 - m.x596 == 0)
m.c393 = Constraint(expr= - m.x10 + m.x357 + m.x587 - m.x597 == 0)
m.c394 = Constraint(expr= - m.x10 + m.x358 + m.x588 - m.x598 == 0)
m.c395 = Constraint(expr= - m.x10 + m.x359 + m.x589 - m.x599 == 0)
m.c396 = Constraint(expr= - m.x10 + m.x360 + m.x590 - m.x600 == 0)
m.c397 = Constraint(expr= - m.x10 + m.x361 + m.x591 - m.x601 == 0)
m.c398 = Constraint(expr= - m.x10 + m.x362 + m.x592 - m.x602 == 0)
m.c399 = Constraint(expr= - m.x10 + m.x363 + m.x593 - m.x603 == 0)
m.c400 = Constraint(expr= - m.x10 + m.x364 + m.x594 - m.x604 == 0)
m.c401 = Constraint(expr= - m.x11 + m.x365 + m.x595 - m.x605 == 0)
m.c402 = Constraint(expr= - m.x11 + m.x366 + m.x596 - m.x606 == 0)
m.c403 = Constraint(expr= - m.x11 + m.x367 + m.x597 - m.x607 == 0)
m.c404 = Constraint(expr= - m.x11 + m.x368 + m.x598 - m.x608 == 0)
m.c405 = Constraint(expr= - m.x11 + m.x369 + m.x599 - m.x609 == 0)
m.c406 = Constraint(expr= - m.x11 + m.x370 + m.x600 - m.x610 == 0)
m.c407 = Constraint(expr= - m.x11 + m.x371 + m.x601 - m.x611 == 0)
m.c408 = Constraint(expr= - m.x11 + m.x372 + m.x602 - m.x612 == 0)
m.c409 = Constraint(expr= - m.x11 + m.x373 + m.x603 - m.x613 == 0)
m.c410 = Constraint(expr= - m.x11 + m.x374 + m.x604 - m.x614 == 0)
m.c411 = Constraint(expr= - m.x12 + m.x375 + m.x605 - m.x615 == 0)
m.c412 = Constraint(expr= - m.x12 + m.x376 + m.x606 - m.x616 == 0)
m.c413 = Constraint(expr= - m.x12 + m.x377 + m.x607 - m.x617 == 0)
m.c414 = Constraint(expr= - m.x12 + m.x378 + m.x608 - m.x618 == 0)
m.c415 = Constraint(expr= - m.x12 + m.x379 + m.x609 - m.x619 == 0)
m.c416 = Constraint(expr= - m.x12 + m.x380 + m.x610 - m.x620 == 0)
m.c417 = Constraint(expr= - m.x12 + m.x381 + m.x611 - m.x621 == 0)
m.c418 = Constraint(expr= - m.x12 + m.x382 + m.x612 - m.x622 == 0)
m.c419 = Constraint(expr= - m.x12 + m.x383 + m.x613 - m.x623 == 0)
m.c420 = Constraint(expr= - m.x12 + m.x384 + m.x614 - m.x624 == 0)
m.c421 = Constraint(expr= - m.x1 + m.x13 == 0)
m.c422 = Constraint(expr= - m.x2 + m.x14 == 0)
m.c423 = Constraint(expr= - m.x3 + m.x15 == 0)
m.c424 = Constraint(expr= - m.x4 + m.x16 == 0)
m.c425 = Constraint(expr= - m.x5 + m.x17 == 0)
m.c426 = Constraint(expr= - m.x6 + m.x18 == 0)
m.c427 = Constraint(expr= - m.x7 + m.x19 == 0)
m.c428 = Constraint(expr= - m.x8 + m.x20 == 0)
m.c429 = Constraint(expr= - m.x9 + m.x21 == 0)
m.c430 = Constraint(expr= - m.x10 + m.x22 == 0)
m.c431 = Constraint(expr= - m.x11 + m.x23 == 0)
m.c432 = Constraint(expr= - m.x12 + m.x24 == 0)
m.c433 = Constraint(expr= - 1.37455*m.x445 + m.x505 - m.x626 - m.x627 - m.x628 == 0)
m.c434 = Constraint(expr= - 2.472633*m.x446 + m.x506 - m.x626 - m.x627 - m.x628 == 0)
m.c435 = Constraint(expr= - 4.976822*m.x447 + m.x507 - m.x626 - m.x627 - m.x628 == 0)
m.c436 = Constraint(expr= - 2.565652*m.x448 + m.x508 - m.x626 - m.x627 - m.x628 == 0)
m.c437 = Constraint(expr= - 3.356331*m.x449 + m.x509 - m.x626 - m.x627 - m.x628 == 0)
m.c438 = Constraint(expr= - 1.44013616*m.x450 + m.x510 - m.x626 - m.x627 - m.x628 == 0)
m.c439 = Constraint(expr= - 1.959312*m.x451 + m.x511 - m.x626 - m.x627 - m.x628 == 0)
m.c440 = Constraint(expr= - 2.5554035*m.x452 + m.x512 - m.x626 - m.x627 - m.x628 == 0)
m.c441 = Constraint(expr= - 6.121276*m.x453 + m.x513 - m.x626 - m.x627 - m.x628 == 0)
m.c442 = Constraint(expr= - 2.268122*m.x454 + m.x514 - m.x626 - m.x627 - m.x628 == 0)
m.c443 = Constraint(expr= - 4.020626*m.x455 + m.x515 - m.x629 - m.x630 - m.x631 == 0)
m.c444 = Constraint(expr= - 2.964906*m.x456 + m.x516 - m.x629 - m.x630 - m.x631 == 0)
m.c445 = Constraint(expr= - 4.504642*m.x457 + m.x517 - m.x629 - m.x630 - m.x631 == 0)
m.c446 = Constraint(expr= - 3.200062*m.x458 + m.x518 - m.x629 - m.x630 - m.x631 == 0)
m.c447 = Constraint(expr= - 2.624108*m.x459 + m.x519 - m.x629 - m.x630 - m.x631 == 0)
m.c448 = Constraint(expr= - 0.04478201*m.x460 + m.x520 - m.x629 - m.x630 - m.x631 == 0)
m.c449 = Constraint(expr= - 3.275987*m.x461 + m.x521 - m.x629 - m.x630 - m.x631 == 0)
m.c450 = Constraint(expr= - 0.9265037*m.x462 + m.x522 - m.x629 - m.x630 - m.x631 == 0)
m.c451 = Constraint(expr= - 3.760758*m.x463 + m.x523 - m.x629 - m.x630 - m.x631 == 0)
m.c452 = Constraint(expr= - 3.826681*m.x464 + m.x524 - m.x629 - m.x630 - m.x631 == 0)
m.c453 = Constraint(expr= - 5.974445*m.x465 + m.x525 - m.x632 - m.x633 - m.x634 == 0)
m.c454 = Constraint(expr= - 2.597016*m.x466 + m.x526 - m.x632 - m.x633 - m.x634 == 0)
m.c455 = Constraint(expr= - 4.248418*m.x467 + m.x527 - m.x632 - m.x633 - m.x634 == 0)
m.c456 = Constraint(expr= - 4.934691*m.x468 + m.x528 - m.x632 - m.x633 - m.x634 == 0)
m.c457 = Constraint(expr= - 5.99296*m.x469 + m.x529 - m.x632 - m.x633 - m.x634 == 0)
m.c458 = Constraint(expr= - 0.68209498*m.x470 + m.x530 - m.x632 - m.x633 - m.x634 == 0)
m.c459 = Constraint(expr= - 2.410622*m.x471 + m.x531 - m.x632 - m.x633 - m.x634 == 0)
m.c460 = Constraint(expr= - 2.4881944*m.x472 + m.x532 - m.x632 - m.x633 - m.x634 == 0)
m.c461 = Constraint(expr= - 7.781311*m.x473 + m.x533 - m.x632 - m.x633 - m.x634 == 0)
m.c462 = Constraint(expr= - 7.257567*m.x474 + m.x534 - m.x632 - m.x633 - m.x634 == 0)
m.c463 = Constraint(expr= - 1.012926*m.x475 + m.x535 - m.x635 - m.x636 - m.x637 == 0)
m.c464 = Constraint(expr= - 2.996514*m.x476 + m.x536 - m.x635 - m.x636 - m.x637 == 0)
m.c465 = Constraint(expr= - 3.493567*m.x477 + m.x537 - m.x635 - m.x636 - m.x637 == 0)
m.c466 = Constraint(expr= - 3.433273*m.x478 + m.x538 - m.x635 - m.x636 - m.x637 == 0)
m.c467 = Constraint(expr= - 4.120419*m.x479 + m.x539 - m.x635 - m.x636 - m.x637 == 0)
m.c468 = Constraint(expr= - 1.90055992*m.x480 + m.x540 - m.x635 - m.x636 - m.x637 == 0)
m.c469 = Constraint(expr= - 2.112299*m.x481 + m.x541 - m.x635 - m.x636 - m.x637 == 0)
m.c470 = Constraint(expr= - 1.4857817*m.x482 + m.x542 - m.x635 - m.x636 - m.x637 == 0)
m.c471 = Constraint(expr= - 4.199485*m.x483 + m.x543 - m.x635 - m.x636 - m.x637 == 0)
m.c472 = Constraint(expr= - 3.512231*m.x484 + m.x544 - m.x635 - m.x636 - m.x637 == 0)
m.c473 = Constraint(expr= - 5.547826*m.x485 + m.x545 - m.x638 - m.x639 - m.x640 == 0)
m.c474 = Constraint(expr= - 3.024617*m.x486 + m.x546 - m.x638 - m.x639 - m.x640 == 0)
m.c475 = Constraint(expr= - 4.285229*m.x487 + m.x547 - m.x638 - m.x639 - m.x640 == 0)
m.c476 = Constraint(expr= - 2.960692*m.x488 + m.x548 - m.x638 - m.x639 - m.x640 == 0)
m.c477 = Constraint(expr= - 4.627118*m.x489 + m.x549 - m.x638 - m.x639 - m.x640 == 0)
m.c478 = Constraint(expr= - 2.6051957*m.x490 + m.x550 - m.x638 - m.x639 - m.x640 == 0)
m.c479 = Constraint(expr= - 2.520239*m.x491 + m.x551 - m.x638 - m.x639 - m.x640 == 0)
m.c480 = Constraint(expr= - 2.207549*m.x492 + m.x552 - m.x638 - m.x639 - m.x640 == 0)
m.c481 = Constraint(expr= - 7.75634*m.x493 + m.x553 - m.x638 - m.x639 - m.x640 == 0)
m.c482 = Constraint(expr= - 8.229719*m.x494 + m.x554 - m.x638 - m.x639 - m.x640 == 0)
m.c483 = Constraint(expr= - 5.486787*m.x495 + m.x555 - m.x641 - m.x642 - m.x643 == 0)
m.c484 = Constraint(expr= - 2.461346*m.x496 + m.x556 - m.x641 - m.x642 - m.x643 == 0)
m.c485 = Constraint(expr= - 8.845282*m.x497 + m.x557 - m.x641 - m.x642 - m.x643 == 0)
m.c486 = Constraint(expr= - 5.157271*m.x498 + m.x558 - m.x641 - m.x642 - m.x643 == 0)
m.c487 = Constraint(expr= - 4.191177*m.x499 + m.x559 - m.x641 - m.x642 - m.x643 == 0)
m.c488 = Constraint(expr= - 5.13465497*m.x500 + m.x560 - m.x641 - m.x642 - m.x643 == 0)
m.c489 = Constraint(expr= - 1.290353*m.x501 + m.x561 - m.x641 - m.x642 - m.x643 == 0)
m.c490 = Constraint(expr= - 2.683989*m.x502 + m.x562 - m.x641 - m.x642 - m.x643 == 0)
m.c491 = Constraint(expr= - 10.832325*m.x503 + m.x563 - m.x641 - m.x642 - m.x643 == 0)
m.c492 = Constraint(expr= - 8.466163*m.x504 + m.x564 - m.x641 - m.x642 - m.x643 == 0)
m.c493 = Constraint(expr= m.x385 - m.x445 - m.x644 - m.x674 - m.x692 == 0)
m.c494 = Constraint(expr= m.x386 - m.x446 - m.x644 - m.x674 - m.x692 == 0)
m.c495 = Constraint(expr= m.x387 - m.x447 - m.x644 - m.x674 - m.x692 == 0)
m.c496 = Constraint(expr= m.x388 - m.x448 - m.x644 - m.x674 - m.x692 == 0)
m.c497 = Constraint(expr= m.x389 - m.x449 - m.x644 - m.x674 - m.x692 == 0)
m.c498 = Constraint(expr= m.x390 - m.x450 - m.x644 - m.x674 - m.x692 == 0)
m.c499 = Constraint(expr= m.x391 - m.x451 - m.x644 - m.x674 - m.x692 == 0)
m.c500 = Constraint(expr= m.x392 - m.x452 - m.x644 - m.x674 - m.x692 == 0)
m.c501 = Constraint(expr= m.x393 - m.x453 - m.x644 - m.x674 - m.x692 == 0)
m.c502 = Constraint(expr= m.x394 - m.x454 - m.x644 - m.x674 - m.x692 == 0)
m.c503 = Constraint(expr= m.x395 - m.x455 - m.x645 - m.x675 - m.x693 == 0)
m.c504 = Constraint(expr= m.x396 - m.x456 - m.x645 - m.x675 - m.x693 == 0)
m.c505 = Constraint(expr= m.x397 - m.x457 - m.x645 - m.x675 - m.x693 == 0)
m.c506 = Constraint(expr= m.x398 - m.x458 - m.x645 - m.x675 - m.x693 == 0)
m.c507 = Constraint(expr= m.x399 - m.x459 - m.x645 - m.x675 - m.x693 == 0)
m.c508 = Constraint(expr= m.x400 - m.x460 - m.x645 - m.x675 - m.x693 == 0)
m.c509 = Constraint(expr= m.x401 - m.x461 - m.x645 - m.x675 - m.x693 == 0)
m.c510 = Constraint(expr= m.x402 - m.x462 - m.x645 - m.x675 - m.x693 == 0)
m.c511 = Constraint(expr= m.x403 - m.x463 - m.x645 - m.x675 - m.x693 == 0)
m.c512 = Constraint(expr= m.x404 - m.x464 - m.x645 - m.x675 - m.x693 == 0)
m.c513 = Constraint(expr= m.x405 - m.x465 - m.x646 - m.x676 - m.x694 == 0)
m.c514 = Constraint(expr= m.x406 - m.x466 - m.x646 - m.x676 - m.x694 == 0)
m.c515 = Constraint(expr= m.x407 - m.x467 - m.x646 - m.x676 - m.x694 == 0)
m.c516 = Constraint(expr= m.x408 - m.x468 - m.x646 - m.x676 - m.x694 == 0)
m.c517 = Constraint(expr= m.x409 - m.x469 - m.x646 - m.x676 - m.x694 == 0)
m.c518 = Constraint(expr= m.x410 - m.x470 - m.x646 - m.x676 - m.x694 == 0)
m.c519 = Constraint(expr= m.x411 - m.x471 - m.x646 - m.x676 - m.x694 == 0)
m.c520 = Constraint(expr= m.x412 - m.x472 - m.x646 - m.x676 - m.x694 == 0)
m.c521 = Constraint(expr= m.x413 - m.x473 - m.x646 - m.x676 - m.x694 == 0)
m.c522 = Constraint(expr= m.x414 - m.x474 - m.x646 - m.x676 - m.x694 == 0)
m.c523 = Constraint(expr= m.x415 - m.x475 - m.x647 - m.x677 - m.x695 == 0)
m.c524 = Constraint(expr= m.x416 - m.x476 - m.x647 - m.x677 - m.x695 == 0)
m.c525 = Constraint(expr= m.x417 - m.x477 - m.x647 - m.x677 - m.x695 == 0)
m.c526 = Constraint(expr= m.x418 - m.x478 - m.x647 - m.x677 - m.x695 == 0)
m.c527 = Constraint(expr= m.x419 - m.x479 - m.x647 - m.x677 - m.x695 == 0)
m.c528 = Constraint(expr= m.x420 - m.x480 - m.x647 - m.x677 - m.x695 == 0)
m.c529 = Constraint(expr= m.x421 - m.x481 - m.x647 - m.x677 - m.x695 == 0)
m.c530 = Constraint(expr= m.x422 - m.x482 - m.x647 - m.x677 - m.x695 == 0)
m.c531 = Constraint(expr= m.x423 - m.x483 - m.x647 - m.x677 - m.x695 == 0)
m.c532 = Constraint(expr= m.x424 - m.x484 - m.x647 - m.x677 - m.x695 == 0)
m.c533 = Constraint(expr= m.x425 - m.x485 - m.x648 - m.x678 - m.x696 == 0)
m.c534 = Constraint(expr= m.x426 - m.x486 - m.x648 - m.x678 - m.x696 == 0)
m.c535 = Constraint(expr= m.x427 - m.x487 - m.x648 - m.x678 - m.x696 == 0)
m.c536 = Constraint(expr= m.x428 - m.x488 - m.x648 - m.x678 - m.x696 == 0)
m.c537 = Constraint(expr= m.x429 - m.x489 - m.x648 - m.x678 - m.x696 == 0)
m.c538 = Constraint(expr= m.x430 - m.x490 - m.x648 - m.x678 - m.x696 == 0)
m.c539 = Constraint(expr= m.x431 - m.x491 - m.x648 - m.x678 - m.x696 == 0)
m.c540 = Constraint(expr= m.x432 - m.x492 - m.x648 - m.x678 - m.x696 == 0)
m.c541 = Constraint(expr= m.x433 - m.x493 - m.x648 - m.x678 - m.x696 == 0)
m.c542 = Constraint(expr= m.x434 - m.x494 - m.x648 - m.x678 - m.x696 == 0)
m.c543 = Constraint(expr= m.x435 - m.x495 - m.x649 - m.x679 - m.x697 == 0)
m.c544 = Constraint(expr= m.x436 - m.x496 - m.x649 - m.x679 - m.x697 == 0)
m.c545 = Constraint(expr= m.x437 - m.x497 - m.x649 - m.x679 - m.x697 == 0)
m.c546 = Constraint(expr= m.x438 - m.x498 - m.x649 - m.x679 - m.x697 == 0)
m.c547 = Constraint(expr= m.x439 - m.x499 - m.x649 - m.x679 - m.x697 == 0)
m.c548 = Constraint(expr= m.x440 - m.x500 - m.x649 - m.x679 - m.x697 == 0)
m.c549 = Constraint(expr= m.x441 - m.x501 - m.x649 - m.x679 - m.x697 == 0)
m.c550 = Constraint(expr= m.x442 - m.x502 - m.x649 - m.x679 - m.x697 == 0)
m.c551 = Constraint(expr= m.x443 - m.x503 - m.x649 - m.x679 - m.x697 == 0)
m.c552 = Constraint(expr= m.x444 - m.x504 - m.x649 - m.x679 - m.x697 == 0)
m.c553 = Constraint(expr= m.b726 + m.b744 + m.b762 <= 1)
m.c554 = Constraint(expr= m.b727 + m.b745 + m.b763 <= 1)
m.c555 = Constraint(expr= m.b728 + m.b746 + m.b764 <= 1)
m.c556 = Constraint(expr= m.b729 + m.b747 + m.b765 <= 1)
m.c557 = Constraint(expr= m.b730 + m.b748 + m.b766 <= 1)
m.c558 = Constraint(expr= m.b731 + m.b749 + m.b767 <= 1)
m.c559 = Constraint(expr= m.x627 - 3.145*m.x650 - 2.465*m.x651 == 0)
m.c560 = Constraint(expr= m.x630 - 3.145*m.x652 - 2.465*m.x653 == 0)
m.c561 = Constraint(expr= m.x633 - 3.145*m.x654 - 2.465*m.x655 == 0)
m.c562 = Constraint(expr= m.x636 - 3.145*m.x656 - 2.465*m.x657 == 0)
m.c563 = Constraint(expr= m.x639 - 3.145*m.x658 - 2.465*m.x659 == 0)
m.c564 = Constraint(expr= m.x642 - 3.145*m.x660 - 2.465*m.x661 == 0)
m.c565 = Constraint(expr= m.x644 - m.x650 - m.x651 == 0)
m.c566 = Constraint(expr= m.x645 - m.x652 - m.x653 == 0)
m.c567 = Constraint(expr= m.x646 - m.x654 - m.x655 == 0)
m.c568 = Constraint(expr= m.x647 - m.x656 - m.x657 == 0)
m.c569 = Constraint(expr= m.x648 - m.x658 - m.x659 == 0)
m.c570 = Constraint(expr= m.x649 - m.x660 - m.x661 == 0)
m.c571 = Constraint(expr= m.x650 - m.x662 - m.x663 == 0)
m.c572 = Constraint(expr= m.x652 - m.x664 - m.x665 == 0)
m.c573 = Constraint(expr= m.x654 - m.x666 - m.x667 == 0)
m.c574 = Constraint(expr= m.x656 - m.x668 - m.x669 == 0)
m.c575 = Constraint(expr= m.x658 - m.x670 - m.x671 == 0)
m.c576 = Constraint(expr= m.x660 - m.x672 - m.x673 == 0)
m.c577 = Constraint(expr= m.x662 - 20*m.b732 <= 0)
m.c578 = Constraint(expr= m.x664 - 20*m.b734 <= 0)
m.c579 = Constraint(expr= m.x666 - 20*m.b736 <= 0)
m.c580 = Constraint(expr= m.x668 - 20*m.b738 <= 0)
m.c581 = Constraint(expr= m.x670 - 20*m.b740 <= 0)
m.c582 = Constraint(expr= m.x672 - 20*m.b742 <= 0)
m.c583 = Constraint(expr= m.x663 - 20*m.b733 == 0)
m.c584 = Constraint(expr= m.x665 - 20*m.b735 == 0)
m.c585 = Constraint(expr= m.x667 - 20*m.b737 == 0)
m.c586 = Constraint(expr= m.x669 - 20*m.b739 == 0)
m.c587 = Constraint(expr= m.x671 - 20*m.b741 == 0)
m.c588 = Constraint(expr= m.x673 - 20*m.b743 == 0)
m.c589 = Constraint(expr= m.x651 - 85*m.b733 <= 0)
m.c590 = Constraint(expr= m.x653 - 85*m.b735 <= 0)
m.c591 = Constraint(expr= m.x655 - 85*m.b737 <= 0)
m.c592 = Constraint(expr= m.x657 - 85*m.b739 <= 0)
# ---------------------------------------------------------------------------
# Machine-generated Pyomo constraint fragment (part of a larger MINLP model;
# the ConcreteModel `m`, its variables x*/b* and earlier constraints are
# declared above this chunk).  Naming scheme: m.xNNN continuous variables,
# m.bNNN binary variables, m.cNNN constraints.
# NOTE(review): the groupings below are inferred from the algebraic shape of
# the constraints — confirm against the model generator before relying on them.
# ---------------------------------------------------------------------------
# Big-M style upper bounds: x <= 85 * b forces x to 0 when b is off.
m.c593 = Constraint(expr= m.x659 - 85*m.b741 <= 0)
m.c594 = Constraint(expr= m.x661 - 85*m.b743 <= 0)
# Binary selection: each parent binary splits into exactly its two children.
m.c595 = Constraint(expr= - m.b726 + m.b732 + m.b733 == 0)
m.c596 = Constraint(expr= - m.b727 + m.b734 + m.b735 == 0)
m.c597 = Constraint(expr= - m.b728 + m.b736 + m.b737 == 0)
m.c598 = Constraint(expr= - m.b729 + m.b738 + m.b739 == 0)
m.c599 = Constraint(expr= - m.b730 + m.b740 + m.b741 == 0)
m.c600 = Constraint(expr= - m.b731 + m.b742 + m.b743 == 0)
# Linear yield/balance equations: output = weighted sum of split streams.
m.c601 = Constraint(expr= m.x626 - 3.06*m.x680 - 2.38*m.x681 == 0)
m.c602 = Constraint(expr= m.x629 - 3.06*m.x682 - 2.38*m.x683 == 0)
m.c603 = Constraint(expr= m.x632 - 3.06*m.x684 - 2.38*m.x685 == 0)
m.c604 = Constraint(expr= m.x635 - 3.06*m.x686 - 2.38*m.x687 == 0)
m.c605 = Constraint(expr= m.x638 - 3.06*m.x688 - 2.38*m.x689 == 0)
m.c606 = Constraint(expr= m.x641 - 3.06*m.x690 - 2.38*m.x691 == 0)
# Flow conservation: aggregate variable equals the sum of its parts.
m.c607 = Constraint(expr= m.x674 - m.x680 - m.x681 == 0)
m.c608 = Constraint(expr= m.x675 - m.x682 - m.x683 == 0)
m.c609 = Constraint(expr= m.x676 - m.x684 - m.x685 == 0)
m.c610 = Constraint(expr= m.x677 - m.x686 - m.x687 == 0)
m.c611 = Constraint(expr= m.x678 - m.x688 - m.x689 == 0)
m.c612 = Constraint(expr= m.x679 - m.x690 - m.x691 == 0)
# Big-M upper bounds (capacity 40) tied to selection binaries.
m.c613 = Constraint(expr= m.x680 - 40*m.b750 <= 0)
m.c614 = Constraint(expr= m.x682 - 40*m.b752 <= 0)
m.c615 = Constraint(expr= m.x684 - 40*m.b754 <= 0)
m.c616 = Constraint(expr= m.x686 - 40*m.b756 <= 0)
m.c617 = Constraint(expr= m.x688 - 40*m.b758 <= 0)
m.c618 = Constraint(expr= m.x690 - 40*m.b760 <= 0)
# Big-M upper bounds (capacity 85).
m.c619 = Constraint(expr= m.x681 - 85*m.b751 <= 0)
m.c620 = Constraint(expr= m.x683 - 85*m.b753 <= 0)
m.c621 = Constraint(expr= m.x685 - 85*m.b755 <= 0)
m.c622 = Constraint(expr= m.x687 - 85*m.b757 <= 0)
m.c623 = Constraint(expr= m.x689 - 85*m.b759 <= 0)
m.c624 = Constraint(expr= m.x691 - 85*m.b761 <= 0)
# Matching lower bounds: if the binary is on, the flow is at least 40.
m.c625 = Constraint(expr= m.x681 - 40*m.b751 >= 0)
m.c626 = Constraint(expr= m.x683 - 40*m.b753 >= 0)
m.c627 = Constraint(expr= m.x685 - 40*m.b755 >= 0)
m.c628 = Constraint(expr= m.x687 - 40*m.b757 >= 0)
m.c629 = Constraint(expr= m.x689 - 40*m.b759 >= 0)
m.c630 = Constraint(expr= m.x691 - 40*m.b761 >= 0)
# Binary selection for the second stage.
m.c631 = Constraint(expr= - m.b744 + m.b750 + m.b751 == 0)
m.c632 = Constraint(expr= - m.b745 + m.b752 + m.b753 == 0)
m.c633 = Constraint(expr= - m.b746 + m.b754 + m.b755 == 0)
m.c634 = Constraint(expr= - m.b747 + m.b756 + m.b757 == 0)
m.c635 = Constraint(expr= - m.b748 + m.b758 + m.b759 == 0)
m.c636 = Constraint(expr= - m.b749 + m.b760 + m.b761 == 0)
# Yield/balance equations with coefficients 1.7 / 2.04 / 3.4.
m.c637 = Constraint(expr= m.x628 - 3.4*m.x698 == 0)
m.c638 = Constraint(expr= m.x631 - 2.04*m.x699 - 3.4*m.x700 - 2.04*m.x701 == 0)
m.c639 = Constraint(expr= m.x634 - 1.7*m.x702 - 2.04*m.x703 - 1.7*m.x704 - 3.4*m.x705 - 2.04*m.x706 - 1.7*m.x707 == 0)
m.c640 = Constraint(expr= m.x637 - 1.7*m.x708 - 2.04*m.x709 - 1.7*m.x710 - 3.4*m.x711 - 2.04*m.x712 - 1.7*m.x713 == 0)
m.c641 = Constraint(expr= m.x640 - 1.7*m.x714 - 2.04*m.x715 - 1.7*m.x716 - 3.4*m.x717 - 2.04*m.x718 - 1.7*m.x719 == 0)
m.c642 = Constraint(expr= m.x643 - 1.7*m.x720 - 2.04*m.x721 - 1.7*m.x722 - 3.4*m.x723 - 2.04*m.x724 - 1.7*m.x725 == 0)
# Flow conservation for the third stage.
m.c643 = Constraint(expr= m.x692 - m.x698 == 0)
m.c644 = Constraint(expr= m.x693 - m.x699 - m.x700 - m.x701 == 0)
m.c645 = Constraint(expr= m.x694 - m.x702 - m.x703 - m.x704 - m.x705 - m.x706 - m.x707 == 0)
m.c646 = Constraint(expr= m.x695 - m.x708 - m.x709 - m.x710 - m.x711 - m.x712 - m.x713 == 0)
m.c647 = Constraint(expr= m.x696 - m.x714 - m.x715 - m.x716 - m.x717 - m.x718 - m.x719 == 0)
m.c648 = Constraint(expr= m.x697 - m.x720 - m.x721 - m.x722 - m.x723 - m.x724 - m.x725 == 0)
# Big-M upper bounds (capacity 85) for third-stage streams.
m.c649 = Constraint(expr= m.x698 - 85*m.b768 <= 0)
m.c650 = Constraint(expr= m.x699 - 85*m.b772 <= 0)
m.c651 = Constraint(expr= m.x700 - 85*m.b771 <= 0)
m.c652 = Constraint(expr= m.x701 - 85*m.b772 <= 0)
m.c653 = Constraint(expr= m.x702 - 85*m.b776 <= 0)
m.c654 = Constraint(expr= m.x703 - 85*m.b775 <= 0)
m.c655 = Constraint(expr= m.x704 - 85*m.b776 <= 0)
m.c656 = Constraint(expr= m.x705 - 85*m.b774 <= 0)
m.c657 = Constraint(expr= m.x706 - 85*m.b775 <= 0)
m.c658 = Constraint(expr= m.x707 - 85*m.b776 <= 0)
m.c659 = Constraint(expr= m.x708 - 85*m.b779 <= 0)
m.c660 = Constraint(expr= m.x709 - 85*m.b778 <= 0)
m.c661 = Constraint(expr= m.x710 - 85*m.b779 <= 0)
m.c662 = Constraint(expr= m.x711 - 85*m.b777 <= 0)
m.c663 = Constraint(expr= m.x712 - 85*m.b778 <= 0)
m.c664 = Constraint(expr= m.x713 - 85*m.b779 <= 0)
m.c665 = Constraint(expr= m.x714 - 85*m.b782 <= 0)
m.c666 = Constraint(expr= m.x715 - 85*m.b781 <= 0)
m.c667 = Constraint(expr= m.x716 - 85*m.b782 <= 0)
m.c668 = Constraint(expr= m.x717 - 85*m.b780 <= 0)
m.c669 = Constraint(expr= m.x718 - 85*m.b781 <= 0)
m.c670 = Constraint(expr= m.x719 - 85*m.b782 <= 0)
m.c671 = Constraint(expr= m.x720 - 85*m.b785 <= 0)
m.c672 = Constraint(expr= m.x721 - 85*m.b784 <= 0)
m.c673 = Constraint(expr= m.x722 - 85*m.b785 <= 0)
m.c674 = Constraint(expr= m.x723 - 85*m.b783 <= 0)
m.c675 = Constraint(expr= m.x724 - 85*m.b784 <= 0)
m.c676 = Constraint(expr= m.x725 - 85*m.b785 <= 0)
# Minimum-throughput lower bounds (5 / 25 / 30) active only when selected.
m.c677 = Constraint(expr= m.x698 - 5*m.b768 >= 0)
m.c678 = Constraint(expr= m.x699 - 25*m.b772 >= 0)
m.c679 = Constraint(expr= m.x700 - 5*m.b771 >= 0)
m.c680 = Constraint(expr= m.x701 - 25*m.b772 >= 0)
m.c681 = Constraint(expr= m.x702 - 30*m.b776 >= 0)
m.c682 = Constraint(expr= m.x703 - 25*m.b775 >= 0)
m.c683 = Constraint(expr= m.x704 - 30*m.b776 >= 0)
m.c684 = Constraint(expr= m.x705 - 5*m.b774 >= 0)
m.c685 = Constraint(expr= m.x706 - 25*m.b775 >= 0)
m.c686 = Constraint(expr= m.x707 - 30*m.b776 >= 0)
m.c687 = Constraint(expr= m.x708 - 30*m.b779 >= 0)
m.c688 = Constraint(expr= m.x709 - 25*m.b778 >= 0)
m.c689 = Constraint(expr= m.x710 - 30*m.b779 >= 0)
m.c690 = Constraint(expr= m.x711 - 5*m.b777 >= 0)
m.c691 = Constraint(expr= m.x712 - 25*m.b778 >= 0)
m.c692 = Constraint(expr= m.x713 - 30*m.b779 >= 0)
m.c693 = Constraint(expr= m.x714 - 30*m.b782 >= 0)
m.c694 = Constraint(expr= m.x715 - 25*m.b781 >= 0)
m.c695 = Constraint(expr= m.x716 - 30*m.b782 >= 0)
m.c696 = Constraint(expr= m.x717 - 5*m.b780 >= 0)
m.c697 = Constraint(expr= m.x718 - 25*m.b781 >= 0)
m.c698 = Constraint(expr= m.x719 - 30*m.b782 >= 0)
m.c699 = Constraint(expr= m.x720 - 30*m.b785 >= 0)
m.c700 = Constraint(expr= m.x721 - 25*m.b784 >= 0)
m.c701 = Constraint(expr= m.x722 - 30*m.b785 >= 0)
m.c702 = Constraint(expr= m.x723 - 5*m.b783 >= 0)
m.c703 = Constraint(expr= m.x724 - 25*m.b784 >= 0)
m.c704 = Constraint(expr= m.x725 - 30*m.b785 >= 0)
# Selection: each third-stage parent binary activates one of three children.
m.c705 = Constraint(expr= - m.b762 + m.b768 + m.b769 + m.b770 == 0)
m.c706 = Constraint(expr= - m.b763 + m.b771 + m.b772 + m.b773 == 0)
m.c707 = Constraint(expr= - m.b764 + m.b774 + m.b775 + m.b776 == 0)
m.c708 = Constraint(expr= - m.b765 + m.b777 + m.b778 + m.b779 == 0)
m.c709 = Constraint(expr= - m.b766 + m.b780 + m.b781 + m.b782 == 0)
m.c710 = Constraint(expr= - m.b767 + m.b783 + m.b784 + m.b785 == 0)
# Pairwise mutual-exclusion constraints between incompatible selections.
m.c711 = Constraint(expr= m.b768 + m.b772 <= 1)
m.c712 = Constraint(expr= m.b769 + m.b772 <= 1)
m.c713 = Constraint(expr= m.b770 + m.b772 <= 1)
m.c714 = Constraint(expr= m.b768 + m.b776 <= 1)
m.c715 = Constraint(expr= m.b769 + m.b776 <= 1)
m.c716 = Constraint(expr= m.b770 + m.b776 <= 1)
m.c717 = Constraint(expr= m.b771 + m.b775 <= 1)
m.c718 = Constraint(expr= m.b772 + m.b775 <= 1)
m.c719 = Constraint(expr= m.b773 + m.b775 <= 1)
m.c720 = Constraint(expr= m.b771 + m.b776 <= 1)
m.c721 = Constraint(expr= m.b772 + m.b776 <= 1)
m.c722 = Constraint(expr= m.b773 + m.b776 <= 1)
m.c723 = Constraint(expr= m.b771 + m.b779 <= 1)
m.c724 = Constraint(expr= m.b772 + m.b779 <= 1)
m.c725 = Constraint(expr= m.b773 + m.b779 <= 1)
m.c726 = Constraint(expr= m.b774 + m.b778 <= 1)
m.c727 = Constraint(expr= m.b775 + m.b778 <= 1)
m.c728 = Constraint(expr= m.b776 + m.b778 <= 1)
m.c729 = Constraint(expr= m.b774 + m.b779 <= 1)
m.c730 = Constraint(expr= m.b775 + m.b779 <= 1)
m.c731 = Constraint(expr= m.b776 + m.b779 <= 1)
m.c732 = Constraint(expr= m.b774 + m.b782 <= 1)
m.c733 = Constraint(expr= m.b775 + m.b782 <= 1)
m.c734 = Constraint(expr= m.b776 + m.b782 <= 1)
m.c735 = Constraint(expr= m.b777 + m.b781 <= 1)
m.c736 = Constraint(expr= m.b778 + m.b781 <= 1)
m.c737 = Constraint(expr= m.b779 + m.b781 <= 1)
m.c738 = Constraint(expr= m.b777 + m.b782 <= 1)
m.c739 = Constraint(expr= m.b778 + m.b782 <= 1)
m.c740 = Constraint(expr= m.b779 + m.b782 <= 1)
m.c741 = Constraint(expr= m.b777 + m.b785 <= 1)
m.c742 = Constraint(expr= m.b778 + m.b785 <= 1)
m.c743 = Constraint(expr= m.b779 + m.b785 <= 1)
m.c744 = Constraint(expr= m.b780 + m.b784 <= 1)
m.c745 = Constraint(expr= m.b781 + m.b784 <= 1)
m.c746 = Constraint(expr= m.b782 + m.b784 <= 1)
m.c747 = Constraint(expr= m.b780 + m.b785 <= 1)
m.c748 = Constraint(expr= m.b781 + m.b785 <= 1)
m.c749 = Constraint(expr= m.b782 + m.b785 <= 1)
|
nilq/baby-python
|
python
|
import os
import sqlite3
import subprocess
import sys

from pomodoro import Pomodoro

# The script lives one directory below the project root; work from the root
# so the relative "data" folder resolves correctly.
os.chdir("..")

# Build the path portably.  The original literal "data\pomodoros.db" only
# worked because "\p" happens not to be an escape sequence; it raises a
# DeprecationWarning on modern Python and breaks on non-Windows separators.
database_path = os.path.join("data", "pomodoros.db")

# Create database if it does not exist
if not os.path.exists(database_path):
    print("Creating database ...")
    # Run the schema-creation script with the current interpreter instead of
    # relying on the OS file-type association that os.system("database.py")
    # needed to work.
    subprocess.run([sys.executable, "database.py"], check=True)

conn = sqlite3.connect(database_path)
cursor = conn.cursor()
pomodoro = Pomodoro(cursor)

### Main loop
while True:
    # Show the categories available
    category_id = pomodoro.show_categories()
    project_id = pomodoro.show_projects(category_id)
    # Convert to int: the timer and the database expect a minute count,
    # not the raw input string.
    pomodoro_time = int(input("Add the length of the pomodoro in minutes: "))
    # call for the timer
    pomodoro.timer(minutes=pomodoro_time)
    # Rest timer
    pomodoro.timer(mode="rest")
    # Ask for satisfaction
    satisfaction = input("Type how well was your pomodoro. 1=Good - 2=Bad: ")
    # Add the pomodoro to the database
    pomodoro.add_pomodoro(pomodoro_time, category_id, project_id, satisfaction)
    conn.commit()
    # Next step
    decision = pomodoro.next_decision()
    if decision == 1:
        continue
    elif decision == 2:
        pomodoro.end_project(project_id)
        conn.commit()
    elif decision == 3:
        pomodoro.cancel_project(project_id)
        conn.commit()
    else:
        break

conn.commit()
conn.close()
print("---ENDING PROGRAM---")
|
nilq/baby-python
|
python
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
w,h,k=map(int,input().split());r=0
for _ in [0]*k:
r+=(w+h<<1)-4;w-=4;h-=4
print(r)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# From the csv export of https://opendata.paris.fr/explore/dataset/les-titres-les-plus-pretes/
# count the number of works per 'document type' and display the types in
# descending order of loans.
from collections import Counter

sep = ';'

cnt = Counter()
# The export contains accented French text; force UTF-8 so the script does
# not depend on (and fail under) the platform's default locale encoding.
with open('les-titres-les-plus-pretes.csv', encoding='utf-8') as f_livres:
    for line in f_livres:
        # Skip the header row.
        if line.startswith("Rang;"):
            continue
        line = line.rstrip()
        cols = line.split(sep)
        # cols[1] = document type, cols[2] = number of loans.
        cnt[cols[1]] += int(cols[2])

# most_common returns (category, count) tuples in descending order
# (https://docs.python.org/3.7/library/collections.html#collections.Counter.most_common)
for cat, nb in cnt.most_common():
    print(f"{cat} : {nb} prêts")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#pythonlib
import os
import sys
import math
import re
import time
import glob
import numpy
#appion
from pyami import imagefun
from appionlib import appionLoop2
from appionlib import apImage
from appionlib import apDisplay
from appionlib import apDatabase
from appionlib.apCtf import ctfdb
from appionlib import apDefocalPairs
from appionlib import appiondata
from appionlib import apParticle
from appionlib import apFile
from appionlib import apMask
from appionlib import apBoxer
from appionlib import apSizing
class ParticleExtractLoop(appionLoop2.AppionLoop):
	"""Appion image loop that extracts picked particles from micrographs.

	Subclasses implement processParticles(); this base class supplies
	particle selection (from picks or a previous stack), CTF-based image
	rejection, defocal-pair handling, mask-based particle elimination,
	and direct-detector (DD) frame/stack bookkeeping.
	Note: Python 2 era code (matches the rest of the appionlib package).
	"""
	############################################################
	## Check pixel size
	############################################################
	def checkPixelSize(self):
		"""Record the run pixel size and abort if images disagree."""
		# make sure that images all have same pixel size:
		# first get pixel size of first image:
		self.params['apix'] = None
		for imgdata in self.imgtree:
			# get pixel size
			imgname = imgdata['filename']
			if imgname in self.donedict:
				continue
			if self.params['apix'] is None:
				self.params['apix'] = apDatabase.getPixelSize(imgdata)
				apDisplay.printMsg("Stack pixelsize = %.3f A"%(self.params['apix']))
			if apDatabase.getPixelSize(imgdata) != self.params['apix']:
				apDisplay.printMsg("Image pixelsize %.3f A != Stack pixelsize %.3f A"%(apDatabase.getPixelSize(imgdata), self.params['apix']))
				apDisplay.printMsg("Problem image name: %s"%(apDisplay.short(imgdata['filename'])))
				apDisplay.printError("This particle selection run contains images of varying pixelsizes, a stack cannot be created")
	#=======================
	def getParticlesFromStack(self, stackdata,imgdata,is_defocpair=False):
		"""
		For image (or defocal pair), imgdata get particles in corresponding stack

		Returns (partdatas, shiftdata) where shiftdata holds the x/y shift
		and scale between the image and its defocal sibling (identity when
		is_defocpair is False).
		"""
		if is_defocpair is True:
			sibling, shiftpeak = apDefocalPairs.getShiftFromImage(imgdata, self.params['sessionname'])
			if shiftpeak is None:
				return [],{'shiftx':0, 'shifty':0, 'scale':1}
			shiftdata = {'shiftx':shiftpeak['shift'][0], 'shifty':shiftpeak['shift'][1], 'scale':shiftpeak['scalefactor']}
			searchimgdata = sibling
		else:
			searchimgdata = imgdata
			shiftdata = {'shiftx':0, 'shifty':0, 'scale':1}
		partq = appiondata.ApParticleData()
		partq['image'] = searchimgdata
		stackpartq = appiondata.ApStackParticleData()
		stackpartq['stack'] = stackdata
		stackpartq['particle'] = partq
		stackpartdatas = stackpartq.query()
		partdatas = []
		partorder = []
		for stackpartdata in stackpartdatas:
			# honor --partlimit: skip particles numbered past the limit
			if self.params['partlimit'] and self.params['partlimit'] < stackpartdata['particleNumber']:
				continue
			partdata = stackpartdata['particle']
			partdatas.append(partdata)
			partorder.append(stackpartdata['particleNumber'])
		# query returns newest-first; reverse to restore stack order
		partdatas.reverse()
		partorder.reverse()
		self.writeStackParticleOrderFile(partorder)
		return partdatas, shiftdata
	def writeStackParticleOrderFile(self,partorder):
		"""Append the particle-number order of this image to stackpartorder.list."""
		# NOTE(review): file handle is never closed explicitly; relies on
		# CPython refcounting to flush on collection.
		f = open(os.path.join(self.params['rundir'],'stackpartorder.list'),'a')
		if partorder:
			f.write('\n'.join(map((lambda x: '%d' % x),partorder))+'\n')
		return
	def getParticlesInImage(self,imgdata):
		"""Collect particles for imgdata from picks or a source stack, then
		apply the correlation and mask filters.  Returns (partdatas, shiftdata)."""
		if self.params['defocpair'] is True and self.params['selectionid'] is not None:
			# using defocal pairs and particle picks
			partdatas, shiftdata = apParticle.getDefocPairParticles(imgdata, self.params['selectionid'], self.params['particlelabel'])
		elif self.params['fromstackid'] is not None:
			# using previous stack to make a new stack
			fromstackdata = appiondata.ApStackData.direct_query(self.params['fromstackid'])
			partdatas, shiftdata = self.getParticlesFromStack(fromstackdata,imgdata,self.params['defocpair'],)
		else:
			# using particle picks
			partdatas = apParticle.getParticles(imgdata, self.params['selectionid'], self.params['particlelabel'])
			shiftdata = {'shiftx':0, 'shifty':0, 'scale':1}
		apDisplay.printMsg("Found %d particles"%(len(partdatas)))
		### apply correlation limits
		if self.params['correlationmin'] or self.params['correlationmax']:
			partdatas = self.eliminateMinMaxCCParticles(partdatas)
		### apply masks
		if self.params['checkmask']:
			partdatas = self.eliminateMaskedParticles(partdatas, imgdata)
		return partdatas,shiftdata
	############################################################
	## Rejection Criteria
	############################################################
	############################################################
	## image if additional criteria is not met
	############################################################
	def rejectImage(self, imgdata):
		"""Return False to reject imgdata on non-CTF criteria (currently only
		the --mag magnification filter); True to keep it."""
		shortname = apDisplay.short(imgdata['filename'])
		if self.params['mag']:
			if not apDatabase.checkMag(imgdata, self.params['mag']):
				apDisplay.printColor(shortname+" was not at the specific magnification","cyan")
				return False
		return True
	############################################################
	## get CTF parameters and skip image if criteria is not met
	############################################################
	def checkRequireCtf(self):
		"""Lazily compute and cache whether any CTF-based rejection
		criterion was requested on the command line."""
		try:
			return self.params['saveRequireCtf']
		except KeyError:
			ctfres = self.params['ctfres80min'] or self.params['ctfres50min'] or self.params['ctfres80max'] or self.params['ctfres50max']
			defoc = self.params['mindefocus'] or self.params['maxdefocus']
			self.params['saveRequireCtf'] = self.params['ctfcutoff'] or ctfres or defoc
			return self.params['saveRequireCtf']
	#=======================
	def getBestCtfValue(self, imgdata, msg=False):
		"""Return the CTF estimate for imgdata, either from the explicit
		--ctfrunid or the best available value by --ctfsorttype/--ctfmethod."""
		if self.params['ctfrunid'] is not None:
			return ctfdb.getCtfValueForCtfRunId(imgdata, self.params['ctfrunid'], msg=msg)
		return ctfdb.getBestCtfValue(imgdata, sortType=self.params['ctfsorttype'], method=self.params['ctfmethod'], msg=msg)
	#=======================
	def getDefocusAmpConstForImage(self,imgdata,msg=False):
		ctfvalue = self.getBestCtfValue(imgdata, msg)
		### This function returns defocus defined as negative underfocus
		defocus = -(abs(ctfvalue['defocus1'])+abs(ctfvalue['defocus2']))/2
		return defocus, ctfvalue['amplitude_contrast']
	#=======================
	def checkCtfParams(self, imgdata):
		"""Return False if imgdata fails any requested CTF criterion
		(confidence, 0.8/0.5 fit resolution, defocus range); True otherwise."""
		shortname = apDisplay.short(imgdata['filename'])
		ctfvalue = self.getBestCtfValue(imgdata)
		### check if we have values and if we care
		if ctfvalue is None:
			return not self.checkRequireCtf()
		### check that CTF estimation is above confidence threshold
		conf = ctfdb.calculateConfidenceScore(ctfvalue)
		if self.params['ctfcutoff'] and conf < self.params['ctfcutoff']:
			apDisplay.printColor(shortname+" is below confidence threshold (conf="+str(round(conf,3))+")\n","cyan")
			return False
		### check resolution requirement for CTF fit at 0.8 threshold
		if self.params['ctfres80min'] is not None or self.params['ctfres80max'] is not None:
			if not 'resolution_80_percent' in ctfvalue.keys() or ctfvalue['resolution_80_percent'] is None:
				apDisplay.printColor("%s: no 0.8 resolution found"%(shortname), "cyan")
				return False
			if self.params['ctfres80max'] and ctfvalue['resolution_80_percent'] > self.params['ctfres80max']:
				apDisplay.printColor("%s is above resolution threshold %.2f > %.2f"
					%(shortname, ctfvalue['resolution_80_percent'], self.params['ctfres80max']), "cyan")
				return False
			if self.params['ctfres80min'] and ctfvalue['resolution_80_percent'] < self.params['ctfres80min']:
				apDisplay.printColor("%s is below resolution threshold %.2f > %.2f"
					%(shortname, ctfvalue['resolution_80_percent'], self.params['ctfres80min']), "cyan")
				return False
		### check resolution requirement for CTF fit at 0.5 threshold
		if self.params['ctfres50min'] is not None or self.params['ctfres50max'] is not None:
			if not 'resolution_50_percent' in ctfvalue.keys() or ctfvalue['resolution_50_percent'] is None:
				apDisplay.printColor("%s: no 0.5 resolution found"%(shortname), "cyan")
				return False
			if self.params['ctfres50max'] and ctfvalue['resolution_50_percent'] > self.params['ctfres50max']:
				apDisplay.printColor("%s is above resolution threshold %.2f > %.2f"
					%(shortname, ctfvalue['resolution_50_percent'], self.params['ctfres50max']), "cyan")
				return False
			if self.params['ctfres50min'] and ctfvalue['resolution_50_percent'] < self.params['ctfres50min']:
				apDisplay.printColor("%s is below resolution threshold %.2f > %.2f"
					%(shortname, ctfvalue['resolution_50_percent'], self.params['ctfres50min']), "cyan")
				return False
		if self.params['mindefocus'] is not None or self.params['maxdefocus'] is not None:
			### get best defocus value
			### defocus should be in negative meters
			if ctfvalue['defocus2'] is not None and ctfvalue['defocus1'] != ctfvalue['defocus2']:
				defocus = (ctfvalue['defocus1'] + ctfvalue['defocus2'])/2.0
			else:
				defocus = ctfvalue['defocus1']
			defocus = -1.0*abs(defocus)
			### assume defocus values are ALWAYS negative but mindefocus is greater than maxdefocus
			if self.params['mindefocus']:
				self.params['mindefocus'] = -abs( self.params['mindefocus'] )
			if self.params['maxdefocus']:
				self.params['maxdefocus'] = -abs( self.params['maxdefocus'] )
			if self.params['mindefocus'] and self.params['maxdefocus']:
				# swap the two if the user supplied them in reversed order
				if self.params['maxdefocus'] > self.params['mindefocus']:
					mindef = self.params['mindefocus']
					maxdef = self.params['maxdefocus']
					self.params['mindefocus'] = maxdef
					self.params['maxdefocus'] = mindef
			### skip micrograph that have defocus above or below min & max defocus levels
			if self.params['mindefocus'] and defocus > self.params['mindefocus']:
				#apDisplay.printColor(shortname+" defocus ("+str(round(defocus*1e6,2))+\
				#	" um) is less than mindefocus ("+str(self.params['mindefocus']*1e6)+" um)\n","cyan")
				return False
			if self.params['maxdefocus'] and defocus < self.params['maxdefocus']:
				#apDisplay.printColor(shortname+" defocus ("+str(round(defocus*1e6,2))+\
				#	" um) is greater than maxdefocus ("+str(self.params['maxdefocus']*1e6)+" um)\n","cyan")
				return False
		return True
	#=======================
	def checkDefocus(self, defocus, shortname):
		"""Sanity-check a defocus value (meters, negative = underfocus);
		aborts the run via printError on implausible values."""
		if defocus > 0:
			apDisplay.printError("defocus is positive "+str(defocus)+" for image "+shortname)
		elif defocus < -1.0e3:
			apDisplay.printError("defocus is very big "+str(defocus)+" for image "+shortname)
		elif defocus > -1.0e-3:
			apDisplay.printError("defocus is very small "+str(defocus)+" for image "+shortname)
	#=======================
	def eliminateMinMaxCCParticles(self, particles):
		"""Drop particles whose pick correlation falls outside
		[--mincc, --maxcc]; returns the surviving particles."""
		newparticles = []
		eliminated = 0
		for prtl in particles:
			if self.params['correlationmin'] and prtl['correlation'] < self.params['correlationmin']:
				eliminated += 1
			elif self.params['correlationmax'] and prtl['correlation'] > self.params['correlationmax']:
				eliminated += 1
			else:
				newparticles.append(prtl)
		if eliminated > 0:
			apDisplay.printMsg(str(eliminated)+" particle(s) eliminated due to min or max correlation cutoff")
		return newparticles
	#=======================
	def eliminateMaskedParticles(self, particles, imgdata):
		"""Drop particles whose (binned) coordinate falls inside the
		inspected mask of the --maskassess run; returns survivors."""
		newparticles = []
		eliminated = 0
		sessiondata = apDatabase.getSessionDataFromSessionName(self.params['sessionname'])
		if self.params['defocpair']:
			imgdata = apDefocalPairs.getTransformedDefocPair(imgdata,2)
		maskimg,maskbin = apMask.makeInspectedMask(sessiondata,self.params['maskassess'],imgdata)
		if maskimg is not None:
			for prtl in particles:
				# mask image is binned by maskbin relative to particle coords
				binnedcoord = (int(prtl['ycoord']/maskbin),int(prtl['xcoord']/maskbin))
				if maskimg[binnedcoord] != 0:
					eliminated += 1
				else:
					newparticles.append(prtl)
			apDisplay.printMsg("%i particle(s) eliminated due to masking"%eliminated)
		else:
			apDisplay.printMsg("no masking")
			newparticles = particles
		return newparticles
	############################################################
	## Common parameters
	############################################################
	#=======================
	def setupParserOptions(self):
		"""Register the command-line options shared by all extraction loops."""
		self.ctfestopts = ('ace2', 'ctffind')
		### values
		self.parser.add_option("--bin", dest="bin", type="int", default=1,
			help="Bin the particles after extracting", metavar="#")
		self.parser.add_option("--ctfcutoff", dest="ctfcutoff", type="float",
			help="CTF confidence cut off")
		self.parser.add_option("--ctfres80min", dest="ctfres80min", type="float",
			help="min resolution requirement at 0.8 threshold (rarely used)")
		self.parser.add_option("--ctfres50min", dest="ctfres50min", type="float",
			help="min resolution requirement at 0.5 threshold (rarely used)")
		self.parser.add_option("--ctfres80max", dest="ctfres80max", type="float",
			help="max resolution requirement for CTF fit at 0.8 threshold")
		self.parser.add_option("--ctfres50max", dest="ctfres50max", type="float",
			help="max resolution requirement for CTF fit at 0.5 threshold")
		self.parser.add_option("--mincc", dest="correlationmin", type="float",
			help="particle correlation mininum")
		self.parser.add_option("--maxcc", dest="correlationmax", type="float",
			help="particle correlation maximum")
		self.parser.add_option("--mindef", dest="mindefocus", type="float",
			help="minimum defocus")
		self.parser.add_option("--maxdef", dest="maxdefocus", type="float",
			help="maximum defocus")
		self.parser.add_option("--selectionid", dest="selectionid", type="int",
			help="particle picking runid")
		self.parser.add_option("--fromstackid", dest="fromstackid", type="int",
			help="redo a stack from a previous stack")
		self.parser.add_option("--ctfrunid", dest="ctfrunid", type="int",
			help="consider only specific ctfrun")
		self.parser.add_option("--partlimit", dest="partlimit", type="int",
			help="particle limit")
		self.parser.add_option("--mag", dest="mag", type="int",
			help="process only images of magification, mag")
		self.parser.add_option("--maskassess", dest="maskassess",
			help="Assessed mask run name")
		self.parser.add_option("--label", dest="particlelabel", type="str", default=None,
			help="select particles by label within the same run name")
		self.parser.add_option("--ddstartframe", dest="startframe", type="int", default=0,
			help="starting frame for direct detector raw frame processing. The first frame is 0")
		self.parser.add_option("--ddnframe", dest="nframe", type="int",
			help="total frames to consider for direct detector raw frame processing")
		self.parser.add_option("--ddstack", dest="ddstack", type="int", default=0,
			help="gain/dark corrected ddstack id used for dd frame integration")
		self.parser.add_option("--dduseGS", dest="useGS", default=False,
			action="store_true", help="use Gram-Schmidt process to scale dark to frame images")
		self.parser.add_option("--dddriftlimit", dest="driftlimit", type="float",
			help="direct detector frame acceptable drift, in Angstroms")
		### true/false
		self.parser.add_option("--defocpair", dest="defocpair", default=False,
			action="store_true", help="select defocal pair")
		self.parser.add_option("--checkmask", dest="checkmask", default=False,
			action="store_true", help="Check masks")
		self.parser.add_option("--keepall", dest="keepall", default=False,
			action="store_true", help="Do not delete CTF corrected MRC files when finishing")
		self.parser.add_option("--usedownmrc", dest="usedownmrc", default=False,
			action="store_true", help="Use existing *.down.mrc in processing")
		### option based
		self.parser.add_option("--ctfmethod", dest="ctfmethod",
			help="Only use ctf values coming from this method of estimation", metavar="TYPE",
			type="choice", choices=self.ctfestopts)
	#=======================
	def checkConflicts(self):
		"""Validate option combinations; aborts via printError on conflicts."""
		if self.params['description'] is None:
			apDisplay.printError("A description has to be specified")
		if (self.params['mindefocus'] is not None and
				(self.params['mindefocus'] < -1e-3 or self.params['mindefocus'] > -1e-9)):
			apDisplay.printError("min defocus is not in an acceptable range, e.g. mindefocus=-1.5e-6")
		if (self.params['maxdefocus'] is not None and
				(self.params['maxdefocus'] < -1e-3 or self.params['maxdefocus'] > -1e-9)):
			apDisplay.printError("max defocus is not in an acceptable range, e.g. maxdefocus=-1.5e-6")
		if self.params['fromstackid'] is not None and self.params['selectionid'] is not None:
			apDisplay.printError("please only specify one of either --selectionid or --fromstackid")
		if self.params['fromstackid'] is None and self.params['selectionid'] is None:
			apDisplay.printError("please specify one of either --selectionid or --fromstackid")
		if self.params['maskassess'] is None and self.params['checkmask']:
			apDisplay.printError("particle mask assessment run need to be defined to check mask")
		if self.params['maskassess'] is not None and not self.params['checkmask']:
			apDisplay.printMsg("running mask assess")
			self.params['checkmask'] = True
	def checkIsDD(self):
		"""Set self.is_dd / is_dd_frame / is_dd_stack from the options.
		NOTE(review): presumably preset/mrc names containing '-a' denote
		aligned DD stacks — confirm against apDDprocess conventions."""
		apDisplay.printWarning('Checking for dd')
		if self.params['ddstack'] > 0:
			self.is_dd_stack = True
			self.is_dd = True
		else:
			if self.params['preset'] and '-a' in self.params['preset'] and (self.params['nframe'] or self.params['driftlimit'] > 0):
				self.is_dd = True
				self.is_dd_stack = True
			elif self.params['mrcnames'] and self.params['mrcnames'].split(',')[0] and '-a' in self.params['mrcnames'].split(',')[0] and (self.params['nframe'] or self.params['driftlimit'] > 0):
				self.is_dd = True
				self.is_dd_stack = True
			elif self.params['nframe']:
				self.is_dd = True
				self.is_dd_frame = True
	#=======================
	def preLoopFunctions(self):
		"""One-time setup before the image loop: DD processors, selection
		run lookup, pixel-size check, and the starting particle number."""
		self.is_dd_frame = False
		self.is_dd_stack = False
		self.is_dd = False
		self.checkIsDD()
		self.batchboxertimes = []
		self.ctftimes = []
		self.mergestacktimes = []
		self.meanreadtimes = []
		self.insertdbtimes = []
		self.noimages = False
		self.totalpart = 0
		self.selectiondata = None
		# Different class needed depending on if ddstack is specified or available
		if self.is_dd:
			from appionlib import apDDprocess
			if self.is_dd_frame:
				apDisplay.printMsg('DD Frame Processing')
				self.dd = apDDprocess.initializeDDFrameprocess(self.params['sessionname'])
				self.dd.setUseGS(self.params['useGS'])
			if self.is_dd_stack:
				apDisplay.printMsg('DD Stack Processing')
				self.dd = apDDprocess.DDStackProcessing()
		if len(self.imgtree) == 0:
			apDisplay.printWarning("No images were found to process")
			self.noimages = True
			# Still need to set attributes if waiting for more images
			if not self.params['wait']:
				return
		if self.params['selectionid'] is not None:
			self.selectiondata = apParticle.getSelectionRunDataFromID(self.params['selectionid'])
			if self.params['particlelabel'] == 'fromtrace':
				if (not self.selectiondata['manparams'] or not self.selectiondata['manparams']['trace']):
					apDisplay.printError("Can not use traced object center to extract boxed area without tracing")
				else:
					self.params['particlelabel'] = '_trace'
		self.checkPixelSize()
		self.existingParticleNumber=0
		self.setStartingParticleNumber()
		apDisplay.printMsg("Starting at particle number: "+str(self.particleNumber))
		if self.params['partlimit'] is not None and self.particleNumber > self.params['partlimit']:
			apDisplay.printError("Number of particles in existing stack already exceeds limit!")
		self.logpeaks = 2
	def setStartingParticleNumber(self):
		"""Hook: subclasses may override to resume an existing stack."""
		self.particleNumber = self.existingParticleNumber
	def convertTraceToParticlePeaks(self,imgdata):
		"""Create '_trace' particle picks from traced object contours."""
		apSizing.makeParticleFromContour(imgdata,self.selectiondata,'_trace')
	#=====================
	def reprocessImage(self, imgdata):
		"""
		Returns
		True, if an image should be reprocessed
		False, if an image was processed and should NOT be reprocessed
		None, if image has not yet been processed
		e.g. a confidence less than 80%
		"""
		# check to see if image is rejected by other criteria
		if self.rejectImage(imgdata) is False:
			return False
		# check CTF parameters for image and skip if criteria is not met
		if self.checkCtfParams(imgdata) is False:
			return False
		return None
	#=======================
	def processImage(self, imgdata):
		"""Per-image driver: DD frame checks, stale-file cleanup, particle
		lookup/filtering, then delegate to processParticles()."""
		imgname = imgdata['filename']
		shortname = apDisplay.short(imgdata['filename'])
		# set default to work with non-dd data
		self.framelist = []
		if self.is_dd:
			if imgdata is None or imgdata['camera']['save frames'] != True:
				apDisplay.printWarning('%s skipped for no-frame-saved\n ' % imgdata['filename'])
				return
			self.dd.setImageData(imgdata)
			self.framelist = self.dd.getFrameList(self.params)
			if not self.framelist:
				apDisplay.printWarning('image rejected because no frame passes drift limit test')
				return
			if self.is_dd_stack:
				# find the ddstackrun of the image
				if not self.params['ddstack']:
					self.dd.setDDStackRun()
				else:
					self.dd.setDDStackRun(self.params['ddstack'])
				# compare image ddstackrun with the specified ddstackrun
				if self.params['ddstack'] and self.params['ddstack'] != self.dd.getDDStackRun().dbid:
					apDisplay.printWarning('ddstack image not from specified ddstack run')
					apDisplay.printWarning('Skipping this image ....')
					return None
				# This function will reset self.dd.ddstackrun for actual processing
				self.dd.setFrameStackPath(self.params['ddstack'])
		### first remove any existing boxed files
		shortfileroot = os.path.join(self.params['rundir'], shortname)
		if not self.params['usedownmrc']:
			# remove all previous temp files
			rmfiles = glob.glob(shortfileroot+"*")
		else:
			# limit the files to be removed
			rmfiles = glob.glob(shortfileroot+".*")
		for rmfile in rmfiles:
			apFile.removeFile(rmfile)
		### convert contours to particles
		if self.selectiondata and self.params['particlelabel'] == '_trace':
			self.convertTraceToParticlePeaks(imgdata)
		### get particles
		partdatas,shiftdata = self.getParticlesInImage(imgdata)
		### check if we have particles
		if len(partdatas) == 0:
			apDisplay.printColor(shortname+" has no remaining particles and has been rejected\n","cyan")
			total_processed_particles = None
		else:
			### process partdatas
			total_processed_particles = self.processParticles(imgdata,partdatas,shiftdata)
		if total_processed_particles is None:
			self.totalpart = len(partdatas)+self.totalpart
		else:
			self.totalpart = total_processed_particles
		### check if particle limit is met
		if self.params['partlimit'] is not None and self.totalpart > self.params['partlimit']:
			apDisplay.printWarning("reached particle number limit of "+str(self.params['partlimit'])+" now stopping")
			# emptying imgtree and clearing notdone ends the appionLoop
			self.imgtree = []
			self.notdone = False
	def processParticles(self,imgdata,partdatas,shiftdata):
		"""
		this is the main component
		it should return the total number of processed particles if available otherwise, it returns None
		"""
		raise NotImplementedError()
	#=======================
	def loopCleanUp(self,imgdata):
		"""Per-image cleanup: remove temporary boxed files unless --keepall."""
		### last remove any existing boxed files, reset global params
		shortname = apDisplay.short(imgdata['filename'])
		shortfileroot = os.path.join(self.params['rundir'], shortname)
		rmfiles = glob.glob(shortfileroot+"*")
		if not self.params['keepall']:
			for rmfile in rmfiles:
				apFile.removeFile(rmfile)
############################################################################
# ParticleExtract with elimination of boxed particles cropped by the image edge
############################################################################
class ParticleBoxLoop(ParticleExtractLoop):
	"""ParticleExtractLoop variant that boxes particles at a fixed size and
	discards any particle whose box would extend beyond the image edge."""
	def setupParserOptions(self):
		"""Add the box-size and rotation options on top of the base options."""
		super(ParticleBoxLoop,self).setupParserOptions()
		self.parser.add_option("--boxsize", dest="boxsize", type="int",
			help="particle box size in pixel")
		self.parser.add_option("--rotate", dest="rotate", default=False,
			action="store_true", help="Apply rotation angles of ,for example, helix")
	def checkConflicts(self):
		"""Require --boxsize in addition to the base-class checks."""
		super(ParticleBoxLoop,self).checkConflicts()
		if self.params['boxsize'] is None:
			apDisplay.printError("A boxsize has to be specified")
	def preLoopFunctions(self):
		"""Compute the half-box used for edge tests before the image loop."""
		super(ParticleBoxLoop,self).preLoopFunctions()
		self.boxsize = int(self.params['boxsize'])
		if self.params['rotate'] is True:
			### with rotate we use a bigger boxsize
			self.half_box = int(1.5*self.boxsize/2)
		else:
			self.half_box = int(math.floor(self.boxsize / 2.0))
	def getParticlesInImage(self,imgdata):
		"""Base-class particles, minus those whose box leaves the image."""
		partdatas,shiftdata = super(ParticleBoxLoop,self).getParticlesInImage(imgdata)
		return self.removeBoxOutOfImage(imgdata,partdatas,shiftdata),shiftdata
	def removeBoxOutOfImage(self,imgdata,partdatas,shiftdata):
		"""Return only the particles whose full box fits inside the image."""
		imgdims = imgdata['camera']['dimension']
		newpartdatas = []
		for partdata in partdatas:
			start_x,start_y = apBoxer.getBoxStartPosition(imgdata,self.half_box,partdata, shiftdata)
			if apBoxer.checkBoxInImage(imgdims,start_x,start_y,self.boxsize):
				newpartdatas.append(partdata)
		return newpartdatas
class Test(ParticleExtractLoop):
    # Minimal concrete subclass used for manual testing of the extract loop.
    def processParticles(self,imgdata,partdatas,shiftdata):
        # NOTE: Python 2 print statement -- this module is Python 2 only.
        for partdata in partdatas:
            print partdata['xcoord'],partdata['ycoord']
        return None
    def commitToDatabase(self,imgdata):
        # Intentionally a no-op: test runs store nothing in the database.
        pass
if __name__ == '__main__':
    # Run the test loop when this module is invoked as a script.
    makeStack = Test()
    makeStack.run()
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from models.pytorch_revgrad import RevGrad
from efficientnet_pytorch import EfficientNet as effnet
class EfficientNet(effnet):
    """EfficientNet backbone with an extra adversarial head trained through
    gradient-reversal layers (domain-adversarial style training)."""

    def __init__(self, blocks_args=None, global_params=None):
        super(EfficientNet, self).__init__(blocks_args=blocks_args, global_params=global_params)

    def forward(self, x1, x2=None, train=False):
        # In training mode a second batch (x2) feeds the adversarial head;
        # at inference only the predictive head runs.
        if train:
            # main head (predictive)
            out, endpoints = self.predictive_network(x1)
            # additional head (adversarial)
            out_s = self.adversarial_network(endpoints, x2)
            return out, out_s
        else:
            # main head (predictive)
            out, _ = self.predictive_network(x1)
            return out

    def build_adv_model(self):
        # Probe the backbone with a dummy batch to size the adversarial FCs.
        dummy = torch.rand(1, 3, 96, 96)
        endpoints = self.extract_endpoints(dummy)
        n_filt = 0
        for i in endpoints.keys():
            if i == 'reduction_6':
                n_filt += endpoints[i].shape[1]
        # x4 because four feature vectors (mean/std of two streams) are
        # concatenated in adversarial_network below.
        self.adv_fc1 = nn.Linear(n_filt * 4, 300)
        self.adv_fc2 = nn.Linear(300, 300)
        # self.adv_fc3 = nn.Linear(300, 300)
        self.adv_fc4 = nn.Linear(300, 1)
        # gradient reversal layer
        # NOTE(review): rever1_6/rever1_12 are created but never used;
        # adversarial_network uses rever2_* for both streams -- confirm intent.
        self.rever1_6 = RevGrad()
        self.rever1_12 = RevGrad()
        self.rever2_6 = RevGrad()
        self.rever2_12 = RevGrad()
        return True

    def predictive_network(self, inputs):
        # Convolution layers
        endpoints = self.extract_endpoints(inputs)
        # Pooling and final linear layer
        x = self._avg_pooling(endpoints['reduction_6'])
        if self._global_params.include_top:
            x = x.flatten(start_dim=1)
            x = self._dropout(x)
            x = self._fc(x)
        return x, endpoints

    def adversarial_network(self, endpoints, x_s):
        # Convolution layers
        endpoints_s = self.extract_endpoints(x_s)
        # Mean- and std-pooled reduction_6 features from both streams, passed
        # through gradient-reversal layers (identity forward, flipped gradient).
        x6_s = self.rever2_6(endpoints_s['reduction_6']).mean(dim=2).mean(dim=2)
        x12_s = self.rever2_12(endpoints_s['reduction_6']).std(dim=2).std(dim=2)
        x6_p = self.rever2_6(endpoints['reduction_6']).mean(dim=2).mean(dim=2)
        x12_p = self.rever2_12(endpoints['reduction_6']).std(dim=2).std(dim=2)
        x = torch.cat([x6_s, x12_s, x6_p, x12_p,], dim=1,)
        x = torch.relu(self.adv_fc1(x))
        # x = torch.relu(self.adv_fc2(x))
        # x = torch.relu(self.adv_fc3(x))
        x = torch.sigmoid(self.adv_fc4(x))
        return x
|
nilq/baby-python
|
python
|
from idm.objects import dp, MySignalEvent, SignalEvent, __version__
from idm.utils import ment_user
from datetime import datetime
import time
@dp.signal_event_register('инфо', 'инфа', 'info')
def sinfo(event: SignalEvent) -> str:
    """Reply to the 'info' signal with bot version, owner and chat stats."""
    if event.msg['from_id'] not in event.db.trusted_users:
        # Untrusted sender: show a notice briefly, then act on it.
        message_id = event.send(event.responses['not_in_trusted'])
        time.sleep(3)
        # NOTE(review): msg_op(3, ...) presumably deletes the message -- confirm.
        event.api.msg_op(3, msg_id=message_id)
        return "ok"
    owner = event.api('users.get', user_ids=event.db.duty_id)[0]
    # TODO: extract a helper for formatting this info block (used in three places)
    event.send(event.responses['info_duty'].format(
        версия = __version__, владелец = ment_user(owner),
        чаты = len(event.db.chats.keys()),
        ид = event.chat.iris_id, имя = event.chat.name))
    return "ok"
|
nilq/baby-python
|
python
|
"""
Title: Video Vision Transformer
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ayush Thakur](https://twitter.com/ayushthakur0) (equal contribution)
Date created: 2022/01/12
Last modified: 2022/01/12
Description: A Transformer-based architecture for video classification.
"""
"""
## Introduction
Videos are sequences of images. Let's assume you have an image
representation model (CNN, ViT, etc.) and a sequence model
(RNN, LSTM, etc.) at hand. We ask you to tweak the model for video
classification. The simplest approach would be to apply the image
model to individual frames, use the sequence model to learn
sequences of image features, then apply a classification head on
the learned sequence representation.
The Keras example
[Video Classification with a CNN-RNN Architecture](https://keras.io/examples/vision/video_classification/)
explains this approach in detail. Alternatively, you can also
build a hybrid Transformer-based model for video classification as shown in the Keras example
[Video Classification with Transformers](https://keras.io/examples/vision/video_transformers/).
In this example, we minimally implement
[ViViT: A Video Vision Transformer](https://arxiv.org/abs/2103.15691)
by Arnab et al., a **pure Transformer-based** model
for video classification. The authors propose a novel embedding scheme
and a number of Transformer variants to model video clips. We implement
the embedding scheme and one of the variants of the Transformer
architecture, for simplicity.
This example requires TensorFlow 2.6 or higher, and the `medmnist`
package, which can be installed by running the code cell below.
"""
"""shell
pip install -qq medmnist
"""
"""
## Imports
"""
import os
import io
import imageio
import medmnist
import ipywidgets
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Setting seed for reproducibility
SEED = 42
os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
keras.utils.set_random_seed(SEED)
"""
## Hyperparameters
The hyperparameters are chosen via hyperparameter
search. You can learn more about the process in the "conclusion" section.
"""
# DATA
DATASET_NAME = "organmnist3d"
BATCH_SIZE = 32
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (28, 28, 28, 1)
NUM_CLASSES = 11
# OPTIMIZER
LEARNING_RATE = 1e-4
# NOTE(review): WEIGHT_DECAY is defined but not used by the plain Adam
# optimizer below -- confirm whether decay was intended.
WEIGHT_DECAY = 1e-5
# TRAINING
EPOCHS = 60
# TUBELET EMBEDDING
PATCH_SIZE = (8, 8, 8)
# NOTE(review): squared, not cubed, and unused below; for 3D tubelets the
# token count would be (28 // 8) ** 3 -- confirm before relying on it.
NUM_PATCHES = (INPUT_SHAPE[0] // PATCH_SIZE[0]) ** 2
# ViViT ARCHITECTURE
LAYER_NORM_EPS = 1e-6
PROJECTION_DIM = 128
NUM_HEADS = 8
NUM_LAYERS = 8
"""
## Dataset
For our example we use the
[MedMNIST v2: A Large-Scale Lightweight Benchmark for 2D and 3D Biomedical Image Classification](https://medmnist.com/)
dataset. The videos are lightweight and easy to train on.
"""
def download_and_prepare_dataset(data_info: dict):
    """Download the dataset archive and split it into train/val/test.

    Arguments:
        data_info (dict): Dataset metadata; must contain "url" and "MD5".

    Returns:
        Three (videos, labels) tuples: train, validation and test.
    """
    archive_path = keras.utils.get_file(origin=data_info["url"], md5_hash=data_info["MD5"])
    with np.load(archive_path) as npz:
        # The archive stores "<split>_images" / "<split>_labels" arrays.
        splits = []
        for split in ("train", "val", "test"):
            videos = npz[f"{split}_images"]
            labels = npz[f"{split}_labels"].flatten()
            splits.append((videos, labels))
    return tuple(splits)
# Get the metadata of the dataset
info = medmnist.INFO[DATASET_NAME]
# Get the dataset
prepared_dataset = download_and_prepare_dataset(info)
(train_videos, train_labels) = prepared_dataset[0]
(valid_videos, valid_labels) = prepared_dataset[1]
(test_videos, test_labels) = prepared_dataset[2]
"""
### `tf.data` pipeline
"""
@tf.function
def preprocess(frames: tf.Tensor, label: tf.Tensor):
    """Preprocess the frames tensors and parse the labels."""
    # Preprocess images: convert_image_dtype casts to float32 (scaling
    # integer inputs into [0, 1]).
    frames = tf.image.convert_image_dtype(
        frames[
            ..., tf.newaxis
        ],  # The new axis is to help for further processing with Conv3D layers
        tf.float32,
    )
    # Parse label
    label = tf.cast(label, tf.float32)
    return frames, label
def prepare_dataloader(
    videos: np.ndarray,
    labels: np.ndarray,
    loader_type: str = "train",
    batch_size: int = BATCH_SIZE,
):
    """Wrap (videos, labels) arrays into a batched, prefetching tf.data pipeline."""
    ds = tf.data.Dataset.from_tensor_slices((videos, labels))
    # Only the training split is shuffled.
    if loader_type == "train":
        ds = ds.shuffle(BATCH_SIZE * 2)
    ds = ds.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
    ds = ds.batch(batch_size)
    return ds.prefetch(tf.data.AUTOTUNE)
trainloader = prepare_dataloader(train_videos, train_labels, "train")
validloader = prepare_dataloader(valid_videos, valid_labels, "valid")
testloader = prepare_dataloader(test_videos, test_labels, "test")
"""
## Tubelet Embedding
In ViTs, an image is divided into patches, which are then spatially
flattened, a process known as tokenization. For a video, one can
repeat this process for individual frames. **Uniform frame sampling**
as suggested by the authors is a tokenization scheme in which we
sample frames from the video clip and perform simple ViT tokenization.
|  |
| :--: |
| Uniform Frame Sampling [Source](https://arxiv.org/abs/2103.15691) |
**Tubelet Embedding** is different in terms of capturing temporal
information from the video.
First, we extract volumes from the video -- these volumes contain
patches of the frame and the temporal information as well. The volumes
are then flattened to build video tokens.
|  |
| :--: |
| Tubelet Embedding [Source](https://arxiv.org/abs/2103.15691) |
"""
class TubeletEmbedding(layers.Layer):
    """Turn a video into a sequence of tubelet (spatio-temporal patch) tokens."""

    def __init__(self, embed_dim, patch_size, **kwargs):
        super().__init__(**kwargs)
        # Conv3D with stride == kernel size extracts non-overlapping volumes
        # and projects each one to `embed_dim` channels in a single pass.
        self.projection = layers.Conv3D(
            filters=embed_dim,
            kernel_size=patch_size,
            strides=patch_size,
            padding="VALID",
        )
        # Collapse the spatio-temporal grid into a flat token sequence.
        self.flatten = layers.Reshape(target_shape=(-1, embed_dim))

    def call(self, videos):
        projected_patches = self.projection(videos)
        flattened_patches = self.flatten(projected_patches)
        return flattened_patches
"""
## Positional Embedding
This layer adds positional information to the encoded video tokens.
"""
class PositionalEncoder(layers.Layer):
    """Add learned positional embeddings to a sequence of video tokens."""

    def __init__(self, embed_dim, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim

    def build(self, input_shape):
        # The number of tokens is only known once the input shape is seen.
        _, num_tokens, _ = input_shape
        self.position_embedding = layers.Embedding(
            input_dim=num_tokens, output_dim=self.embed_dim
        )
        self.positions = tf.range(start=0, limit=num_tokens, delta=1)

    def call(self, encoded_tokens):
        # Encode the positions and add it to the encoded tokens
        encoded_positions = self.position_embedding(self.positions)
        encoded_tokens = encoded_tokens + encoded_positions
        return encoded_tokens
"""
## Video Vision Transformer
The authors suggest 4 variants of Vision Transformer:
- Spatio-temporal attention
- Factorized encoder
- Factorized self-attention
- Factorized dot-product attention
In this example, we will implement the **Spatio-temporal attention**
model for simplicity. The following code snippet is heavily inspired from
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
One can also refer to the
[official repository of ViViT](https://github.com/google-research/scenic/tree/main/scenic/projects/vivit)
which contains all the variants, implemented in JAX.
"""
def create_vivit_classifier(
    tubelet_embedder,
    positional_encoder,
    input_shape=INPUT_SHAPE,
    transformer_layers=NUM_LAYERS,
    num_heads=NUM_HEADS,
    embed_dim=PROJECTION_DIM,
    layer_norm_eps=LAYER_NORM_EPS,
    num_classes=NUM_CLASSES,
):
    """Build the spatio-temporal attention variant of the ViViT classifier.

    Args:
        tubelet_embedder: layer mapping a video to a sequence of tubelet tokens.
        positional_encoder: layer adding positional embeddings to the tokens.
        input_shape: shape of a single input video (frames, H, W, channels).
        transformer_layers: number of encoder blocks.
        num_heads: attention heads per block.
        embed_dim: token embedding width.
        layer_norm_eps: epsilon for every LayerNormalization in the model.
        num_classes: size of the softmax output.

    Returns:
        A `keras.Model` mapping a video batch to class probabilities.
    """
    # Get the input layer
    inputs = layers.Input(shape=input_shape)
    # Create patches.
    patches = tubelet_embedder(inputs)
    # Encode patches.
    encoded_patches = positional_encoder(patches)

    # Create multiple layers of the Transformer block.
    for _ in range(transformer_layers):
        # Layer normalization and MHSA.
        # FIX: honor the layer_norm_eps argument here (it was hard-coded to
        # 1e-6, silently ignoring the parameter; the default is unchanged).
        x1 = layers.LayerNormalization(epsilon=layer_norm_eps)(encoded_patches)
        attention_output = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim // num_heads, dropout=0.1
        )(x1, x1)
        # Skip connection
        x2 = layers.Add()([attention_output, encoded_patches])
        # Layer Normalization and MLP
        x3 = layers.LayerNormalization(epsilon=layer_norm_eps)(x2)
        x3 = keras.Sequential(
            [
                layers.Dense(units=embed_dim * 4, activation=tf.nn.gelu),
                layers.Dense(units=embed_dim, activation=tf.nn.gelu),
            ]
        )(x3)
        # Skip connection
        encoded_patches = layers.Add()([x3, x2])

    # Layer normalization and Global average pooling.
    representation = layers.LayerNormalization(epsilon=layer_norm_eps)(encoded_patches)
    representation = layers.GlobalAvgPool1D()(representation)
    # Classify outputs.
    outputs = layers.Dense(units=num_classes, activation="softmax")(representation)
    # Create the Keras model.
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
"""
## Train
"""
def run_experiment():
    """Build, train and evaluate the ViViT classifier; return the trained model."""
    # Initialize model
    model = create_vivit_classifier(
        tubelet_embedder=TubeletEmbedding(
            embed_dim=PROJECTION_DIM, patch_size=PATCH_SIZE
        ),
        positional_encoder=PositionalEncoder(embed_dim=PROJECTION_DIM),
    )

    # Compile the model with the optimizer, loss function
    # and the metrics.
    optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=[
            keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
            keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
        ],
    )

    # Train the model.
    _ = model.fit(trainloader, epochs=EPOCHS, validation_data=validloader)

    _, accuracy, top_5_accuracy = model.evaluate(testloader)
    print(f"Test accuracy: {round(accuracy * 100, 2)}%")
    print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")

    return model


model = run_experiment()
"""
## Inference
"""
NUM_SAMPLES_VIZ = 25
# Take one test batch and keep the first NUM_SAMPLES_VIZ examples for display.
testsamples, labels = next(iter(testloader))
testsamples, labels = testsamples[:NUM_SAMPLES_VIZ], labels[:NUM_SAMPLES_VIZ]

ground_truths = []
preds = []
videos = []

for i, (testsample, label) in enumerate(zip(testsamples, labels)):
    # Generate gif
    with io.BytesIO() as gif:
        imageio.mimsave(gif, (testsample.numpy() * 255).astype("uint8"), "GIF", fps=5)
        videos.append(gif.getvalue())

    # Get model prediction
    output = model.predict(tf.expand_dims(testsample, axis=0))[0]
    pred = np.argmax(output, axis=0)

    ground_truths.append(label.numpy().astype("int"))
    preds.append(pred)
def make_box_for_grid(image_widget, fit):
    """Wrap *image_widget* in a vertically stacked, captioned box.

    Source: https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Styling.html
    """
    # Build the caption text: quoted when a value was given, "None" otherwise.
    fit_str = "'{}'".format(fit) if fit is not None else str(fit)
    caption = ipywidgets.HTML(value="" + str(fit_str) + "")

    # Box holding the image widget itself.
    image_box = ipywidgets.widgets.Box()
    image_box.children = [image_widget]

    # Stack caption above the image, centered.
    stacked = ipywidgets.widgets.VBox()
    stacked.layout.align_items = "center"
    stacked.children = [caption, image_box]
    return stacked
boxes = []
for i in range(NUM_SAMPLES_VIZ):
ib = ipywidgets.widgets.Image(value=videos[i], width=100, height=100)
true_class = info["label"][str(ground_truths[i])]
pred_class = info["label"][str(preds[i])]
caption = f"T: {true_class} | P: {pred_class}"
boxes.append(make_box_for_grid(ib, caption))
ipywidgets.widgets.GridBox(
boxes, layout=ipywidgets.widgets.Layout(grid_template_columns="repeat(5, 200px)")
)
"""
## Final thoughts
With a vanilla implementation, we achieve ~79-80% Top-1 accuracy on the
test dataset.
The hyperparameters used in this tutorial were finalized by running a
hyperparameter search using
[W&B Sweeps](https://docs.wandb.ai/guides/sweeps).
You can find out our sweeps result
[here](https://wandb.ai/minimal-implementations/vivit/sweeps/66fp0lhz)
and our quick analysis of the results
[here](https://wandb.ai/minimal-implementations/vivit/reports/Hyperparameter-Tuning-Analysis--VmlldzoxNDEwNzcx).
For further improvement, you could look into the following:
- Using data augmentation for videos.
- Using a better regularization scheme for training.
- Apply different variants of the transformer model as in the paper.
We would like to thank [Anurag Arnab](https://anuragarnab.github.io/)
(first author of ViViT) for helpful discussion. We are grateful to
[Weights and Biases](https://wandb.ai/site) program for helping with
GPU credits.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/video-vision-transformer)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/video-vision-transformer-CT).
"""
|
nilq/baby-python
|
python
|
import logging.config
import unittest
#import testing.mysqld
import sqlalchemy
from src.utils.Settings import Settings
# Modules to Test
from src.utils.Connector import Connector
class TestConnector(unittest.TestCase):
    """Unit tests for src.utils.Connector."""

    @classmethod
    def tag(cls, *tags):
        '''
        Decorator to add tags to a test class or method.
        '''
        def decorator(obj):
            setattr(obj, 'tags', set(tags))
            return obj
        return decorator

    @classmethod
    def setUpClass(cls):
        '''
        Set up for class to instantiate constructor of class under test
        '''
        super(TestConnector, cls).setUpClass()
        # Silence logging for the duration of the suite (restored in tearDownClass).
        logging.disable(logging.CRITICAL)
        # Read in config.ini for database info
        # Uses local config to not expose sensitive info
        settings = Settings(config_file='../config.ini')
        config = settings.db_config_read(section='sqldb')
        cls.username = 'test'
        cls.password = 'password'
        # Initialize Connector constructor
        cls.connector = Connector(TestConnector.username, TestConnector.password, config)

    def test_init(self):
        '''
        Test Connector __init__
        '''
        # FIX: compare by equality rather than object identity ('is'), which
        # is fragile for strings and not what this test intends to verify.
        self.assertEqual(TestConnector.connector.settings_model.username,
                         TestConnector.username,
                         'Mock value should match username')

    def test_create_engine(self):
        '''
        Test create_engine
        '''
        conn = TestConnector.connector.create_engine(TestConnector.connector.settings_model)
        mock_conn = sqlalchemy.create_engine('mysql+pymysql://')
        self.assertIsInstance(conn, type(mock_conn), 'Class type should match mock class type')

    def test_login(self):
        '''
        Test login
        '''
        mock_conn = sqlalchemy.create_engine('mysql+pymysql://')
        login = TestConnector.connector.login(mock_conn)
        self.assertIsNotNone(login, 'Pass if value is returned from server')

    @classmethod
    def tearDownClass(cls):
        '''
        Tear down for class to destroy items created during class under test
        '''
        super(TestConnector, cls).tearDownClass()
        # Re-enable logging disabled in setUpClass.
        logging.disable(logging.NOTSET)
        cls.settings = ''
        cls.username = ''
        cls.password = ''
        del cls.connector
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
from rsTools.ui.menus import menu
from ... import shelf, shelfButton
from rsTools.glob import *
import os
import rsTools.utils.osUtils.enviroments as env
import maya.cmds as cmds
from rsTools.core.skeleton import skeletonAsset
class skeletonShelfButton(shelfButton.ShelfButton):
    """Maya shelf button exposing joint tools and rig-skeleton asset creation."""

    # Shelf button appearance/behavior (consumed by the ShelfButton base class).
    _color = (255, 157, 0)
    _title = None
    _icon = os.path.join(g_rs_path_image_shelf, "bones.svg")
    _annotation = "anim"
    _enabled = True

    # main overloading proc
    def created(self):
        # Build the right-click context menu; "skeletonAction" is looked up
        # later by id in _createRigSkeleton.
        _menuItems = {"tearOff": False,
                      "enabled": True,
                      "items_provider": None,
                      "async": False,
                      "cache": 0,
                      "items": [{"label": "QuickUI", "enabled": False},
                                {"label": "joint Tool Box", "command": lambda: self._buildJointToolBox(
                                ), "toolTip": "NICE"},
                                {"label": "joint Pose UI", "command": lambda: self._buildJointPoseUI(
                                ), "toolTip": "NICE"},
                                "separator",
                                {"label": "rigSkeleton Build", "enabled": False},
                                {"label": "rigSkeleton", "actionType": "skeletonAsset", "id": "skeletonAction",
                                 "command": lambda: self._createRigSkeleton(), "toolTip": "Create rigSkeletonAsset"},
                                ]
                      }
        self._menu = menu.ContextMenu(_menuItems)
        self._menu.attachTo(self._widget, button="right")

    def _buildJointToolBox(self):
        # Deferred import: only load the UI module when the item is used.
        import rsTools.ui.interfaces.joints.jointsUI as jointUI
        jointUI.run()

    def _buildJointPoseUI(self):
        import rsTools.ui.interfaces.jointPose.jointPoseUI as jointUI
        jointUI.run()

    def _createRigSkeleton(self):
        # Only acts when something is selected in the Maya scene.
        ls = cmds.ls(sl=True)
        if ls:
            obb = self._menu.getActionByID("skeletonAction")
            assetName = obb.getText()
            lod = obb.getComboText()
            project = env.get_project_show()
            topNodeName = project+"_rigSkeleton_"+assetName+lod+"_GRP"
            rigSkeletonAsset = skeletonAsset.SkeletonAsset(topNodeName, ls[0])
            obb.setPlaceholderText("bearA")

    def mousePressed(self):
        # Left-click opens the joint toolbox UI directly.
        import rsTools.ui.interfaces.joints.jointsUI as ui
        ui.run()
|
nilq/baby-python
|
python
|
# must be first, as it does event loop patching and other "first" things
from common.page_tokens import PageTokenManager
from oozer.entities.collect_entities_iterators import (
iter_collect_entities_per_page,
iter_collect_entities_per_page_graph,
)
from tests.base.testcase import TestCase, mock
from common.enums.entity import Entity
from common.enums.reporttype import ReportType
from common.id_tools import generate_universal_id
from oozer.common.cold_storage.batch_store import ChunkDumpStore
from oozer.common.job_scope import JobScope
from oozer.common.enum import FB_PAGE_MODEL, FB_PAGE_POST_MODEL, FB_AD_VIDEO_MODEL
from tests.base import random
class TestCollectEntitiesPerPage(TestCase):
    """Verify that page-entity collection stamps vendor data (universal id)
    onto every payload written to cold storage."""

    def setUp(self):
        super().setUp()
        self.sweep_id = random.gen_string_id()
        self.scope_id = random.gen_string_id()
        self.ad_account_id = random.gen_string_id()

    def test_correct_vendor_data_inserted_into_cold_store_payload_posts(self):
        # Exercise both page posts and page videos through the same flow.
        entity_types = [Entity.PagePost, Entity.PageVideo]
        fb_model_map = {Entity.PagePost: FB_PAGE_POST_MODEL, Entity.PageVideo: FB_AD_VIDEO_MODEL}
        get_all_method_map = {Entity.PagePost: 'get_posts', Entity.PageVideo: 'get_videos'}
        for entity_type in entity_types:
            with self.subTest(f'Entity type = "{entity_type}"'):
                fbid = random.gen_string_id()
                FB_MODEL = fb_model_map[entity_type]
                get_method_name = get_all_method_map[entity_type]
                job_scope = JobScope(
                    sweep_id=self.sweep_id,
                    ad_account_id=self.ad_account_id,
                    report_type=ReportType.entity,
                    report_variant=entity_type,
                    tokens=['blah'],
                )
                universal_id_should_be = generate_universal_id(
                    ad_account_id=self.ad_account_id,
                    report_type=ReportType.entity,
                    entity_id=fbid,
                    entity_type=entity_type,
                )
                fb_data = FB_MODEL(fbid=fbid)
                fb_data['account_id'] = '0'
                entities_data = [fb_data]
                # Patch the FB fetch to return the canned entity and capture
                # what gets handed to cold storage.
                with mock.patch.object(FB_PAGE_MODEL, get_method_name, return_value=entities_data), mock.patch.object(
                    ChunkDumpStore, 'store'
                ) as store:
                    list(iter_collect_entities_per_page(job_scope))

                assert store.called
                store_args, store_keyword_args = store.call_args
                assert not store_keyword_args
                assert len(store_args) == 1, 'Store method should be called with just 1 parameter'
                data_actual = store_args[0]
                vendor_data_key = '__oprm'
                assert (
                    vendor_data_key in data_actual and type(data_actual[vendor_data_key]) == dict
                ), 'Special vendor key is present in the returned data'
                assert data_actual[vendor_data_key] == {
                    'id': universal_id_should_be
                }, 'Vendor data is set with the right universal id'
class TestCollectEntitiesPerPageGraph(TestCase):
    """Same vendor-data check as above, but for the Graph-API collection path
    (promotable page posts) which also consults the page token manager."""

    def setUp(self):
        super().setUp()
        self.sweep_id = random.gen_string_id()
        self.scope_id = random.gen_string_id()
        self.ad_account_id = random.gen_string_id()

    def test_correct_vendor_data_inserted_into_cold_store_payload_posts(self):
        entity_types = [Entity.PagePostPromotable]
        fb_model_map = {Entity.PagePostPromotable: FB_PAGE_POST_MODEL}
        for entity_type in entity_types:
            with self.subTest(f'Entity type - "{entity_type}"'):
                fbid = random.gen_string_id()
                FB_MODEL = fb_model_map[entity_type]
                job_scope = JobScope(
                    sweep_id=self.sweep_id,
                    ad_account_id=self.ad_account_id,
                    report_type=ReportType.entity,
                    report_variant=entity_type,
                    tokens=['user-token'],
                )
                universal_id_should_be = generate_universal_id(
                    ad_account_id=self.ad_account_id,
                    report_type=ReportType.entity,
                    entity_id=fbid,
                    entity_type=entity_type,
                )
                fb_data = FB_MODEL(fbid=fbid)
                fb_data['account_id'] = '0'
                entities_data = [fb_data]
                # Patch token lookup, both feed fetchers, cold storage, and the
                # promotability check (only 'is_eligible_for_promotion' is True).
                with mock.patch.object(
                    PageTokenManager, 'get_best_token', return_value=None
                ) as get_best_token, mock.patch.object(
                    FB_PAGE_MODEL, 'get_feed', return_value=entities_data
                ), mock.patch.object(
                    FB_PAGE_MODEL, 'get_ads_posts', return_value=entities_data
                ), mock.patch.object(
                    ChunkDumpStore, 'store'
                ) as store, mock.patch.object(
                    FB_PAGE_POST_MODEL, 'get', side_effect=lambda field: field == 'is_eligible_for_promotion'
                ):
                    list(iter_collect_entities_per_page_graph(job_scope))

                assert get_best_token.called
                assert store.called
                store_args, store_keyword_args = store.call_args
                assert not store_keyword_args
                assert len(store_args) == 1, 'Store method should be called with just 1 parameter'
                data_actual = store_args[0]
                vendor_data_key = '__oprm'
                assert (
                    vendor_data_key in data_actual and type(data_actual[vendor_data_key]) == dict
                ), 'Special vendor key is present in the returned data'
                assert data_actual[vendor_data_key] == {
                    'id': universal_id_should_be
                }, 'Vendor data is set with the right universal id'
|
nilq/baby-python
|
python
|
"""
Model
"""
from . import attention
from . import evaluator
from . import GAN_model
from . import GAN_trainer
from . import generator
from . import trainer
|
nilq/baby-python
|
python
|
import json
from typing import Dict, Optional
from azure.durable_functions.models.FunctionContext import FunctionContext
class DurableOrchestrationBindings:
    """Binding information for a durable-functions client.

    Carries the task hub name, the HTTP endpoints used to create and manage
    orchestrations, and any extra client context supplied by the host.
    """

    # parameter names are as defined by JSON schema and do not conform to PEP8 naming conventions
    def __init__(self, taskHubName: str, creationUrls: Dict[str, str],
                 managementUrls: Dict[str, str], rpcBaseUrl: Optional[str] = None, **kwargs):
        self._task_hub_name: str = taskHubName
        self._creation_urls: Dict[str, str] = creationUrls
        self._management_urls: Dict[str, str] = managementUrls
        # TODO: we can remove the Optional once we drop support for 1.x,
        # this is always provided in 2.x
        self._rpc_base_url: Optional[str] = rpcBaseUrl
        # Everything not matched above is treated as additional client data.
        self._client_data = FunctionContext(**kwargs)

    @property
    def task_hub_name(self) -> str:
        """Name of the container used for orchestrations."""
        return self._task_hub_name

    @property
    def creation_urls(self) -> Dict[str, str]:
        """URLs used to create new orchestrations."""
        return self._creation_urls

    @property
    def management_urls(self) -> Dict[str, str]:
        """URLs used to manage existing orchestrations."""
        return self._management_urls

    @property
    def rpc_base_url(self) -> Optional[str]:
        """Base URL for out-of-proc worker <-> host communication."""
        return self._rpc_base_url

    @property
    def client_data(self) -> FunctionContext:
        """Additional client data provided within the client context."""
        return self._client_data

    @classmethod
    def from_json(cls, json_string):
        """Deserialize a JSON string into a new binding instance.

        Parameters
        ----------
        json_string
            JSON-serialized binding payload.

        Returns
        -------
        DurableOrchestrationBindings
            New instance of the durable orchestration binding class
        """
        return cls(**json.loads(json_string))
|
nilq/baby-python
|
python
|
# Copyright 2014-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It test the xmethods support
# in the Python extension language.
import gdb
import re
from gdb.xmethod import XMethod
from gdb.xmethod import XMethodMatcher, XMethodWorker
from gdb.xmethod import SimpleXMethodMatcher
# Python replacements for the C++ methods of dop::A / dop::B used by the
# SimpleXMethodMatcher registrations at the bottom of this file.

def A_plus_A(obj, opr):
    # Replacement for dop::A::operator+(const A&).
    print('From Python <A_plus_A>:')
    return obj['a'] + opr['a']


def plus_plus_A(obj):
    # Replacement for dop::A::operator++().
    print('From Python <plus_plus_A>:')
    return obj['a'] + 1


def A_geta(obj):
    # Replacement for dop::A::geta().
    print('From Python <A_geta>:')
    return obj['a']


def A_getarrayind(obj, index):
    # Replacement for dop::A::getarrayind(int).
    print('From Python <A_getarrayind>:')
    return obj['array'][index]


def A_indexoper(obj, index):
    # Replacement for dop::A::operator[](int); returns a reference value.
    return obj['array'][index].reference_value()


def B_indexoper(obj, index):
    # Replacement for dop::B::operator[](int); returns a const reference value.
    return obj['array'][index].const_value().reference_value()
# Resolve the gdb.Type objects used as argument types in the matchers below
# (dereferencing a null-typed pointer expression yields the pointee type).
type_A = gdb.parse_and_eval('(dop::A *) 0').type.target()
type_B = gdb.parse_and_eval('(dop::B *) 0').type.target()
type_int = gdb.parse_and_eval('(int *) 0').type.target()
# The E class matcher and worker test two things:
#   1. xmethod returning None.
#   2. Matcher returning a list of workers.

class E_method_char_worker(XMethodWorker):
    # Worker for dop::E::method(char); returns None (void).
    def __init__(self):
        pass

    def get_arg_types(self):
        return gdb.lookup_type('char')

    def get_result_type(self, obj, arg):
        return gdb.lookup_type('void')

    def __call__(self, obj, arg):
        print('From Python <E_method_char>')
        return None


class E_method_int_worker(XMethodWorker):
    # Worker for dop::E::method(int); returns None (void).
    def __init__(self):
        pass

    def get_arg_types(self):
        return gdb.lookup_type('int')

    # Note: get_result_type method elided on purpose
    def __call__(self, obj, arg):
        print('From Python <E_method_int>')
        return None


class E_method_matcher(XMethodMatcher):
    # Matches dop::E::method and returns one worker per enabled overload.
    def __init__(self):
        XMethodMatcher.__init__(self, 'E_methods')
        self.methods = [XMethod('method_int'), XMethod('method_char')]

    def match(self, class_type, method_name):
        class_tag = class_type.unqualified().tag
        if not re.match('^dop::E$', class_tag):
            return None
        if not re.match('^method$', method_name):
            return None
        workers = []
        if self.methods[0].enabled:
            workers.append(E_method_int_worker())
        if self.methods[1].enabled:
            workers.append(E_method_char_worker())
        return workers
# The G class method matcher and worker illustrate how to write
# xmethod matchers and workers for template classes and template
# methods.

class G_size_diff_worker(XMethodWorker):
    # Worker for G<T>::size_diff<T1>(): sizeof(T1) - sizeof(T), no arguments.
    def __init__(self, class_template_type, method_template_type):
        self._class_template_type = class_template_type
        self._method_template_type = method_template_type

    def get_arg_types(self):
        pass

    def __call__(self, obj):
        print('From Python G<>::size_diff()')
        return (self._method_template_type.sizeof -
                self._class_template_type.sizeof)


class G_size_mul_worker(XMethodWorker):
    # Worker for G<T>::size_mul<N>(): sizeof(T) * N, no arguments.
    def __init__(self, class_template_type, method_template_val):
        self._class_template_type = class_template_type
        self._method_template_val = method_template_val

    def get_arg_types(self):
        pass

    def __call__(self, obj):
        print('From Python G<>::size_mul()')
        return self._class_template_type.sizeof * self._method_template_val


class G_mul_worker(XMethodWorker):
    # Worker for G<T>::mul<T1>(T1 arg): obj.t * arg.
    def __init__(self, class_template_type, method_template_type):
        self._class_template_type = class_template_type
        self._method_template_type = method_template_type

    def get_arg_types(self):
        return self._method_template_type

    def __call__(self, obj, arg):
        print('From Python G<>::mul()')
        return obj['t'] * arg


class G_methods_matcher(XMethodMatcher):
    # Parses the template arguments out of the class tag / method name and
    # builds the matching worker.
    def __init__(self):
        XMethodMatcher.__init__(self, 'G_methods')
        self.methods = [XMethod('size_diff'),
                        XMethod('size_mul'),
                        XMethod('mul')]

    def _is_enabled(self, name):
        # Implicitly returns None (falsey) when the method is absent/disabled.
        for method in self.methods:
            if method.name == name and method.enabled:
                return True

    def match(self, class_type, method_name):
        class_tag = class_type.unqualified().tag
        if not re.match('^dop::G<[ ]*[_a-zA-Z][ _a-zA-Z0-9]*>$',
                        class_tag):
            return None
        # Extract T from "dop::G<T>".
        t_name = class_tag[7:-1]
        try:
            t_type = gdb.lookup_type(t_name)
        except gdb.error:
            return None
        if re.match('^size_diff<[ ]*[_a-zA-Z][ _a-zA-Z0-9]*>$', method_name):
            if not self._is_enabled('size_diff'):
                return None
            t1_name = method_name[10:-1]
            try:
                t1_type = gdb.lookup_type(t1_name)
                return G_size_diff_worker(t_type, t1_type)
            except gdb.error:
                return None
        if re.match('^size_mul<[ ]*[0-9]+[ ]*>$', method_name):
            if not self._is_enabled('size_mul'):
                return None
            m_val = int(method_name[9:-1])
            return G_size_mul_worker(t_type, m_val)
        if re.match('^mul<[ ]*[_a-zA-Z][ _a-zA-Z0-9]*>$', method_name):
            if not self._is_enabled('mul'):
                return None
            t1_name = method_name[4:-1]
            try:
                t1_type = gdb.lookup_type(t1_name)
                return G_mul_worker(t_type, t1_type)
            except gdb.error:
                return None
# Simple (regex-driven) matchers for the dop::A / dop::B replacements above.
global_dm_list = [
    SimpleXMethodMatcher(r'A_plus_A',
                         r'^dop::A$',
                         r'operator\+',
                         A_plus_A,
                         # This is a replacement, hence match the arg type
                         # exactly!
                         type_A.const().reference()),
    SimpleXMethodMatcher(r'plus_plus_A',
                         r'^dop::A$',
                         r'operator\+\+',
                         plus_plus_A),
    SimpleXMethodMatcher(r'A_geta',
                         r'^dop::A$',
                         r'^geta$',
                         A_geta),
    SimpleXMethodMatcher(r'A_getarrayind',
                         r'^dop::A$',
                         r'^getarrayind$',
                         A_getarrayind,
                         type_int),
    SimpleXMethodMatcher(r'A_indexoper',
                         r'^dop::A$',
                         r'operator\[\]',
                         A_indexoper,
                         type_int),
    SimpleXMethodMatcher(r'B_indexoper',
                         r'^dop::B$',
                         r'operator\[\]',
                         B_indexoper,
                         type_int)
]

# Register the simple matchers globally, and the class-based matchers on the
# current program space.
for matcher in global_dm_list:
    gdb.xmethod.register_xmethod_matcher(gdb, matcher)
gdb.xmethod.register_xmethod_matcher(gdb.current_progspace(),
                                     G_methods_matcher())
gdb.xmethod.register_xmethod_matcher(gdb.current_progspace(),
                                     E_method_matcher())
|
nilq/baby-python
|
python
|
"""
Unit Testing
"""
def unittest(tests=None):
    """Run each entry of *tests* and report per-test outcomes.

    *tests* maps an integer id to a spec dict with keys:
        "function" -- the callable under test,
        "args"     -- arguments ([] means "call with no arguments"),
        "result"   -- the expected return value.

    Returns a dict mapping each test id to [return_code, error_message],
    where return_code is 0 on success and 1 on failure.  A failure whose
    exception carries no message yields an empty error string.
    """
    ret = {}
    for test_id, spec in tests.items():
        func = spec["function"]
        args = spec["args"]
        expected = spec["result"]
        try:
            if args == []:
                print(f"Test function: {func}")
                print(f"Test results: {expected}")
                assert func() == expected, "Error"
            else:
                print(f"Test function: {func}")
                print(f"Test arguments: {args}")
                print(f"Test results: {expected}")
                # NOTE: args is passed as a single positional argument,
                # not unpacked -- callers rely on this.
                assert func(args) == expected
            ret[test_id] = [0, ""]
        except Exception as ex:
            print(f"Exception: {ex}")
            ret[test_id] = [1, str(ex)]
    print(f"Result: {ret}")
    return ret
def design_test(functions=None, args=None, results=None):
    """Zip parallel *functions*, *args* and *results* lists into the
    test-spec dictionary consumed by unittest()/run_test().

    A None entry is normalized to the empty placeholder for its slot:
    None function -> None, None args -> [], None result -> None.

    Returns {index: {"function": ..., "args": ..., "result": ...}}.
    """
    spec = {}
    for idx, func in enumerate(functions):
        arg = args[idx]
        expected = results[idx]
        spec[idx] = {
            "function": func if func is not None else None,
            "args": arg if arg is not None else [],
            "result": expected if expected is not None else None,
        }
    return spec
def run_test(tests):
    """Execute *tests* via unittest() and print an error summary.

    Prints "No Errors Found!" when no failure produced a non-empty
    message, otherwise one "Error Found: ..." line per message.
    Assumes the suite is keyed 0..len(tests)-1, as design_test() builds.

    NOTE(review): a failed assertion without a message yields an empty
    error string and is therefore invisible in this summary.
    """
    outcome = unittest(tests)
    failures = [outcome[i][1]
                for i in range(len(tests))
                if outcome[i][1] != ""]
    if not failures:
        print("No Errors Found!")
    else:
        for message in failures:
            print("Error Found: {}".format(message))
def main():
    """Demonstrate the test helpers with two tiny fixture functions.

    Fixes over the original demo:
    * the fixtures now *return* their messages -- previously they only
      printed, so every assertion compared against None and failed;
    * test_2 formats with "Hello! {}".format(msg) -- the original
      called .format() on a string with no placeholder, discarding msg.
    With these fixes the demo suite passes and run_test() reports
    "No Errors Found!".
    """
    print("Begin Test")

    # --- Internal fixture functions
    def test_1():
        print("Hello world")
        return "Hello world"

    def test_2(msg):
        out = "Hello! {}".format(msg)
        print(out)
        return out

    # --- Input: parallel lists zipped into a spec by design_test()
    testspecs = {
        "functions": [test_1, test_2],
        "args": [None, "World"],
        "results": ["Hello world", "Hello! World"]
    }
    tests = design_test(**testspecs)
    run_test(tests)
|
nilq/baby-python
|
python
|
# Read a count n, then n whitespace-separated integers, and print the
# average of the non-negative values.  Negative values are excluded from
# both the sum and the divisor -- so, as in the original, an input of
# only negative numbers raises ZeroDivisionError.
n = int(input())
tokens = input().split()
total = 0
count = n
for idx in range(n):
    value = int(tokens[idx])
    if value >= 0:
        total += value
    else:
        count -= 1
print(total / count)
|
nilq/baby-python
|
python
|
import pandas as pd
from pathlib import Path
import sys
import re
from collections import OrderedDict
import pyqtgraph as pg
from pyqtgraph.parametertree import Parameter, ParameterTree
from ephys.ephysanalysis import acq4read
from ephys.mapanalysistools import analyzeMapData as AMD
import pylibrary.tools.fileselector as FS
class ReadDataTable():
    """Load a pickled pandas summary table and expose it as an
    OrderedDict mapping "day/slice/cell" path strings to protocol lists.

    NOTE(review): self.filename, self.current_DSC and
    self.current_protocol are never assigned in this class;
    get_filename() relies on them being set externally (BuildGui does
    this) -- confirm intended usage.
    """
    def __init__(self):
        """Create empty widgets/state; no data is read here."""
        self.tree = None
        self.ptree = ParameterTree()
        # self.ptreedata = Parameter.create(name='dataset', type='group', children=self.setParams(0))
        # self.ptree.setParameters(self.ptreedata) # add the table with granularity of "cells"
        self.prottree = ParameterTree()
        # self.setProtocols() # add the protocols
        # self.ptree_layout.addWidget(self.analysis_ptree)
        # self.ptree_layout.addWidget(self.ptree) # Parameter Tree on left
        # self.ptree_layout.addWidget(self.prottree) # protocol tree just below
        #
    def get_filename(self, test=False):
        """Return the currently selected data path; in test mode return
        self.filename unchanged."""
        if test == True:
            return self.filename
        fn = Path(self.current_DSC.strip(), self.current_protocol)
        print( "filename: ", fn)
        return fn
    def readtable(self, fn=None, datainfo='protocols', listprotocols=False):
        """Read the pickled DataFrame at *fn* and populate self.tree.

        *datainfo* selects the column holding comma-separated entries:
        'protocols' -> 'data_complete', 'images' -> 'data_images'.
        NOTE(review): any other value leaves `dataname` unbound and
        raises NameError below.

        Returns (sorted list of days, self.tree, the raw DataFrame).
        """
        if datainfo == 'protocols':
            dataname = 'data_complete'
        if datainfo == 'images':
            dataname = 'data_images'
        df = pd.read_pickle(open(fn, 'rb'))
        allprotocols = []
        self.tree = OrderedDict()
        alldays = sorted(set(df.index.values.tolist()))
        for day in alldays:
            # NOTE(review): df.loc[day] yields a Series only when the
            # index has exactly one row per day; a duplicated day would
            # return a DataFrame and break the scalar accesses below.
            subdf = df.loc[day]
            print(subdf)
            dayn = subdf.index.values.tolist()
            slices = subdf['slice_slice']
            cells = subdf['cell_cell']
            protocols = subdf[dataname]
            date = subdf['date']
            dsc = str(Path(date, slices, cells))
            self.tree[dsc] = []
            prs = protocols.split(',')
            for i in range(len(prs)):
                if listprotocols:
                    # ANSI yellow-on-black highlighting for the listing.
                    print ("\033[0;33;40m "+ str(Path(dayn, slices, cells))+ '%s'%prs[i] + '\033[0;37;40m')
                self.tree[dsc].append(prs[i])
        print(f'Read {len(alldays):d} records with {len(self.tree):d} {dataname:s}')
        # NOTE(review): allprotocols is never appended to, so this is
        # always the empty list and the result is discarded.
        allprotocols = sorted(set(allprotocols))
        return list(alldays), self.tree, df
class BuildGui():
    """Assemble the data-selection window: action buttons, three
    parameter trees (analysis / day-slice-cell / protocol) and a column
    of three plot panels.

    Selection state lives in self.current_DSC (a day/slice/cell key
    into *tree*) and self.current_protocol; both are updated by the
    parameter-tree change callbacks.
    """
    def __init__(self, tree):
        """
        Test fixture
        """
        self.basename = '/Users/pbmanis/Documents/data/MRK_Pyramidal'
        self.filename = None
        self.tree = tree
        print('tree: ', tree)
        self.mainwin = pg.Qt.QtGui.QMainWindow()
        self.win = pg.Qt.QtGui.QWidget()
        self.main_layout = pg.Qt.QtGui.QGridLayout() # top level layout for the window
        self.win.setLayout(self.main_layout)
        self.mainwin.setCentralWidget(self.win)
        self.mainwin.show()
        self.mainwin.setWindowTitle('Data Selection')
        self.mainwin.setGeometry( 100 , 100 , 1400 , 900)
        # build buttons at top of controls
        self.current_DSC = list(self.tree.keys())[0]
        self.btn_read = pg.Qt.QtGui.QPushButton("Read")
        self.btn_find = pg.Qt.QtGui.QPushButton('Find and Read')
        # use a nested grid layout for the buttons
        button_layout = pg.Qt.QtGui.QGridLayout()
        button_layout.addWidget(self.btn_read, 1, 0, 1, 1)
        # button_layout.addWidget(self.btn_analyze, 0, 1, 1, 1)
        button_layout.addWidget(self.btn_find, 0, 0, 1, 1)
        # build parametertree in left column
        #
        ptreewidth = 320
        self.main_layout.setColumnMinimumWidth(0, ptreewidth)
        # analysis # empty in test rig
        params = [
            {'name': 'Analysis', 'type': 'group', 'children': [],
            }]
        self.analysis_ptree = ParameterTree()
        self.analysis_ptreedata = Parameter.create(name='params', type='group', children=params)
        self.analysis_ptree.setParameters(self.analysis_ptreedata)
        self.ptree = ParameterTree()
        self.ptreedata = Parameter.create(name='dataset', type='group', children=self.setParams(0))
        self.ptree.setParameters(self.ptreedata) # add the table with granularity of "cells"
        self.prottree = ParameterTree()
        self.setProtocols() # add the protocols
        # use a grid layout to hold the trees
        self.ptree_widget = pg.Qt.QtGui.QWidget()
        self.ptree_layout = pg.Qt.QtGui.QGridLayout()
        self.ptree_widget.setLayout(self.ptree_layout)
        self.ptree_layout.setSpacing(2)
        # ptree in row 1 col 0, 4 rows, 2 cols
        self.ptree_layout.addWidget(self.analysis_ptree)
        self.ptree_layout.addWidget(self.ptree) # Parameter Tree on left
        self.ptree_layout.addWidget(self.prottree) # protocol tree just below
        # self.ptree_layout.setColumnStretch(0, 5)
        self.ptree_layout.setRowStretch(0, 5)
        self.ptree_layout.setRowStretch(1, 1)
        self.ptree_layout.setRowStretch(2, 1)
        # build plot window
        self.plots_widget = pg.Qt.QtGui.QWidget()
        self.plots_layout = pg.Qt.QtGui.QGridLayout()
        self.plots_widget.setLayout(self.plots_layout)
        self.plots_layout.setContentsMargins(4, 4, 4, 4)
        self.plots_layout.setSpacing(2)
        self.plots = {}
        # Three stacked panels; tuples are (title, grid row, row span).
        for panel in zip(['Wave', 'Average', 'PSTH'], [0, 14, 18], [1, 5, 5],):
            self.plots[panel[0]] = pg.PlotWidget()
            self.plots_layout.addWidget(self.plots[panel[0]],
                                        panel[1], 0, panel[2], 1)
            self.plots[panel[0]].getAxis('left').setLabel('V', color="#ff0000")
            self.plots[panel[0]].setTitle(panel[0], color="#ff0000")
            self.plots[panel[0]].getAxis('bottom').setLabel('t (sec)', color="#ff0000")
        self.main_layout.addWidget(self.plots_widget, 0, 2, 22, 1)
        self.main_layout.addLayout(button_layout, 0, 0, 1, 2)
        self.main_layout.addWidget(self.ptree_widget, 1, 0, -1, 2)
        self.retrieveAllParameters()
        # connect buttons and ptrees to actions
        self.ptreedata.sigTreeStateChanged.connect(self.update_DSC)
        self.prottreedata.sigTreeStateChanged.connect(self.get_current)
        self.btn_read.clicked.connect(self.read_run)
        # self.btn_analyze.clicked.connect(self.analyze)
        self.btn_find.clicked.connect(self.find_run)
        # print( self.MParams)
    def retrieveAllParameters(self):
        # Placeholder in this test rig.
        pass
    def read_run(self):
        # Placeholder: triggered by the "Read" button.
        pass
    def find_run(self):
        # Placeholder: triggered by the "Find and Read" button.
        pass
    def setParams(self, isel):
        """Build the Day/Slices-Cells chooser, preselecting entry *isel*."""
        self.params = [
            {'name': 'Day', 'type': 'group', 'children':
                [{'name': 'Slices/Cells', 'type': 'list', 'values': list(self.tree.keys()), 'value': list(self.tree.keys())[isel]}]
            }
        ]
        return self.params
    def setProtocols(self):
        """
        Update the protocols to correspond to the current parameters, top protocol selected
        """
        if self.tree == None:
            raise ValueError('setProtocols: Must set up read data before setting up protocols')
        self.protocols = [
            {'name': 'Protos', 'type': 'group', 'children':
                [{'name': 'Protocols', 'type': 'list',
                  'values': self.tree[self.current_DSC][:], 'value': self.tree[self.current_DSC][0]}]
            }
        ]
        self.prottreedata = Parameter.create(name='protocol', type='group', children=self.protocols)
        self.prottree.setParameters(self.prottreedata)
        self.current_protocol = self.tree[self.current_DSC][0]
        self.prottreedata.sigTreeStateChanged.connect(self.get_current)
        return self.protocols
    def get_current(self, param, changes):
        """Protocol tree callback: remember the newly selected protocol."""
        for param, change, data in changes:
            # path = self.prottreedata.childPath(param)
            # if path is not None:
            #     childName = '.'.join(path)
            # else:
            #     childName = param.name()
            self.current_protocol = data
    def update_DSC(self, param, changes):
        """Day/slice/cell tree callback: update the selection, then
        rebuild the protocol list for the new selection."""
        for param, change, data in changes:
            # path = self.ptreedata.childPath(param)
            # if path is not None:
            #     childName = '.'.join(path)
            # else:
            #     childName = param.name()
            self.current_DSC = data
        self.setProtocols()
def test():
    """Manual smoke test: load a table and build the selection GUI."""
    app = pg.mkQApp()
    reader = ReadDataTable()
    alldays, tree, df = reader.readtable(
        fn='/Users/pbmanis/Desktop/Python/mrk-nf107/NF107Ai32_Het/NF107Ai32_Het.pkl',
        datainfo='images')
    gui = BuildGui(tree)
    # Enter the Qt event loop only when not running interactively.
    if (sys.flags.interactive != 1):
        pg.Qt.QtGui.QApplication.instance().exec_()
# Allow running this module directly as a manual GUI smoke test.
if __name__ == '__main__':
    test()
|
nilq/baby-python
|
python
|
# Compose a full name from its two parts and display it.
first = "Akash"
last = "Singh"
fname = " ".join((first, last))
print(fname)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.5 on 2021-01-13 18:09
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.1.5): creates the
    # RequestLog table used to record per-request audit data.
    # NOTE: applied migrations are schema history -- change the model and
    # generate a new migration rather than editing this one.

    initial = True

    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="RequestLog",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("session_key", models.CharField(blank=True, max_length=40)),
                ("http_method", models.CharField(max_length=10)),
                ("http_user_agent", models.TextField(default="")),
                ("http_referer", models.TextField(default="")),
                ("request_path", models.URLField(default="")),
                ("query_string", models.TextField(blank=True, default="")),
                ("remote_addr", models.CharField(default="", max_length=100)),
                ("timestamp", models.DateTimeField(default=django.utils.timezone.now)),
                (
                    "category",
                    models.CharField(
                        help_text="Used to filter / group logs.", max_length=100
                    ),
                ),
                (
                    "label",
                    models.CharField(
                        help_text="Used to identify individual logs.", max_length=100
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class dictionary:
    """Wordlist of common phpMyAdmin / database-admin panel URL paths
    used for panel-location discovery.

    Fix over the original list: '/phpMyAdmin-2.6.2-rc1/' and
    '/phpMyAdmin-2.6.3/' each appeared twice, causing redundant probe
    requests.  Every path now occurs exactly once; the original order
    is otherwise preserved (generic names, then versioned phpMyAdmin
    releases, then other admin panels).
    """

    def __init__(self):
        # Candidate panel locations, probed in order.
        self.paths = ['/phpMyAdmin/',
                      '/phpmyadmin/',
                      '/PMA/',
                      '/admin/',
                      '/dbadmin/',
                      '/mysql/',
                      '/myadmin/',
                      '/phpmyadmin2/',
                      '/phpMyAdmin2/',
                      '/phpMyAdmin-2/',
                      '/php-my-admin/',
                      '/phpMyAdmin-2.2.3/',
                      '/phpMyAdmin-2.2.6/',
                      '/phpMyAdmin-2.5.1/',
                      '/phpMyAdmin-2.5.4/',
                      '/phpMyAdmin-2.5.5-rc1/',
                      '/phpMyAdmin-2.5.5-rc2/',
                      '/phpMyAdmin-2.5.5/',
                      '/phpMyAdmin-2.5.5-pl1/',
                      '/phpMyAdmin-2.5.6-rc1/',
                      '/phpMyAdmin-2.5.6-rc2/',
                      '/phpMyAdmin-2.5.6/',
                      '/phpMyAdmin-2.5.7/',
                      '/phpMyAdmin-2.5.7-pl1/',
                      '/phpMyAdmin-2.6.0-alpha/',
                      '/phpMyAdmin-2.6.0-alpha2/',
                      '/phpMyAdmin-2.6.0-beta1/',
                      '/phpMyAdmin-2.6.0-beta2/',
                      '/phpMyAdmin-2.6.0-rc1/',
                      '/phpMyAdmin-2.6.0-rc2/',
                      '/phpMyAdmin-2.6.0-rc3/',
                      '/phpMyAdmin-2.6.0/',
                      '/phpMyAdmin-2.6.0-pl1/',
                      '/phpMyAdmin-2.6.0-pl2/',
                      '/phpMyAdmin-2.6.0-pl3/',
                      '/phpMyAdmin-2.6.1-rc1/',
                      '/phpMyAdmin-2.6.1-rc2/',
                      '/phpMyAdmin-2.6.1/',
                      '/phpMyAdmin-2.6.1-pl1/',
                      '/phpMyAdmin-2.6.1-pl2/',
                      '/phpMyAdmin-2.6.1-pl3/',
                      '/phpMyAdmin-2.6.2-rc1/',
                      '/phpMyAdmin-2.6.2-beta1/',
                      '/phpMyAdmin-2.6.2/',
                      '/phpMyAdmin-2.6.2-pl1/',
                      '/phpMyAdmin-2.6.3/',
                      '/phpMyAdmin-2.6.3-rc1/',
                      '/phpMyAdmin-2.6.3-pl1/',
                      '/phpMyAdmin-2.6.4-rc1/',
                      '/phpMyAdmin-2.6.4-pl1/',
                      '/phpMyAdmin-2.6.4-pl2/',
                      '/phpMyAdmin-2.6.4-pl3/',
                      '/phpMyAdmin-2.6.4-pl4/',
                      '/phpMyAdmin-2.6.4/',
                      '/phpMyAdmin-2.7.0-beta1/',
                      '/phpMyAdmin-2.7.0-rc1/',
                      '/phpMyAdmin-2.7.0-pl1/',
                      '/phpMyAdmin-2.7.0-pl2/',
                      '/phpMyAdmin-2.7.0/',
                      '/phpMyAdmin-2.8.0-beta1/',
                      '/phpMyAdmin-2.8.0-rc1/',
                      '/phpMyAdmin-2.8.0-rc2/',
                      '/phpMyAdmin-2.8.0/',
                      '/phpMyAdmin-2.8.0.1/',
                      '/phpMyAdmin-2.8.0.2/',
                      '/phpMyAdmin-2.8.0.3/',
                      '/phpMyAdmin-2.8.0.4/',
                      '/phpMyAdmin-2.8.1-rc1/',
                      '/phpMyAdmin-2.8.1/',
                      '/phpMyAdmin-2.8.2/',
                      '/sqlmanager/',
                      '/mysqlmanager/',
                      '/p/m/a/',
                      '/PMA2005/',
                      '/pma2005/',
                      '/phpmanager/',
                      '/php-myadmin/',
                      '/phpmy-admin/',
                      '/webadmin/',
                      '/sqlweb/',
                      '/websql/',
                      '/webdb/',
                      '/mysqladmin/',
                      '/mysql-admin/']
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#############################################################
# ubi_reader/ubi_io
# (c) 2013 Jason Pruitt (jrspruitt@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################
from ubi.block import sort
class ubi_file(object):
    """Random-access wrapper around a raw UBI image file.

    Arguments:
        path         -- path of the image file to parse.
        block_size   -- NAND erase block size in bytes.
        start_offset -- (optional) first byte of UBI data in the file.
        end_offset   -- (optional) byte offset at which to stop; defaults
                        to the physical end of the file.

    Provides seek/read/tell on the underlying handle, whole-PEB and
    LEB-payload reads for individual blocks, and a reader() generator
    that walks the file one erase block at a time.
    """

    def __init__(self, path, block_size, start_offset=0, end_offset=None):
        self._fhandle = open(path, 'rb')
        if end_offset:
            self._end_offset = end_offset
        else:
            # No explicit end given: use the physical end of the file.
            self._fhandle.seek(0, 2)
            self._end_offset = self._fhandle.tell()
        self._start_offset = start_offset
        self._block_size = block_size
        if start_offset >= self._end_offset:
            raise Exception('Start offset larger than file size!')
        self._fhandle.seek(start_offset)

    @property
    def start_offset(self):
        return self._start_offset

    @start_offset.setter
    def start_offset(self, i):
        self._start_offset = i

    @property
    def end_offset(self):
        return self._end_offset

    @property
    def block_size(self):
        return self._block_size

    def seek(self, offset):
        """Move the file head to the given absolute byte offset."""
        self._fhandle.seek(offset)

    def read(self, size):
        """Read *size* bytes from the current position."""
        return self._fhandle.read(size)

    def tell(self):
        """Return the current absolute byte offset."""
        return self._fhandle.tell()

    def reset(self):
        """Rewind to start_offset."""
        self._fhandle.seek(self.start_offset)

    def reader(self):
        """Yield the file contents as block_size chunks, starting at
        start_offset and stopping at end_offset (the final chunk may be
        shorter)."""
        self.reset()
        while True:
            here = self._fhandle.tell()
            if self.end_offset and here > self.end_offset:
                break
            if self.end_offset and self.end_offset - here < self.block_size:
                chunk = self.read(self.end_offset - here)
            else:
                chunk = self.read(self.block_size)
            if not chunk:
                break
            yield chunk

    def read_block(self, block):
        """Return the complete PEB (raw erase block) data for *block*."""
        self.seek(block.file_offset)
        return self._fhandle.read(block.size)

    def read_block_data(self, block):
        """Return only the LEB payload of *block*, skipping the EC
        header and trailing data padding."""
        self.seek(block.file_offset + block.ec_hdr.data_offset)
        length = block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad
        return self._fhandle.read(length)
class leb_virtual_file():
    """Present a UBI volume's logical erase blocks (LEBs) as one
    contiguous, file-like byte stream.

    NOTE(review): self._last_buf is seeded with '' (str) while
    read_block_data() returns bytes on Python 3 -- confirm whether this
    module targets Python 2 or the seed should be b''.
    """
    def __init__(self, ubi, volume):
        self._ubi = ubi
        self._volume = volume
        # Volume blocks ordered by logical erase block number.
        self._blocks = sort.by_leb(self._volume.get_blocks(self._ubi.blocks))
        self._seek = 0
        self.leb_data_size = len(self._blocks) * self._ubi.leb_size
        # One-entry cache of the most recently read LEB.
        self._last_leb = -1
        self._last_buf = ''
    def read(self, i):
        """Read *i* bytes from the current virtual offset.

        NOTE(review): a read that crosses a LEB boundary is truncated
        at the end of the current LEB's buffer -- confirm callers never
        request spans larger than one LEB.
        """
        buf = ''
        leb = int(self.tell() / self._ubi.leb_size)
        offset = self.tell() % self._ubi.leb_size
        if leb == self._last_leb:
            # Cache hit: serve from the previously read LEB.
            self.seek(self.tell() + i)
            return self._last_buf[offset:offset+i]
        else:
            buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]])
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + i)
            return buf[offset:offset+i]
    def reset(self):
        """Rewind the virtual position to the start of the volume."""
        self.seek(0)
    def seek(self, offset):
        self._seek = offset
    def tell(self):
        return self._seek
    def reader(self):
        """Yield LEB payloads in LEB order, emitting 0xff filler blocks
        for unmapped LEB numbers (gaps)."""
        last_leb = 0
        for block in self._blocks:
            while 0 != (self._ubi.blocks[block].leb_num - last_leb):
                last_leb += 1
                yield '\xff'*self._ubi.leb_size
            last_leb += 1
            yield self._ubi.file.read_block_data(self._ubi.blocks[block])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from codegen.spec import *
from codegen.mir_emitter import *
from codegen.isel import *
from codegen.x64_def import *
from codegen.matcher import *
class X64OperandFlag(IntFlag):
    """Target flags attached to x64 symbol operands.

    Each flag selects the relocation / symbol-reference variant the
    assembler should emit for the operand (GOT, PLT, the various TLS
    access models, Darwin and COFF stub symbols).  See the X86-64 ELF
    ABI supplement and 'ELF Handling for Thread-Local Storage' for the
    precise relocation semantics.
    """
    NO_FLAG = auto()

    # GOT_ABSOLUTE_ADDRESS - the operand represents a relocation of:
    #     SYMBOL_LABEL + [. - PICBASELABEL]
    GOT_ABSOLUTE_ADDRESS = auto()

    # PIC_BASE_OFFSET - the immediate should get the value of the symbol
    # minus the PIC base label: SYMBOL_LABEL - PICBASELABEL
    PIC_BASE_OFFSET = auto()

    # GOT - offset to the GOT entry for the symbol name from the base of
    # the GOT (X86-64 ELF ABI supplement): SYMBOL_LABEL @GOT
    GOT = auto()

    # GOTOFF - offset to the location of the symbol name from the base
    # of the GOT: SYMBOL_LABEL @GOTOFF
    GOTOFF = auto()

    # GOTPCREL - offset to the GOT entry for the symbol name from the
    # current code location: SYMBOL_LABEL @GOTPCREL
    GOTPCREL = auto()

    # PLT - offset to the PLT entry of the symbol name from the current
    # code location: SYMBOL_LABEL @PLT
    PLT = auto()

    # TLSGD - offset of the GOT entry with the TLS index structure
    # (module number and variable offset); general dynamic TLS access
    # model: SYMBOL_LABEL @TLSGD
    TLSGD = auto()

    # TLSLD - offset of the GOT entry with the TLS index for the module
    # containing the symbol, resolved via __tls_get_addr; x86-64 local
    # dynamic TLS access model: SYMBOL_LABEL @TLSLD
    TLSLD = auto()

    # TLSLDM - as TLSLD but resolved via ___tls_get_addr; IA32 local
    # dynamic TLS access model: SYMBOL_LABEL @TLSLDM
    TLSLDM = auto()

    # GOTTPOFF - offset of the GOT entry holding the thread-pointer
    # offset for the symbol; x86-64 initial exec TLS access model:
    # SYMBOL_LABEL @GOTTPOFF
    GOTTPOFF = auto()

    # INDNTPOFF - absolute address of the GOT entry with the negative
    # thread-pointer offset; non-PIC IA32 initial exec TLS access model:
    # SYMBOL_LABEL @INDNTPOFF
    INDNTPOFF = auto()

    # TPOFF - thread-pointer offset for the symbol; x86-64 local exec
    # TLS access model: SYMBOL_LABEL @TPOFF
    TPOFF = auto()

    # DTPOFF - offset of the GOT entry with the TLS offset of the
    # symbol; local dynamic TLS access model: SYMBOL_LABEL @DTPOFF
    DTPOFF = auto()

    # NTPOFF - negative thread-pointer offset for the symbol; IA32 local
    # exec TLS access model: SYMBOL_LABEL @NTPOFF
    NTPOFF = auto()

    # GOTNTPOFF - offset of the GOT entry with the negative
    # thread-pointer offset; PIC IA32 initial exec TLS access model:
    # SYMBOL_LABEL @GOTNTPOFF
    GOTNTPOFF = auto()

    # DLLIMPORT - on a symbol operand "FOO", the reference is actually
    # to the "__imp_FOO" symbol (dllimport linkage on Windows).
    DLLIMPORT = auto()

    # DARWIN_NONLAZY - on "FOO", the reference is actually to
    # "FOO$non_lazy_ptr", a non-PIC-base-relative reference to a
    # non-hidden dyld lazy pointer stub.
    DARWIN_NONLAZY = auto()

    # DARWIN_NONLAZY_PIC_BASE - on "FOO", the reference is actually to
    # "FOO$non_lazy_ptr - PICBASE", a PIC-base-relative reference to a
    # non-hidden dyld lazy pointer stub.
    DARWIN_NONLAZY_PIC_BASE = auto()

    # TLVP - TLS offset for the Darwin TLS mechanism.
    TLVP = auto()

    # TLVP_PIC_BASE - 32-bit TLS offset from the picbase (Darwin TLS in
    # PIC mode).
    TLVP_PIC_BASE = auto()

    # SECREL - offset from the beginning of the section (COFF/Windows
    # TLS mechanism).
    SECREL = auto()

    # ABS8 - the symbol is known to be an absolute symbol in the range
    # [0, 128), so the @ABS8 symbol modifier can be used.
    ABS8 = auto()

    # COFFSTUB - on "FOO", the reference is actually to the
    # ".refptr.FOO" stub symbol (Windows).
    COFFSTUB = auto()
def is_null_constant(value):
    """True when *value* wraps an integer constant DAG node equal to zero."""
    node = value.node
    return isinstance(node, ConstantDagNode) and node.is_zero


def is_null_fp_constant(value):
    """True when *value* wraps a floating-point constant DAG node equal to zero."""
    node = value.node
    return isinstance(node, ConstantFPDagNode) and node.is_zero


def is_x86_zero(value):
    """True when *value* is a zero constant of either integer or FP kind."""
    return is_null_constant(value) or is_null_fp_constant(value)
class X64InstructionSelector(InstructionSelector):
def __init__(self):
    # No x64-specific selector state; defer entirely to the base
    # InstructionSelector.
    super().__init__()
def lower_wrapper_rip(self, node, dag):
    """Lower a WRAPPER_RIP node into a RIP-relative LEA machine node.

    The memory operand is (base=RIP, scale=1, index=<none>,
    disp=wrapped symbol, segment=<none>); the LEA width follows the
    node's result type (I32 or I64), anything else raises ValueError.
    """
    mvt = MachineValueType
    no_reg = MachineRegister(NOREG)
    result_ty = node.value_types[0]
    # Assemble the five-part x64 memory operand around RIP.
    rip_base = DagValue(dag.add_register_node(
        mvt(ValueType.I64), MachineRegister(RIP)), 0)
    unit_scale = DagValue(dag.add_target_constant_node(mvt(ValueType.I8), 1), 0)
    no_index = DagValue(dag.add_register_node(mvt(ValueType.I32), no_reg), 0)
    displacement = node.operands[0]
    no_segment = DagValue(dag.add_register_node(mvt(ValueType.I16), no_reg), 0)
    if result_ty == MachineValueType(ValueType.I64):
        opcode = X64MachineOps.LEA64r
    elif result_ty == MachineValueType(ValueType.I32):
        opcode = X64MachineOps.LEA32r
    else:
        raise ValueError()
    return dag.add_machine_dag_node(
        opcode, node.value_types,
        rip_base, unit_scale, no_index, displacement, no_segment)
def get_memory_operands(self, dag: Dag, operand: DagValue):
    """Decompose *operand* into the five-part x64 memory operand tuple
    (base, scale, index, disp, segment) used by machine memory nodes.

    Handles ADD/SUB of a constant, RIP-relative wrappers, plain
    wrappers and frame indices; any other shape raises ValueError.
    """
    assert(isinstance(operand, DagValue))
    noreg = MachineRegister(NOREG)
    MVT = MachineValueType
    if operand.node.opcode == VirtualDagOps.ADD:
        sub_op1 = operand.node.operands[0]
        sub_op2 = operand.node.operands[1]
        if sub_op2.node.opcode in [VirtualDagOps.CONSTANT, VirtualDagOps.TARGET_CONSTANT]:
            if sub_op1.node.opcode == X64DagOps.WRAPPER_RIP:
                # RIP-relative symbol plus (asserted-zero) constant.
                base = DagValue(dag.add_target_register_node(
                    MVT(ValueType.I64), RIP), 0)
                assert(sub_op2.node.value == 0)
                disp = sub_op1.node.operands[0]
            else:
                base = sub_op1
                disp = sub_op2
        elif sub_op1.node.opcode in [VirtualDagOps.CONSTANT, VirtualDagOps.TARGET_CONSTANT]:
            if sub_op2.node.opcode == X64DagOps.WRAPPER_RIP:
                base = DagValue(dag.add_target_register_node(
                    MVT(ValueType.I64), RIP), 0)
                assert(sub_op1.node.value == 0)
                disp = sub_op2.node.operands[0]
            else:
                base = sub_op2
                disp = sub_op1
        else:
            raise ValueError()
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == VirtualDagOps.SUB:
        sub_op1 = operand.node.operands[0]
        sub_op2 = operand.node.operands[1]
        if sub_op2.node.opcode == VirtualDagOps.CONSTANT:
            # base - c  ->  base + (-c) displacement.
            base = sub_op1
            disp = DagValue(dag.add_target_constant_node(
                sub_op2.node.value_types[0], -sub_op2.node.value), 0)
        elif sub_op1.node.opcode == VirtualDagOps.CONSTANT:
            # NOTE(review): `value_ty` looks like a typo for
            # `value_types` (would raise AttributeError), and
            # base=op2/disp=-op1 computes op2-op1 for SUB(op1, op2) --
            # confirm intent.
            base = sub_op2
            disp = DagValue(dag.add_target_constant_node(
                sub_op1.node.value_ty[0], -sub_op1.node.value), 0)
        elif sub_op1.node.opcode == VirtualDagOps.CONSTANT:
            # NOTE(review): this condition duplicates the previous arm,
            # so this branch is unreachable -- it was probably meant to
            # be a plain `else`; as written, a SUB with no constant
            # operand leaves base/disp unbound (NameError below).
            base = operand
            disp = DagValue(dag.add_target_constant_node(
                MVT(ValueType.I32), 0), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == X64DagOps.WRAPPER_RIP:
        # Bare RIP-relative symbol reference.
        base = DagValue(dag.add_register_node(
            MVT(ValueType.I64), MachineRegister(RIP)), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = operand.node.operands[0]
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == X64DagOps.WRAPPER:
        base = DagValue(dag.add_register_node(
            MVT(ValueType.I64), MachineRegister(RIP)), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = operand.node.operands[0]
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == VirtualDagOps.FRAME_INDEX:
        # Stack slot: base is the (target) frame index itself.
        base = DagValue(dag.add_frame_index_node(
            operand.ty, operand.node.index, True), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I32), 0), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    raise ValueError()
def select_srl(self, node: DagNode, dag: Dag, new_ops):
    """Select the x64 instruction for a logical shift-right (SRL) node.

    A constant shift amount selects the immediate form; a variable
    amount is truncated to 8 bits, moved into CL, and the
    shift-by-CL form (SHR32rCL / SHR64rCL) is used.  Unhandled
    operand combinations raise NotImplementedError.

    Fix: the fallback debug trace printed "select_and" (copy-paste
    from another selector); it now names this function.
    """
    op1 = new_ops[0]
    op2 = new_ops[1]
    if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
        # NOTE(review): the immediate form always uses SHR32ri even for
        # 64-bit operands -- confirm whether SHR64ri is needed here.
        return dag.add_machine_dag_node(X64MachineOps.SHR32ri, node.value_types, op1, op2)
    elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
        # Variable amount: extract the low byte of op2 and copy it to CL.
        subreg_idx_node = dag.add_target_constant_node(
            MachineValueType(ValueType.I32), subregs.index(sub_8bit))
        extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
                                                    op2, DagValue(subreg_idx_node, 0)), 0)
        cl = DagValue(dag.add_target_register_node(
            MachineValueType(ValueType.I8), CL), 0)
        if op1.ty == MachineValueType(ValueType.I32):
            opcode = X64MachineOps.SHR32rCL
        elif op1.ty == MachineValueType(ValueType.I64):
            opcode = X64MachineOps.SHR64rCL
        else:
            raise ValueError()
        copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                dag.entry, cl, extract_subreg_node), 0)
        # Glue the CL copy to the shift so they stay adjacent.
        return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
    print("select_srl")
    print([edge.node for edge in new_ops])
    raise NotImplementedError()
def select_sra(self, node: DagNode, dag: Dag, new_ops):
op1 = new_ops[0]
op2 = new_ops[1]
if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
return dag.add_machine_dag_node(X64MachineOps.SAR32ri, node.value_types, op1, op2)
elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
subreg_idx_node = dag.add_target_constant_node(
MachineValueType(ValueType.I32), subregs.index(sub_8bit))
extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
op2, DagValue(subreg_idx_node, 0)), 0)
cl = DagValue(dag.add_target_register_node(
MachineValueType(ValueType.I8), CL), 0)
if op1.ty == MachineValueType(ValueType.I32):
opcode = X64MachineOps.SAR32rCL
elif op1.ty == MachineValueType(ValueType.I64):
opcode = X64MachineOps.SAR64rCL
else:
raise ValueError()
copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
dag.entry, cl, extract_subreg_node), 0)
return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
print("select_and")
print([edge.node for edge in new_ops])
raise NotImplementedError()
def select_shl(self, node: DagNode, dag: Dag, new_ops):
op1 = new_ops[0]
op2 = new_ops[1]
if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
return dag.add_machine_dag_node(X64MachineOps.SHL32ri, node.value_types, op1, op2)
elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
subreg_idx_node = DagValue(dag.add_target_constant_node(
MachineValueType(ValueType.I32), subregs.index(sub_8bit)), 0)
extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
op2, subreg_idx_node), 0)
cl = DagValue(dag.add_target_register_node(
MachineValueType(ValueType.I8), CL), 0)
if op1.ty == MachineValueType(ValueType.I32):
opcode = X64MachineOps.SHL32rCL
elif op1.ty == MachineValueType(ValueType.I64):
opcode = X64MachineOps.SHL64rCL
else:
raise ValueError()
copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
dag.entry, cl, extract_subreg_node), 0)
return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
print("select_and")
print([edge.node for edge in new_ops])
raise NotImplementedError()
def select_bitcast(self, node: DagNode, dag: Dag, new_ops):
src = new_ops[0]
raise NotImplementedError()
def select_trunc(self, node: DagNode, dag: Dag, new_ops):
src = new_ops[0]
dst_ty = node.value_types[0]
if isinstance(src.node, DagNode):
if dst_ty.value_type == ValueType.I8:
subreg_idx = subregs.index(sub_8bit)
elif dst_ty.value_type == ValueType.I16:
subreg_idx = subregs.index(sub_16bit)
elif dst_ty.value_type == ValueType.I32:
subreg_idx = subregs.index(sub_32bit)
subreg_idx_node = DagValue(dag.add_target_constant_node(
MachineValueType(ValueType.I32), subreg_idx), 0)
extract_subreg_node = dag.add_node(TargetDagOps.EXTRACT_SUBREG, node.value_types,
src, subreg_idx_node)
return extract_subreg_node
raise NotImplementedError()
def select_callseq_start(self, node: DagNode, dag: Dag, new_ops):
chain = new_ops[0]
in_bytes = new_ops[1]
out_bytes = new_ops[2]
opt = dag.add_target_constant_node(MachineValueType(ValueType.I32), 0)
return dag.add_machine_dag_node(X64MachineOps.ADJCALLSTACKDOWN32, node.value_types, in_bytes, out_bytes, DagValue(opt, 0), chain)
def select_callseq_end(self, node: DagNode, dag: Dag, new_ops):
chain = new_ops[0]
in_bytes = new_ops[1]
out_bytes = new_ops[2]
glue = self.get_glue(new_ops)
ops = [in_bytes, out_bytes, chain]
if glue:
ops.append(glue)
return dag.add_machine_dag_node(X64MachineOps.ADJCALLSTACKUP32, node.value_types, *ops)
def get_glue(self, operands):
for operand in operands:
if operand.ty == MachineValueType(ValueType.GLUE):
return operand
return None
def select_call(self, node: DagNode, dag: Dag, new_ops):
chain = new_ops[0]
target = new_ops[1]
glue = self.get_glue(new_ops)
ops = [target, chain]
if glue:
ops.append(glue)
return dag.add_machine_dag_node(X64MachineOps.CALLpcrel32, node.value_types, *ops)
def select_return(self, node: DagNode, dag: Dag, new_ops):
chain = new_ops[0]
ops = new_ops[1:]
return dag.add_machine_dag_node(X64MachineOps.RET, node.value_types, *ops, chain)
    def select_divrem(self, node: DagNode, dag: Dag, new_ops):
        """Select x86 DIV/IDIV for an (S|U)DIVREM node.

        x86 division is fixed-register: the dividend lives in AX/DX:AX/
        EDX:EAX/RDX:RAX depending on width.  The low half is filled from
        op1; the high half is sign-extended (signed) or zeroed (unsigned).
        Glue edges keep the register setup adjacent to the divide.

        NOTE(review): only the quotient node (``q_node``) is returned;
        ``r_node`` (the remainder COPY_FROM_REG) is built but unused here
        — confirm how callers recover the remainder.
        """
        op1 = new_ops[0]
        op2 = new_ops[1]
        is_signed = node.opcode == VirtualDagOps.SDIVREM
        if isinstance(op1.node, FrameIndexDagNode) and isinstance(op2.node, ConstantDagNode):
            raise NotImplementedError()
        elif isinstance(op1.node, DagNode):
            # Legalize a constant divisor into a target constant operand.
            if isinstance(op2.node, DagNode):
                pass
            elif isinstance(op2.node, ConstantDagNode):
                op2 = DagValue(dag.add_target_constant_node(
                    op2.ty, op2.node.value), 0)
            else:
                raise NotImplementedError()
            ty = op1.ty
            # Pick the width-specific divide opcode.
            if is_signed:
                if ty == MachineValueType(ValueType.I8):
                    opcode = X64MachineOps.IDIV8r
                elif ty == MachineValueType(ValueType.I16):
                    opcode = X64MachineOps.IDIV16r
                elif ty == MachineValueType(ValueType.I32):
                    opcode = X64MachineOps.IDIV32r
                elif ty == MachineValueType(ValueType.I64):
                    opcode = X64MachineOps.IDIV64r
                else:
                    raise NotImplementedError()
            else:
                if ty == MachineValueType(ValueType.I8):
                    opcode = X64MachineOps.DIV8r
                elif ty == MachineValueType(ValueType.I16):
                    opcode = X64MachineOps.DIV16r
                elif ty == MachineValueType(ValueType.I32):
                    opcode = X64MachineOps.DIV32r
                elif ty == MachineValueType(ValueType.I64):
                    opcode = X64MachineOps.DIV64r
                else:
                    raise NotImplementedError()
            # Fixed quotient/remainder register pair for each width.
            if ty == MachineValueType(ValueType.I8):
                lo_reg = DagValue(dag.add_target_register_node(ty, AL), 0)
                hi_reg = DagValue(dag.add_target_register_node(ty, AH), 0)
            elif ty == MachineValueType(ValueType.I16):
                lo_reg = DagValue(dag.add_target_register_node(ty, AX), 0)
                hi_reg = DagValue(dag.add_target_register_node(ty, DX), 0)
            elif ty == MachineValueType(ValueType.I32):
                lo_reg = DagValue(dag.add_target_register_node(ty, EAX), 0)
                hi_reg = DagValue(dag.add_target_register_node(ty, EDX), 0)
            elif ty == MachineValueType(ValueType.I64):
                lo_reg = DagValue(dag.add_target_register_node(ty, RAX), 0)
                hi_reg = DagValue(dag.add_target_register_node(ty, RDX), 0)
            else:
                raise NotImplementedError()
            if is_signed:
                # Copy dividend into the low register, then sign-extend
                # into the high half.
                # NOTE(review): CDQ is emitted for every width here —
                # confirm 16/64-bit divides shouldn't use CWD/CQO.
                copy_to_lo_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                        dag.entry, lo_reg, op1), 1)
                copy_to_hi_node = DagValue(dag.add_machine_dag_node(X64MachineOps.CDQ, [MachineValueType(ValueType.GLUE)],
                                                                    copy_to_lo_node), 0)
                divrem_node = DagValue(dag.add_machine_dag_node(
                    opcode, [MachineValueType(ValueType.GLUE)], op2, copy_to_hi_node), 0)
                q_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [lo_reg.ty],
                                               dag.entry, lo_reg, divrem_node), 0)
                r_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [hi_reg.ty],
                                               dag.entry, hi_reg, divrem_node), 0)
            else:
                # Unsigned: zero the high half via a MOVri of 0.
                zero_value = DagValue(dag.add_target_constant_node(ty, 0), 0)
                if ty == MachineValueType(ValueType.I8):
                    mov_ri_opcode = X64MachineOps.MOV8ri
                elif ty == MachineValueType(ValueType.I16):
                    mov_ri_opcode = X64MachineOps.MOV16ri
                elif ty == MachineValueType(ValueType.I32):
                    mov_ri_opcode = X64MachineOps.MOV32ri
                elif ty == MachineValueType(ValueType.I64):
                    mov_ri_opcode = X64MachineOps.MOV64ri
                else:
                    raise NotImplementedError()
                zero_value = DagValue(
                    dag.add_machine_dag_node(mov_ri_opcode, [ty], zero_value), 0)
                copy_to_lo_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                        dag.entry, lo_reg, op1), 1)
                copy_to_hi_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                        dag.entry, hi_reg, zero_value, copy_to_lo_node), 1)
                divrem_node = DagValue(dag.add_machine_dag_node(
                    opcode, [MachineValueType(ValueType.GLUE)], op2, copy_to_hi_node), 0)
                q_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [lo_reg.ty],
                                               dag.entry, lo_reg, divrem_node), 0)
                r_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [hi_reg.ty],
                                               dag.entry, hi_reg, divrem_node), 0)
            return q_node.node
        # Debug fallthrough for operand shapes not handled above.
        print("select_divrem")
        print([edge.node for edge in new_ops])
        raise NotImplementedError()
def select_copy_from_reg(self, node: DagNode, dag: Dag, new_ops):
return node
# return dag.add_machine_dag_node(VirtualDagOps.COPY_FROM_REG, node.value_types, *new_ops)
chain = new_ops[0]
src = new_ops[1]
if isinstance(src.node, RegisterDagNode):
return src.node
print("select_copy_from_reg")
print([edge.node for edge in new_ops])
raise NotImplementedError()
    def select_copy_to_reg(self, node: DagNode, dag: Dag, new_ops):
        """Select COPY_TO_REG, legalizing the source operand first.

        A constant source is materialized with MOVri (via
        select_constant); a frame-index source is turned into an address
        with a width-matched LEA.  The COPY_TO_REG itself is re-emitted
        with the legalized source (plus glue, if any).
        """
        chain = node.operands[0]
        dest = node.operands[1]
        src = node.operands[2]
        if src.node.opcode == VirtualDagOps.CONSTANT:
            # Constants cannot be the source of a register copy directly.
            src = DagValue(self.select_constant(src.node, dag, []), 0)
        elif src.node.opcode == VirtualDagOps.FRAME_INDEX:
            # Compute the stack-slot address with an LEA of matching width.
            lea_ops = self.get_memory_operands(dag, src)
            if src.ty == MachineValueType(ValueType.I64):
                lea_operand = X64MachineOps.LEA64r
            elif src.ty == MachineValueType(ValueType.I32):
                lea_operand = X64MachineOps.LEA32r
            else:
                raise ValueError()
            src = DagValue(dag.add_machine_dag_node(
                lea_operand, [src.ty], *lea_ops), 0)
        glue = self.get_glue(new_ops)
        ops = [chain, dest, src]
        if glue:
            ops.append(glue)
        return dag.add_node(VirtualDagOps.COPY_TO_REG, node.value_types, *ops)
def select_code(self, node: DagNode, dag: Dag):
ops_table = [op for op in X64MachineOps.insts()]
value = DagValue(node, 0)
def match_node(inst: MachineInstructionDef):
for pattern in inst.patterns:
_, res = pattern.match(None, [value], 0, dag)
if res:
return construct(inst, node, dag, res)
return None
for op in ops_table:
matched = match_node(op)
if matched:
return matched
for pattern in x64_patterns:
_, res = pattern.match(node, dag)
if res:
return pattern.construct(node, dag, res).node
return None
def select_constant(self, node: DagNode, dag: Dag, new_ops):
value = DagValue(dag.add_target_constant_node(
node.value_types[0], node.value), 0)
ops = [value]
if node.value_types[0] == MachineValueType(ValueType.I64):
operand = X64MachineOps.MOV64ri
elif node.value_types[0] == MachineValueType(ValueType.I32):
operand = X64MachineOps.MOV32ri
elif node.value_types[0] == MachineValueType(ValueType.I8):
operand = X64MachineOps.MOV8ri
else:
raise ValueError()
return dag.add_machine_dag_node(operand, node.value_types, *ops)
def select_scalar_to_vector(self, node: DagNode, dag: Dag, new_ops):
in_type = node.operands[0].ty
out_type = node.value_types[0]
if in_type == MachineValueType(ValueType.F32) and out_type == MachineValueType(ValueType.V4F32):
regclass_id = regclasses.index(VR128)
regclass_id_val = DagValue(dag.add_target_constant_node(
MachineValueType(ValueType.I32), regclass_id), 0)
return dag.add_node(TargetDagOps.COPY_TO_REGCLASS, node.value_types, node.operands[0], regclass_id_val)
raise ValueError()
    def select(self, node: DagNode, dag: Dag):
        """Top-level instruction selection for one DAG node.

        Order: already-selected target nodes pass through; then
        table-generated patterns (select_code); then a hand-written
        dispatch table and special cases; leaf/target nodes pass through
        unchanged.  Unhandled opcodes raise NotImplementedError.
        """
        new_ops = node.operands
        # Already a target-specific node: nothing to do.
        if isinstance(node.opcode, TargetDagOps):
            return node
        # Try the pattern tables first.
        matched = self.select_code(node, dag)
        if matched:
            return matched
        reg_info = dag.mfunc.target_info.get_register_info()
        # Opcode -> handler dispatch for the hand-written selectors.
        SELECT_TABLE = {
            VirtualDagOps.COPY_FROM_REG: self.select_copy_from_reg,
            VirtualDagOps.COPY_TO_REG: self.select_copy_to_reg,
            VirtualDagOps.SRL: self.select_srl,
            VirtualDagOps.SHL: self.select_shl,
            VirtualDagOps.SRA: self.select_sra,
            VirtualDagOps.SDIVREM: self.select_divrem,
            VirtualDagOps.UDIVREM: self.select_divrem,
            VirtualDagOps.BITCAST: self.select_bitcast,
            VirtualDagOps.TRUNCATE: self.select_trunc,
            VirtualDagOps.CALLSEQ_START: self.select_callseq_start,
            VirtualDagOps.CALLSEQ_END: self.select_callseq_end,
            VirtualDagOps.SCALAR_TO_VECTOR: self.select_scalar_to_vector,
            X64DagOps.CALL: self.select_call,
            X64DagOps.RETURN: self.select_return,
        }
        # Special case: i32 -> i64 zero-extension is done by inserting
        # the 32-bit value into a zeroed 64-bit register (SUBREG_TO_REG).
        # If the types don't match this shape, control falls through to
        # the chain below and ultimately raises NotImplementedError
        # (ZERO_EXTEND is not in SELECT_TABLE).
        if node.opcode == VirtualDagOps.ZERO_EXTEND:
            src_ty = node.operands[0].ty
            dst_ty = node.value_types[0]
            if src_ty == MachineValueType(ValueType.I32) and dst_ty == MachineValueType(ValueType.I64):
                if dst_ty == MachineValueType(ValueType.I64):
                    zero_val = DagValue(dag.add_machine_dag_node(
                        X64MachineOps.MOV64r0, [dst_ty]), 0)
                    if src_ty.value_type == ValueType.I8:
                        subreg_idx = subregs.index(sub_8bit)
                    elif src_ty.value_type == ValueType.I16:
                        subreg_idx = subregs.index(sub_16bit)
                    elif src_ty.value_type == ValueType.I32:
                        subreg_idx = subregs.index(sub_32bit)
                    subreg_idx_node = DagValue(dag.add_target_constant_node(
                        MachineValueType(ValueType.I32), subreg_idx), 0)
                    regclass_id = x64_regclasses.index(GR64)
                    regclass_id_val = DagValue(
                        dag.add_target_constant_node(MachineValueType(ValueType.I32), regclass_id), 0)
                    return dag.add_node(TargetDagOps.SUBREG_TO_REG, [dst_ty], zero_val, node.operands[0], subreg_idx_node)
        # Leaf and target-materialized nodes pass through unchanged.
        if node.opcode == VirtualDagOps.ENTRY:
            return dag.entry.node
        elif node.opcode == VirtualDagOps.UNDEF:
            return node
        elif node.opcode == VirtualDagOps.CONDCODE:
            return node
        elif node.opcode == VirtualDagOps.BASIC_BLOCK:
            return node
        elif node.opcode == VirtualDagOps.REGISTER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT_POOL:
            return node
        elif node.opcode == VirtualDagOps.TARGET_FRAME_INDEX:
            return node
        elif node.opcode == VirtualDagOps.TARGET_REGISTER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_GLOBAL_ADDRESS:
            return node
        elif node.opcode == VirtualDagOps.TARGET_GLOBAL_TLS_ADDRESS:
            return node
        elif node.opcode == VirtualDagOps.TARGET_EXTERNAL_SYMBOL:
            return node
        elif node.opcode == VirtualDagOps.INLINEASM:
            return node
        elif node.opcode == VirtualDagOps.EXTERNAL_SYMBOL:
            return dag.add_external_symbol_node(node.value_types[0], node.symbol, True)
        elif node.opcode == VirtualDagOps.MERGE_VALUES:
            return dag.add_node(node.opcode, node.value_types, *new_ops)
        elif node.opcode == VirtualDagOps.TOKEN_FACTOR:
            return dag.add_node(node.opcode, node.value_types, *new_ops)
        elif node.opcode == X64DagOps.WRAPPER_RIP:
            return self.lower_wrapper_rip(node, dag)
        elif node.opcode == X64DagOps.WRAPPER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT_FP:
            return node
        elif node.opcode in SELECT_TABLE:
            select_func = SELECT_TABLE[node.opcode]
            minst = select_func(node, dag, new_ops)
        else:
            raise NotImplementedError(
                "Can't select the instruction: {}".format(node.opcode))
        return minst
class X86CallingConv(CallingConv):
    """C calling-convention lowering for x86-64.

    Handles both the SysV-style register assignment
    (allocate_*_x64_cdecl) and the Win64 convention
    (allocate_*_win64_cdecl), selected by the target triple.
    """

    def __init__(self):
        pass

    @property
    def id(self):
        # Only the plain C calling convention is modeled.
        return CallingConvID.C

    def can_lower_return(self, func: Function):
        """True if the function's return value fits in registers (<= 16 bytes)."""
        return_size, align = func.module.data_layout.get_type_size_in_bits(
            func.vty.return_ty)
        return return_size / 8 <= 16

    def lower_return(self, builder: DagBuilder, inst: ReturnInst, g: Dag):
        """Lower a return instruction into an X64 RETURN DAG node.

        Return values are split into register-sized parts, assigned
        locations by the calling convention, and copied into the
        designated physical registers.  An sret-demoted return instead
        copies the hidden pointer back into EAX/RAX.
        """
        mfunc = builder.mfunc
        calling_conv = mfunc.target_info.get_calling_conv()
        reg_info = mfunc.target_info.get_register_info()
        data_layout = builder.data_layout
        # sret demotion: the caller passed a hidden pointer to the
        # return slot, held in func_info.sret_reg.
        demote_reg = builder.func_info.sret_reg
        has_demote_arg = demote_reg is not None
        stack_pop_bytes = builder.get_value(ConstantInt(0, i32))
        if len(inst.operands) > 0:
            return_offsets = []
            return_vts = compute_value_types(
                inst.block.func.return_ty, data_layout, return_offsets)
            returns = []
            offset_in_arg = 0
            # Analyze return value
            for val_idx, vt in enumerate(return_vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                for reg_idx in range(reg_count):
                    flags = CCArgFlags()
                    returns.append(CallingConvReturn(
                        vt, reg_vt, 0, offset_in_arg, flags))
                    offset_in_arg += reg_vt.get_size_in_byte()
            # Apply calling convention
            ccstate = CallingConvState(calling_conv, mfunc)
            ccstate.compute_returns_layout(returns)
            # Handle return values
            ret_parts = []
            ret_value = builder.get_value(inst.rs)
            idx = 0
            for val_idx, vt in enumerate(return_vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                if reg_count > 1:
                    raise NotImplementedError()
                ret_parts.append(ret_value.get_value(idx))
                idx += reg_count
            reg_vals = []
            # Copy each part into its assigned physical register.
            for idx, ret_val in enumerate(ccstate.values):
                assert(isinstance(ret_val, CCArgReg))
                ret_vt = ret_val.loc_vt
                reg_val = DagValue(
                    g.add_target_register_node(ret_vt, ret_val.reg), 0)
                copy_val = ret_parts[idx]
                builder.root = get_copy_to_parts(
                    copy_val, [reg_val], ret_vt, builder.root, builder.g)
                reg_vals.append(reg_val)
            ops = [builder.root, stack_pop_bytes, *reg_vals]
        else:
            # void return
            ops = [builder.root, stack_pop_bytes]
        if has_demote_arg:
            # Return the sret pointer in the integer return register.
            return_ty = inst.block.func.ty
            vts = compute_value_types(
                return_ty, inst.block.func.module.data_layout)
            assert(len(vts) == 1)
            assert(len(demote_reg) == 1)
            ret_val = DagValue(
                builder.g.add_register_node(vts[0], demote_reg[0]), 0)
            if ret_val.ty == MachineValueType(ValueType.I32):
                ret_reg = EAX
            elif ret_val.ty == MachineValueType(ValueType.I64):
                ret_reg = RAX
            else:
                raise NotImplementedError()
            reg_node = DagValue(
                g.add_target_register_node(ret_val.ty, ret_reg), 0)
            node = g.add_copy_to_reg_node(reg_node, ret_val)
            builder.root = DagValue(node, 0)
            ops = [builder.root, stack_pop_bytes, reg_node]
        node = g.add_node(X64DagOps.RETURN, [
            MachineValueType(ValueType.OTHER)], *ops)
        builder.root = DagValue(node, 0)
        return node

    def compute_type_size_aligned(self, ty, data_layout: DataLayout):
        """Return the (size, alignment) of *ty* in bits from the data layout."""
        return data_layout.get_type_size_in_bits(ty)

    def lower_call(self, builder: DagBuilder, inst: CallInst, g: Dag):
        """Lower a call instruction to the CALLSEQ_START/CALL/CALLSEQ_END
        DAG sequence, passing arguments per the calling convention and
        collecting return values from their physical registers.
        """
        mfunc = builder.mfunc
        func = inst.callee
        calling_conv = mfunc.target_info.get_calling_conv()
        reg_info = mfunc.target_info.get_register_info()
        data_layout = builder.data_layout
        target_lowering = mfunc.target_info.get_lowering()
        ptr_ty = target_lowering.get_frame_index_type(data_layout)
        is_vararg = func.is_variadic
        is_win64 = mfunc.target_info.triple.os == OS.Windows and mfunc.target_info.triple.arch == ArchType.X86_64
        # Handle arguments
        args = []
        for i, arg in enumerate(inst.args):
            vts = compute_value_types(arg.ty, data_layout)
            offset_in_arg = 0
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                for reg_idx in range(reg_count):
                    flags = CCArgFlags()
                    args.append(CallingConvArg(
                        vt, reg_vt, i, offset_in_arg, flags))
                    offset_in_arg += reg_vt.get_size_in_byte()
        ccstate = CallingConvState(calling_conv, mfunc)
        ccstate.compute_arguments_layout(args)
        # NOTE(review): stack_offset is computed but not used below.
        stack_offset = align_to(ccstate.stack_offset, ccstate.stack_maxalign)
        # Estimate stack size to call function
        data_layout = builder.data_layout
        # NOTE(review): the base of 32 bytes looks like the Win64 shadow
        # space, but it is applied unconditionally — confirm for SysV.
        stack_bytes = 32
        for arg in inst.args:
            size, align = self.compute_type_size_aligned(arg.ty, data_layout)
            arg_size = int(size / 8)
            arg_align = int(align / 8)
            stack_bytes += arg_size
        in_bytes = g.add_target_constant_node(
            MachineValueType(ValueType.I32), stack_bytes)
        out_bytes = g.add_target_constant_node(
            MachineValueType(ValueType.I32), 0)
        callseq_start_node = g.add_node(VirtualDagOps.CALLSEQ_START, [
            MachineValueType(ValueType.OTHER)], builder.root, DagValue(in_bytes, 0), DagValue(out_bytes, 0))
        builder.root = DagValue(callseq_start_node, 0)
        stack_ptr_type = MachineValueType(ValueType.I64)
        esp_reg_node = g.add_target_register_node(stack_ptr_type, RSP)
        # NOTE(review): esp is computed but not used below.
        esp = g.add_copy_from_reg_node(
            stack_ptr_type, DagValue(esp_reg_node, 0))
        ##
        # Flatten each argument into register-sized parts.
        arg_parts = []
        for arg in inst.args:
            idx = 0
            arg_value = builder.get_value(arg)
            vts = compute_value_types(arg.ty, data_layout)
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                if reg_count > 1:
                    raise NotImplementedError()
                arg_parts.append(arg_value.get_value(idx))
                idx += reg_count
        chain = g.root
        reg_vals = []
        arg_vals = []
        regs_to_pass = []
        # Place each part in its assigned register or stack slot.
        for idx, arg_val in enumerate(ccstate.values):
            if isinstance(arg_val, CCArgReg):
                reg_val = DagValue(g.add_target_register_node(
                    arg_val.vt, arg_val.reg), 0)
                copy_val = arg_parts[idx]
                if arg_val.loc_info == CCArgLocInfo.Full:
                    pass
                elif arg_val.loc_info == CCArgLocInfo.Indirect:
                    # Pass by pointer: spill the value to a fresh stack
                    # slot and pass its address instead.
                    arg_mem_size = arg_val.vt.get_size_in_byte()
                    arg_mem_align = int(data_layout.get_pref_type_alignment(
                        arg_val.vt.get_ir_type()) / 8)
                    arg_mem_frame_idx = mfunc.frame.create_stack_object(
                        arg_mem_size, arg_mem_align)
                    arg_mem_val = DagValue(builder.g.add_frame_index_node(
                        ptr_ty, arg_mem_frame_idx), 0)
                    chain = DagValue(g.add_store_node(
                        chain, arg_mem_val, copy_val), 0)
                    copy_val = arg_mem_val
                else:
                    raise ValueError()
                arg_vals.append(copy_val)
                reg_vals.append(reg_val)
                regs_to_pass.append((reg_val, copy_val))
                # Win64 varargs: float args must be mirrored into the
                # corresponding integer (shadow) register.
                if is_vararg and is_win64:
                    shadow_reg = None
                    if arg_val.reg == XMM0:
                        shadow_reg = RCX
                    elif arg_val.reg == XMM1:
                        shadow_reg = RDX
                    elif arg_val.reg == XMM2:
                        shadow_reg = R8
                    elif arg_val.reg == XMM3:
                        shadow_reg = R9
                    if shadow_reg:
                        reg_val = DagValue(g.add_target_register_node(
                            arg_val.vt, shadow_reg), 0)
                        regs_to_pass.append((reg_val, copy_val))
            else:
                # Stack-assigned argument: store at RSP + 32 + offset.
                assert(isinstance(arg_val, CCArgMem))
                copy_val = arg_parts[idx]
                ptr_val = DagValue(g.add_target_register_node(
                    ptr_ty, RSP), 0)
                ptr_offset_val = DagValue(
                    g.add_constant_node(ptr_ty, (32 + arg_val.offset)), 0)
                ptr_val = DagValue(
                    g.add_node(VirtualDagOps.ADD, [ptr_ty], ptr_val, ptr_offset_val), 0)
                chain = DagValue(g.add_store_node(
                    chain, ptr_val, copy_val), 0)
        # Chain the register copies together with glue so they stay
        # adjacent to the call.
        copy_to_reg_chain = None
        for reg_val, copy_val in regs_to_pass:
            operands = [chain, reg_val, copy_val]
            if copy_to_reg_chain:
                operands.append(copy_to_reg_chain.get_value(1))
            copy_to_reg_chain = DagValue(builder.g.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(
                ValueType.OTHER), MachineValueType(ValueType.GLUE)], *operands), 0)
        func_address = builder.get_or_create_global_address(inst.callee, True)
        ops = [chain, func_address]
        if len(ccstate.values) > 0:
            ops.append(copy_to_reg_chain.get_value(1))
        call_node = DagValue(g.add_node(
            X64DagOps.CALL, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)], *ops), 0)
        ops = [call_node.get_value(0), DagValue(in_bytes, 0), DagValue(
            out_bytes, 0), call_node.get_value(1)]
        callseq_end_node = DagValue(g.add_node(VirtualDagOps.CALLSEQ_END, [
            MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)], *ops), 0)
        chain = callseq_end_node.get_value(0)
        builder.root = chain
        # Handle returns
        return_offsets = []
        return_vts = compute_value_types(inst.ty, data_layout, return_offsets)
        returns = []
        if not self.can_lower_return(func):
            raise NotImplementedError()
        else:
            offset_in_arg = 0
            for val_idx, vt in enumerate(return_vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                for reg_idx in range(reg_count):
                    flags = CCArgFlags()
                    returns.append(CallingConvReturn(
                        vt, reg_vt, 0, offset_in_arg, flags))
                    offset_in_arg += reg_vt.get_size_in_byte()
        ccstate = CallingConvState(calling_conv, mfunc)
        ccstate.compute_returns_layout(returns)
        glue_val = callseq_end_node.get_value(1)
        # Handle return values
        ret_vals = []
        for idx, ret_val in enumerate(ccstate.values):
            assert(isinstance(ret_val, CCArgReg))
            reg = MachineRegister(ret_val.reg)
            reg_node = DagValue(
                builder.g.add_register_node(ret_val.loc_vt, reg), 0)
            ret_val_node = DagValue(builder.g.add_node(VirtualDagOps.COPY_FROM_REG, [
                ret_val.loc_vt, MachineValueType(ValueType.GLUE)], chain, reg_node, glue_val), 0)
            glue_val = ret_val_node.get_value(1)
            ret_vals.append(ret_val_node)
        ret_parts = []
        idx = 0
        for val_idx, vt in enumerate(return_vts):
            reg_vt = reg_info.get_register_type(vt)
            reg_count = reg_info.get_register_count(vt)
            if reg_count > 1:
                raise NotImplementedError()
            ret_parts.append(ret_vals[idx])
            idx += reg_count
        if len(ret_parts) == 0:
            return None
        return builder.g.add_merge_values(ret_parts)

    def allocate_return_x64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign one return value to a register (SysV-style x64).

        Returns False once the value has been placed; raises for
        unsupported types.
        """
        # i1 is widened to i8 before register assignment.
        if loc_vt.value_type == ValueType.I1:
            loc_vt = MachineValueType(ValueType.I8)
        if loc_vt.value_type == ValueType.I8:
            regs = [AL, DL, CL]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I16:
            regs = [AX, CX, DX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I32:
            regs = [EAX, ECX, EDX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I64:
            regs = [RAX, RCX, RDX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
            regs = [XMM0, XMM1, XMM2]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.V4F32]:
            regs = [XMM0, XMM1, XMM2]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        raise NotImplementedError("The type is unsupporting.")

    def allocate_return_win64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign one return value to a register (Win64).

        NOTE(review): currently identical to allocate_return_x64_cdecl —
        confirm whether Win64-specific return rules are still pending.
        """
        # i1 is widened to i8 before register assignment.
        if loc_vt.value_type == ValueType.I1:
            loc_vt = MachineValueType(ValueType.I8)
        if loc_vt.value_type == ValueType.I8:
            regs = [AL, DL, CL]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I16:
            regs = [AX, CX, DX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I32:
            regs = [EAX, ECX, EDX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I64:
            regs = [RAX, RCX, RDX]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
            regs = [XMM0, XMM1, XMM2]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.V4F32]:
            regs = [XMM0, XMM1, XMM2]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        raise NotImplementedError("The type is unsupporting.")

    def allocate_return(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Dispatch return-value allocation by target OS (Win64 vs SysV)."""
        target_info = ccstate.mfunc.target_info
        if target_info.triple.os == OS.Windows and target_info.is_64bit_mode:
            self.allocate_return_win64_cdecl(
                idx, vt, loc_vt, loc_info, flags, ccstate)
            return
        self.allocate_return_x64_cdecl(
            idx, vt, loc_vt, loc_info, flags, ccstate)

    def allocate_argument_x64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign one argument to a register or stack slot (SysV-style x64)."""
        # Small integers are widened to i32 before assignment.
        if loc_vt.value_type in [ValueType.I1, ValueType.I8, ValueType.I16]:
            loc_vt = MachineValueType(ValueType.I32)
        if loc_vt.value_type == ValueType.I32:
            regs = [EDI, ESI, EDX, ECX, R8D, R9D]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I64:
            regs = [RDI, RSI, RDX, RCX, R8, R9]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.F32, ValueType.F64, ValueType.F128]:
            regs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.V4F32]:
            regs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        # Overflow: integer arguments spill to an 8-byte stack slot.
        # NOTE(review): float types that run out of registers are not
        # covered here and would raise below — confirm intended.
        if loc_vt.value_type in [ValueType.I32, ValueType.I64]:
            stack_offset = ccstate.alloc_stack(8, 8)
            ccstate.assign_stack_value(
                idx, vt, loc_vt, loc_info, stack_offset, flags)
            return False
        raise NotImplementedError("The type is unsupporting.")

    def allocate_argument_win64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign one argument to a register or stack slot (Win64).

        Win64 pairs each integer register with an XMM register (shadow
        allocation), and passes vectors indirectly via an i64 pointer.
        """
        if loc_vt.value_type in [ValueType.V4F32]:
            loc_vt = MachineValueType(ValueType.I64)
            loc_info = CCArgLocInfo.Indirect
        if loc_vt.value_type == ValueType.I8:
            regs1 = [CL, DL, R8B, R9B]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I16:
            regs1 = [CX, DX, R8W, R9W]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I32:
            regs1 = [ECX, EDX, R8D, R9D]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type == ValueType.I64:
            regs1 = [RCX, RDX, R8, R9]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
            regs1 = [XMM0, XMM1, XMM2, XMM3]
            regs2 = [RCX, RDX, R8, R9]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False
        # Overflow arguments spill to an 8-byte stack slot.
        if loc_vt.value_type in [ValueType.I8, ValueType.I16, ValueType.I32, ValueType.I64, ValueType.F32, ValueType.F64]:
            stack_offset = ccstate.alloc_stack(8, 8)
            ccstate.assign_stack_value(
                idx, vt, loc_vt, loc_info, stack_offset, flags)
            return False
        raise NotImplementedError("The type is unsupporting.")

    def allocate_argument(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Dispatch argument allocation by target OS (Win64 vs SysV)."""
        target_info = ccstate.mfunc.target_info
        if target_info.triple.os == OS.Windows and target_info.is_64bit_mode:
            self.allocate_argument_win64_cdecl(
                idx, vt, loc_vt, loc_info, flags, ccstate)
            return
        self.allocate_argument_x64_cdecl(
            idx, vt, loc_vt, loc_info, flags, ccstate)
class X64TargetInstInfo(TargetInstInfo):
    def __init__(self):
        """Initialize the x64 instruction-info helper (no extra state)."""
        super().__init__()
    def copy_phys_reg(self, src_reg, dst_reg, kill_src, inst: MachineInstruction):
        """Emit a register-to-register move after *inst*.

        The MOV variant is chosen from the register classes of the two
        physical registers; raises NotImplementedError for unsupported
        class combinations.  Returns the inserted MachineInstruction.
        """
        assert(isinstance(src_reg, MachineRegister))
        assert(isinstance(dst_reg, MachineRegister))

        def is_hreg(reg):
            # High-byte 8-bit registers (AH/BH/CH/DH).
            return reg in [AH, BH, CH, DH]
        opcode = None
        if src_reg.spec in GR64.regs and dst_reg.spec in GR64.regs:
            opcode = X64MachineOps.MOV64rr
        elif src_reg.spec in GR32.regs and dst_reg.spec in GR32.regs:
            opcode = X64MachineOps.MOV32rr
        elif src_reg.spec in GR16.regs and dst_reg.spec in GR16.regs:
            opcode = X64MachineOps.MOV16rr
        elif src_reg.spec in GR8.regs and dst_reg.spec in GR8.regs:
            # NOTE(review): both branches pick MOV8rr — the h-register
            # distinction currently has no effect; confirm whether a
            # NOREX variant was intended for the h-reg case.
            if is_hreg(src_reg.spec) or is_hreg(dst_reg.spec):
                opcode = X64MachineOps.MOV8rr
            else:
                opcode = X64MachineOps.MOV8rr
        elif src_reg.spec in VR128.regs and dst_reg.spec in VR128.regs:
            opcode = X64MachineOps.MOVAPSrr
        elif src_reg.spec in FR64.regs and dst_reg.spec in FR64.regs:
            opcode = X64MachineOps.MOVSDrr
        elif src_reg.spec in FR32.regs and dst_reg.spec in FR32.regs:
            opcode = X64MachineOps.MOVSSrr
        elif src_reg.spec in VR128.regs:
            if dst_reg.spec in GR64.regs:
                opcode = X64MachineOps.MOVPQIto64rr
        if not opcode:
            raise NotImplementedError(
                "Move instructions support GR64 or GR32 at the present time.")
        copy_inst = MachineInstruction(opcode)
        copy_inst.add_reg(dst_reg, RegState.Define)
        # MOVSS/MOVSD take the destination again as an extra (merged)
        # input operand.
        if opcode in [X64MachineOps.MOVSSrr, X64MachineOps.MOVSDrr]:
            copy_inst.add_reg(dst_reg, RegState.Non)
        copy_inst.add_reg(src_reg, RegState.Kill if kill_src else RegState.Non)
        copy_inst.insert_after(inst)
        return copy_inst
    def copy_reg_to_stack(self, reg, stack_slot, regclass, inst: MachineInstruction):
        """Emit a store of *reg* into *stack_slot* before *inst* (spill).

        The store opcode is chosen from the spill size (derived from the
        register class's first type for the current hardware mode) and
        the register's class.  Returns the inserted MachineInstruction.

        NOTE(review): if the size matches but the register class does
        not (e.g. size 4 with a non-GR32/FR32 reg), ``opcode`` stays
        unbound and this raises UnboundLocalError rather than
        NotImplementedError — confirm that cannot occur.
        """
        hwmode = inst.mbb.func.target_info.hwmode
        tys = regclass.get_types(hwmode)
        align = int(regclass.align / 8)
        # Spill size in bytes, rounded up from bits.
        size = tys[0].get_size_in_bits()
        size = int(int((size + 7) / 8))

        def has_reg_regclass(reg, regclass):
            # Works for both virtual and physical registers.
            if isinstance(reg, MachineVirtualRegister):
                return reg.regclass == regclass
            else:
                return reg.spec in regclass.regs
        if size == 1:
            if has_reg_regclass(reg, GR8):
                opcode = X64MachineOps.MOV8mr
        elif size == 2:
            if has_reg_regclass(reg, GR16):
                opcode = X64MachineOps.MOV16mr
        elif size == 4:
            if has_reg_regclass(reg, GR32):
                opcode = X64MachineOps.MOV32mr
            elif has_reg_regclass(reg, FR32):
                opcode = X64MachineOps.MOVSSmr
        elif size == 8:
            if has_reg_regclass(reg, GR64):
                opcode = X64MachineOps.MOV64mr
            elif has_reg_regclass(reg, FR64):
                opcode = X64MachineOps.MOVSDmr
        elif size == 16:
            if has_reg_regclass(reg, VR128):
                opcode = X64MachineOps.MOVAPSmr
        else:
            raise NotImplementedError(
                "Move instructions support GR64 or GR32 at the present time.")
        copy_inst = MachineInstruction(opcode)
        # x86 memory operand: base, scale, index, displacement, segment.
        noreg = MachineRegister(NOREG)
        copy_inst.add_frame_index(stack_slot)  # base
        copy_inst.add_imm(1)  # scale
        copy_inst.add_reg(noreg, RegState.Non)  # index
        copy_inst.add_imm(0)  # disp
        copy_inst.add_reg(noreg, RegState.Non)  # segment
        copy_inst.add_reg(reg, RegState.Non)
        copy_inst.insert_before(inst)
        return copy_inst
def copy_reg_from_stack(self, reg, stack_slot, regclass, inst: MachineInstruction):
hwmode = inst.mbb.func.target_info.hwmode
tys = regclass.get_types(hwmode)
align = int(regclass.align / 8)
size = tys[0].get_size_in_bits()
size = int(int((size + 7) / 8))
def has_reg_regclass(reg, regclass):
if isinstance(reg, MachineVirtualRegister):
return reg.regclass == regclass
else:
return reg.spec in regclass.regs
if size == 1:
if has_reg_regclass(reg, GR8):
opcode = X64MachineOps.MOV8rm
elif size == 2:
if has_reg_regclass(reg, GR16):
opcode = X64MachineOps.MOV16rm
elif size == 4:
if has_reg_regclass(reg, GR32):
opcode = X64MachineOps.MOV32rm
elif has_reg_regclass(reg, FR32):
opcode = X64MachineOps.MOVSSrm
elif size == 8:
if has_reg_regclass(reg, GR64):
opcode = X64MachineOps.MOV64rm
elif has_reg_regclass(reg, FR64):
opcode = X64MachineOps.MOVSDrm
elif size == 16:
if has_reg_regclass(reg, VR128):
opcode = X64MachineOps.MOVAPSrm
else:
raise NotImplementedError(
"Move instructions support GR64 or GR32 at the present time.")
copy_inst = MachineInstruction(opcode)
noreg = MachineRegister(NOREG)
copy_inst.add_reg(reg, RegState.Define)
copy_inst.add_frame_index(stack_slot) # base
copy_inst.add_imm(1) # scale
copy_inst.add_reg(noreg, RegState.Non) # index
copy_inst.add_imm(0) # disp
copy_inst.add_reg(noreg, RegState.Non) # segment
copy_inst.insert_before(inst)
return copy_inst
def calculate_frame_offset(self, func: MachineFunction, idx):
slot_size = 8
frame = func.frame
stack_obj = func.frame.get_stack_object(idx)
frame_lowering = func.target_info.get_frame_lowering()
if idx < 0:
return stack_obj.offset + frame_lowering.frame_spill_size
return stack_obj.offset
def eliminate_frame_index(self, func: MachineFunction, inst: MachineInstruction, idx):
# Analyze the frame index into a base register and a displacement.
operand = inst.operands[idx]
if isinstance(operand, MOFrameIndex):
base_reg = MachineRegister(RBP)
stack_obj = func.frame.get_stack_object(operand.index)
offset = self.calculate_frame_offset(func, operand.index)
inst.operands[idx] = MOReg(base_reg, RegState.Non)
inst.operands[idx + 3] = MOImm(inst.operands[idx + 3].val + offset)
def optimize_compare_inst(self, func: MachineFunction, inst: MachineInstruction):
# Eliminate destination register.
reginfo = func.reg_info
if reginfo.is_use_empty(inst.operands[0].reg):
if inst.opcode == X64MachineOps.SUB8ri:
inst.opcode = X64MachineOps.CMP8ri
elif inst.opcode == X64MachineOps.SUB32ri:
inst.opcode = X64MachineOps.CMP32ri
elif inst.opcode == X64MachineOps.SUB32rm:
inst.opcode = X64MachineOps.CMP32rm
elif inst.opcode == X64MachineOps.SUB32rr:
inst.opcode = X64MachineOps.CMP32rr
elif inst.opcode == X64MachineOps.SUB64rr:
inst.opcode = X64MachineOps.CMP64rr
else:
raise ValueError("Not supporting instruction.")
remove_op = inst.operands[0]
if remove_op.tied_to >= 0:
tied = inst.operands[remove_op.tied_to]
assert(tied.tied_to == 0)
tied.tied_to = -1
inst.remove_operand(0)
def expand_post_ra_pseudo(self, inst: MachineInstruction):
if inst.opcode == X64MachineOps.V_SET0:
inst.opcode = X64MachineOps.XORPSrr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
if inst.opcode == X64MachineOps.MOV32r0:
inst.opcode = X64MachineOps.XOR32rr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
if inst.opcode == X64MachineOps.MOV64r0:
inst.opcode = X64MachineOps.XOR64rr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
def get_super_regs(reg):
    """Return every register that transitively contains *reg* as a
    sub-register (e.g. AL -> {AX, EAX, RAX})."""
    assert(isinstance(reg, MachineRegisterDef))
    all_regs = MachineRegisterDef.regs
    # Invert the sub-register relation: parents[r] = direct supers of r.
    parents = {r: set() for r in all_regs}
    for candidate in all_regs:
        for sub in candidate.subregs:
            parents[sub].add(candidate)
    # Depth-first transitive closure starting from the direct parents.
    result = set()
    worklist = list(parents[reg])
    while worklist:
        current = worklist.pop()
        if current in result:
            continue
        result.add(current)
        worklist.extend(parents[current])
    return result
def get_sub_regs(reg):
    """Return every register transitively contained in *reg* as a
    sub-register (e.g. RAX -> {EAX, AX, AL, AH})."""
    result = set()
    worklist = list(reg.subregs)
    while worklist:
        current = worklist.pop()
        if current in result:
            continue
        result.add(current)
        worklist.extend(current.subregs)
    return result
def count_if(values, pred):
    """Return the number of elements in *values* for which *pred* is true.

    Uses a generator with ``sum`` instead of materializing an
    intermediate list, so it runs in O(1) extra space and also accepts
    one-shot iterables.
    """
    return sum(1 for v in values if pred(v))
def find_if(values, pred):
    """Return the index of the first element of *values* satisfying
    *pred*, or -1 if no element matches."""
    return next((idx for idx, item in enumerate(values) if pred(item)), -1)
class X64TargetLowering(TargetLowering):
    """DAG lowering for the x64 target.

    Translates target-independent DAG nodes (setcc, conditional
    branches, global/TLS addresses, constants, vector build/shuffle,
    int<->fp conversions, ...) into x64-specific DAG nodes, and handles
    argument lowering, prolog/epilog emission and memcpy expansion.
    """

    def __init__(self):
        super().__init__()
        # Register type per value type; i1 is widened to i8.
        self.reg_type_for_vt = {MachineValueType(
            e): MachineValueType(e) for e in ValueType}
        self.reg_type_for_vt[MachineValueType(
            ValueType.I1)] = MachineValueType(ValueType.I8)
        # Every value type currently occupies a single register.
        self.reg_count_for_vt = {MachineValueType(e): 1 for e in ValueType}

    def get_reg_for_inline_asm_constraint(self, reg_info, code, vt):
        """Resolve an inline-asm register constraint (e.g. ``"{di}"``)
        to a physical register of the width requested by ``vt``.
        Returns None when the constraint is not in the table.
        """
        reg, regclass = None, None

        def is_gr_class(regclass):
            return regclass in [GR8, GR16, GR32, GR64]

        def is_fr_class(regclass):
            return regclass in [FR32, FR64, VR128]

        def get_sub_or_super_reg_for_size(reg, size_in_bits, high=False):
            # Walk to the alias of the requested width; only a few
            # registers are handled so far.
            if size_in_bits == 8:
                raise NotImplementedError()
            elif size_in_bits == 16:
                raise NotImplementedError()
            elif size_in_bits == 32:
                if reg in [CL, CX, ECX, RCX]:
                    return ECX
                elif reg in [DL, DX, EDX, RDX]:
                    return EDX
                raise NotImplementedError()
            elif size_in_bits == 64:
                if reg in [DIL, DI, EDI, RDI]:
                    return RDI
                raise NotImplementedError()
            raise ValueError("Can't found the suitable register")
        TABLE = {
            "{di}": (DI, GR16),
            "{cx}": (CX, GR16),
            "{dx}": (DX, GR16)
        }
        if code in TABLE:
            reg, regclass = TABLE[code]
        if not reg:
            return None
        if is_gr_class(regclass):
            size = vt.get_size_in_bits()
            # NOTE(review): ``rc`` is computed but never used — confirm
            # whether the register class was meant to be returned too.
            if size == 8:
                rc = GR8
            elif size == 16:
                rc = GR16
            elif size == 32:
                rc = GR32
            elif size == 64:
                rc = GR64
            reg = get_sub_or_super_reg_for_size(reg, size)
            return reg
        else:
            raise NotImplementedError()
        raise NotImplementedError()

    def get_register_type(self, vt):
        """Return the register type a value of type ``vt`` is held in."""
        if vt in self.reg_type_for_vt:
            return self.reg_type_for_vt[vt]
        raise NotImplementedError()

    def get_register_count(self, vt):
        """Return how many registers a value of type ``vt`` occupies."""
        if vt in self.reg_count_for_vt:
            return self.reg_count_for_vt[vt]
        raise NotImplementedError()

    def lower_setcc(self, node: DagNode, dag: Dag):
        """Lower SETCC into an x64 SETCC node fed by a flags-producing
        compare: SUB for integers, COMI/UCOMI for floats."""
        op1 = node.operands[0]
        op2 = node.operands[1]
        cond = node.operands[2]
        is_fcmp = op1.node.value_types[0].value_type in [
            ValueType.F32, ValueType.F64]

        def compute_condcode(cond):
            # Map the DAG condition code to a target-constant immediate.
            # The values are presumably x86 condition-code encodings
            # (4=E, 5=NE, 2=B, 7=A, ...) — TODO confirm against the
            # instruction emitter.
            ty = MachineValueType(ValueType.I8)
            swap = False
            cond = cond.node.cond
            # if is_fcmp:
            #     if cond in [CondCode.SETOLT, CondCode.SETOLE, CondCode.SETUGT, CondCode.SETUGE]:
            #         swap = True
            #         if cond == CondCode.SETOLT:
            #             cond = CondCode.SETUGE
            #         elif cond == CondCode.SETOLE:
            #             cond = CondCode.SETUGT
            #         elif cond == CondCode.SETUGT:
            #             cond = CondCode.SETOLE
            #         elif cond == CondCode.SETUGE:
            #             cond = CondCode.SETOLT
            if cond == CondCode.SETEQ:
                node = dag.add_target_constant_node(ty, 4)
            elif cond == CondCode.SETNE:
                node = dag.add_target_constant_node(ty, 5)
            elif cond == CondCode.SETLT:
                node = dag.add_target_constant_node(ty, 12)
            elif cond == CondCode.SETGT:
                node = dag.add_target_constant_node(ty, 15)
            elif cond == CondCode.SETLE:
                node = dag.add_target_constant_node(ty, 14)
            elif cond == CondCode.SETGE:
                node = dag.add_target_constant_node(ty, 13)
            elif cond in [CondCode.SETULT, CondCode.SETOLT]:
                node = dag.add_target_constant_node(ty, 2)
            elif cond in [CondCode.SETUGT, CondCode.SETOGT]:
                node = dag.add_target_constant_node(ty, 7)
            elif cond in [CondCode.SETULE, CondCode.SETOLE]:
                node = dag.add_target_constant_node(ty, 6)
            elif cond in [CondCode.SETUGE, CondCode.SETOGE]:
                node = dag.add_target_constant_node(ty, 3)
            else:
                raise NotImplementedError()
            return node, swap
        condcode, swap = compute_condcode(cond)
        if swap:
            op1, op2 = op2, op1
        if is_fcmp:
            # NOTE(review): ``cond`` here is still the DagValue operand,
            # not a CondCode, so this membership test looks always-false
            # and COMI is always selected — confirm whether
            # ``cond.node.cond`` was intended.
            if cond in [CondCode.SETULT, CondCode.SETUGT, CondCode.SETULE, CondCode.SETUGE]:
                op = X64DagOps.UCOMI
            else:
                op = X64DagOps.COMI
            cmp_node = DagValue(dag.add_node(op,
                                             [MachineValueType(ValueType.I32), MachineValueType(ValueType.GLUE)], op1, op2), 0)
        else:
            op = X64DagOps.SUB
            cmp_node = DagValue(dag.add_node(op,
                                             [op1.ty, MachineValueType(ValueType.I32), MachineValueType(ValueType.GLUE)], op1, op2), 1)
        # operand 1 is eflags.
        setcc_node = dag.add_node(X64DagOps.SETCC, node.value_types,
                                  DagValue(condcode, 0), cmp_node, cmp_node.get_value(cmp_node.index + 1))
        return setcc_node

    def lower_brcond(self, node: DagNode, dag: Dag):
        """Lower BRCOND into an x64 BRCOND driven by a condition code
        and flags.  A non-SETCC boolean condition is first compared
        against zero to synthesize the flags."""
        chain = node.operands[0]
        cond = node.operands[1]
        dest = node.operands[2]
        if cond.node.opcode == VirtualDagOps.SETCC:
            cond = DagValue(self.lower_setcc(cond.node, dag), 0)
        if cond.node.opcode == X64DagOps.SETCC:
            condcode = cond.node.operands[0]
            cond = cond.node.operands[1]
        else:
            # Generic boolean: compare it != 0 and lower that setcc.
            if cond.ty == MachineValueType(ValueType.I1):
                cond = DagValue(dag.add_node(VirtualDagOps.ZERO_EXTEND, [
                    MachineValueType(ValueType.I32)], cond), 0)
            zero = DagValue(dag.add_constant_node(cond.ty, 0), 0)
            condcode = DagValue(dag.add_condition_code_node(CondCode.SETNE), 0)
            cond = DagValue(dag.add_node(VirtualDagOps.SETCC, [
                MachineValueType(ValueType.I1)], cond, zero, condcode), 0)
            cond = DagValue(self.lower_setcc(cond.node, dag), 0)
            condcode = cond.node.operands[0]
            cond = cond.node.operands[1]
        return dag.add_node(X64DagOps.BRCOND, node.value_types, chain, dest, condcode, cond)

    def lower_global_address(self, node: DagNode, dag: Dag):
        """Wrap a global address in a (RIP-relative, unless TLS)
        x64 wrapper node."""
        target_address = dag.add_global_address_node(
            node.value_types[0], node.value, True)
        wrapper_opc = X64DagOps.WRAPPER if node.value.is_thread_local else X64DagOps.WRAPPER_RIP
        return dag.add_node(wrapper_opc, node.value_types, DagValue(target_address, 0))

    def lower_global_tls_address(self, node: DagNode, dag: Dag):
        """Lower a thread-local global address.

        Linux: general-dynamic model via a TLSADDR call whose result
        comes back in RAX.  Windows: read the TLS array from the TEB
        (offset 0x58), index it with ``_tls_index``, then add the
        variable's SECREL offset within the .tls section.
        """
        data_layout = dag.mfunc.func_info.func.module.data_layout
        ptr_ty = self.get_pointer_type(data_layout)
        global_value = node.value
        if dag.mfunc.target_info.machine.options.emulated_tls:
            raise NotImplementedError()
        if dag.mfunc.target_info.triple.os == OS.Linux:
            if global_value.thread_local == ThreadLocalMode.GeneralDynamicTLSModel:
                ga = DagValue(dag.add_global_address_node(
                    ptr_ty, global_value, True), 0)
                ops = [dag.entry, ga]
                chain = DagValue(dag.add_node(X64DagOps.TLSADDR, [
                    MachineValueType(ValueType.OTHER)], *ops), 0)
                # TLSADDR leaves the address in RAX.
                reg_node = DagValue(dag.add_register_node(
                    ptr_ty, MachineRegister(RAX)), 0)
                return dag.add_node(VirtualDagOps.COPY_FROM_REG, [ptr_ty, MachineValueType(
                    ValueType.OTHER)], chain, reg_node)
            raise ValueError("Not supporing TLS model.")
        if dag.mfunc.target_info.triple.os == OS.Windows:
            # TEB slot 0x58 holds the ThreadLocalStoragePointer.
            ptr = get_constant_null_value(PointerType(i8, 256))
            tls_array = DagValue(dag.add_constant_node(ptr_ty, 0x58), 0)
            tls_array = DagValue(dag.add_node(
                X64DagOps.WRAPPER, node.value_types, tls_array), 0)
            thread_ptr = DagValue(dag.add_load_node(
                ptr_ty, dag.entry, tls_array, False, ptr_info=MachinePointerInfo(ptr)), 0)
            if global_value.thread_local == ThreadLocalMode.LocalExecTLSModel:
                raise NotImplementedError()
            else:
                # Load the module's TLS slot index from _tls_index.
                idx = DagValue(dag.add_external_symbol_node(
                    ptr_ty, "_tls_index", False), 0)
                idx = DagValue(dag.add_node(
                    X64DagOps.WRAPPER_RIP, node.value_types, idx), 0)
                idx = DagValue(dag.add_load_node(
                    ptr_ty, dag.entry, idx, False), 0)

                def log2_uint64_cail(value):
                    # NOTE(review): tests bit 64 (``1 << 64``) while
                    # shifting ``value`` left over 63 iterations —
                    # verify this computes ceil(log2) as intended.
                    if value == 0:
                        return 0
                    value = value - 1
                    for i in reversed(range(63)):
                        if (value & (1 << 64)) != 0:
                            return i
                        value = value << 1
                    return 0
                # Scale the slot index by the pointer size.
                scale = DagValue(dag.add_constant_node(
                    MachineValueType(ValueType.I8), log2_uint64_cail(data_layout.get_pointer_size_in_bits())), 0)
                idx = DagValue(dag.add_node(
                    VirtualDagOps.SHL, [ptr_ty], idx, scale), 0)
                thread_ptr = DagValue(dag.add_node(
                    VirtualDagOps.ADD, [ptr_ty], thread_ptr, idx), 0)
                tls_ptr = DagValue(dag.add_load_node(
                    ptr_ty, dag.entry, thread_ptr, False), 0)
                # This value is the offset from the .tls section
                target_address = DagValue(dag.add_global_address_node(
                    node.value_types[0], node.value, True, target_flags=X64OperandFlag.SECREL), 0)
                offset = DagValue(dag.add_node(
                    X64DagOps.WRAPPER, node.value_types, target_address), 0)
                return dag.add_node(VirtualDagOps.ADD, [ptr_ty], tls_ptr, offset)
        raise NotImplementedError()

    def get_pointer_type(self, data_layout, addr_space=0):
        """Return the integer machine type of a pointer in ``addr_space``."""
        return get_int_value_type(data_layout.get_pointer_size_in_bits(addr_space))

    def get_frame_index_type(self, data_layout):
        """Return the integer machine type used for frame indices."""
        return get_int_value_type(data_layout.get_pointer_size_in_bits(0))

    def lower_constant_fp(self, node: DagNode, dag: Dag):
        """Materialize an FP constant as a load from the constant pool."""
        assert(isinstance(node, ConstantFPDagNode))
        data_layout = dag.mfunc.func_info.func.module.data_layout
        ptr_ty = self.get_pointer_type(data_layout)
        constant_pool = dag.add_constant_pool_node(ptr_ty, node.value, False)
        return dag.add_load_node(node.value_types[0], dag.entry, DagValue(constant_pool, 0), False)

    def lower_constant_pool(self, node: DagNode, dag: Dag):
        """Wrap a constant-pool address in a RIP-relative wrapper."""
        assert(isinstance(node, ConstantPoolDagNode))
        target_constant_pool = dag.add_constant_pool_node(
            node.value_types[0], node.value, True)
        return dag.add_node(X64DagOps.WRAPPER_RIP, node.value_types, DagValue(target_constant_pool, 0))

    def lower_build_vector(self, node: DagNode, dag: Dag):
        """Lower BUILD_VECTOR; only the splat case (all operands equal)
        is handled: constant splats are rebuilt with target constants,
        others are broadcast via scalar-to-vector + shuffle."""
        assert(node.opcode == VirtualDagOps.BUILD_VECTOR)
        elm = node.operands[0]
        all_eq = True
        all_constant_fp = True
        for operand in node.operands:
            if elm.node != operand.node or elm.index != operand.index:
                all_eq = False
            if not isinstance(elm.node, ConstantFPDagNode):
                all_constant_fp = False
            elm = operand
        operands = []
        if all_eq:
            if all_constant_fp:
                for operand in node.operands:
                    target_constant_fp = dag.add_target_constant_fp_node(
                        operand.node.value_types[0], operand.node.value)
                    operands.append(DagValue(target_constant_fp, 0))
                return dag.add_node(VirtualDagOps.BUILD_VECTOR, node.value_types, *operands)
            result = self._mm_set_ps1(node.value_types[0], elm, dag)
            return result.node
        else:
            raise NotImplementedError()

    def shuffle_param(self, fp3, fp2, fp1, fp0):
        """Pack four 2-bit lane selectors into a SHUFPS-style imm8."""
        return (fp3 << 6) | (fp2 << 4) | (fp1 << 2) | fp0

    def get_x86_shuffle_mask_v4(self, mask, dag):
        """Encode a 4-lane shuffle mask as a target-constant i8."""
        mask_val = self.shuffle_param(mask[3], mask[2], mask[1], mask[0])
        return DagValue(dag.add_target_constant_node(MachineValueType(ValueType.I8), mask_val), 0)

    def _mm_set_ps1(self, vec_ty, val, dag):
        """Broadcast scalar ``val`` to all lanes of a vector (splat),
        via scalar-to-vector followed by a SHUFP with mask 0."""
        vec = DagValue(dag.add_node(
            VirtualDagOps.SCALAR_TO_VECTOR, [vec_ty], val), 0)
        param = DagValue(dag.add_target_constant_node(MachineValueType(
            ValueType.I8), self.shuffle_param(0, 0, 0, 0)), 0)
        return DagValue(dag.add_node(X64DagOps.SHUFP, vec.node.value_types, vec, vec, param), 0)

    def lower_insert_vector_elt(self, node: DagNode, dag: Dag):
        """Lower INSERT_VECTOR_ELT (constant index only) into a shuffle
        that selects the new element into the target lane."""
        assert(node.opcode == VirtualDagOps.INSERT_VECTOR_ELT)
        vec = node.operands[0]
        elem = node.operands[1]
        idx = node.operands[2]
        if isinstance(idx.node, ConstantDagNode):
            elem_vec = DagValue(dag.add_node(
                VirtualDagOps.SCALAR_TO_VECTOR, [vec.ty], elem), 0)
            num_elems = vec.ty.get_num_vector_elems()
            idx_val = idx.node.value
            # Lane i keeps vec[i], except the target lane which takes
            # lane 0 of the second vector (index num_elems).
            shuffle_idx = []
            for i in range(num_elems):
                if i == idx_val.value:
                    shuffle_idx.append(num_elems)
                else:
                    shuffle_idx.append(i)
            return dag.add_shuffle_vector(vec.ty, vec, elem_vec, shuffle_idx)
        raise ValueError()

    def get_scalar_value_for_vec_elem(self, vec, idx, dag: Dag):
        """Extract the scalar feeding lane ``idx``; only the
        SCALAR_TO_VECTOR / lane-0 case is supported."""
        if vec.node.opcode == VirtualDagOps.SCALAR_TO_VECTOR and idx == 0:
            scalar_val = vec.node.operands[idx]
            return scalar_val
        raise ValueError()

    def lower_shuffle_as_elem_insertion(self, vt, vec1, vec2, mask, dag: Dag):
        """Lower a shuffle that inserts a single element of ``vec2``
        into ``vec1`` as a MOVSS/MOVSD (currently f32 only)."""
        vec2_idx = find_if(mask, lambda m: m >= len(mask))
        elem_vt = vt.get_vector_elem_type()
        assert(elem_vt.value_type == ValueType.F32)
        vec2_elem = self.get_scalar_value_for_vec_elem(vec2, vec2_idx, dag)
        vec2 = DagValue(dag.add_node(
            VirtualDagOps.SCALAR_TO_VECTOR, [vt], vec2_elem), 0)
        if elem_vt.value_type == ValueType.F32:
            opcode = X64DagOps.MOVSS
        else:
            opcode = X64DagOps.MOVSD
        return dag.add_node(opcode, [vt], vec1, vec2)

    def lower_shuffle_shufps(self, vt, vec1, vec2, mask, dag: Dag):
        """Lower a 4-lane shuffle using SHUFPS instructions; at most one
        element may come from ``vec2`` at present."""
        assert(len(mask) == 4)
        num_vec2_elems = count_if(mask, lambda m: m >= 4)
        new_mask = list(mask)
        lo_vec, hi_vec = vec1, vec2
        if num_vec2_elems == 1:
            vec2_idx = find_if(mask, lambda m: m >= 4)
            # Each element of the vector is divided into groups of two elements.
            # If the index is odd, the index of the other element is even.
            vec2_idx_adj = vec2_idx ^ 1
            # Merge the vectors.
            blend_mask = [mask[vec2_idx] - 4, 0, mask[vec2_idx_adj], 0]
            vec2 = DagValue(dag.add_node(X64DagOps.SHUFP, [
                vt], vec2, vec1, self.get_x86_shuffle_mask_v4(blend_mask, dag)), 0)
            if vec2_idx < 2:
                lo_vec = vec2
                hi_vec = vec1
            else:
                lo_vec = vec1
                hi_vec = vec2
            new_mask[vec2_idx] = 0
            new_mask[vec2_idx_adj] = 2
        elif num_vec2_elems == 2:
            raise NotImplementedError()
        return dag.add_node(X64DagOps.SHUFP, [vt], lo_vec, hi_vec, self.get_x86_shuffle_mask_v4(new_mask, dag))

    def lower_v4f32_shuffle(self, node: DagNode, dag: Dag):
        """Lower a v4f32 shuffle: single-element insertion into lane 0
        becomes MOVSS; everything else goes through SHUFPS."""
        vec1 = node.operands[0]
        vec2 = node.operands[1]
        mask = node.mask
        num_vec2_elems = count_if(mask, lambda m: m >= 4)
        if num_vec2_elems == 1 and mask[0] >= 4:
            return self.lower_shuffle_as_elem_insertion(MachineValueType(ValueType.V4F32), vec1, vec2, mask, dag)
        return self.lower_shuffle_shufps(MachineValueType(ValueType.V4F32), vec1, vec2, mask, dag)

    def lower_shuffle_vector(self, node: DagNode, dag: Dag):
        """Lower SHUFFLE_VECTOR; only v4f32 is supported."""
        if node.value_types[0] == MachineValueType(ValueType.V4F32):
            return self.lower_v4f32_shuffle(node, dag)
        raise ValueError()

    def lower_sub(self, node: DagNode, dag: Dag):
        """Lower SUB to the x64 SUB node (which also produces flags)."""
        return dag.add_node(X64DagOps.SUB, node.value_types, *node.operands)

    def lower_atomic_fence(self, node: DagNode, dag: Dag):
        """Lower an atomic fence; seq-cst would need MFENCE (not yet
        implemented), weaker orderings become a compiler-only barrier."""
        ordering = node.operands[1].node.value.value
        if ordering == AtomicOrdering.SequentiallyConsistent.value:
            raise NotImplementedError()
        return dag.add_node(X64DagOps.MEMBARRIER, node.value_types, node.operands[0])

    def lower_div(self, node: DagNode, dag: Dag):
        """Lower SDIV/UDIV to the combined divrem node (x86 DIV/IDIV
        produce quotient and remainder together)."""
        is_signed = node.opcode == VirtualDagOps.SDIV
        divrem_opc = VirtualDagOps.SDIVREM if is_signed else VirtualDagOps.UDIVREM
        value_ty = node.value_types[0]
        return dag.add_node(divrem_opc, [value_ty, value_ty], *node.operands)

    def lower_fp_to_int(self, node: DagNode, dag: Dag):
        """Lower FP->int conversion: go through the natural register
        width (i64 for f64, i32 for f32), then truncate if needed.
        NOTE(review): ``is_signed`` is computed but unused — the
        unsigned case is lowered as signed; confirm intended.
        """
        is_signed = node.opcode == VirtualDagOps.FP_TO_SINT
        src = node.operands[0]
        value_ty = node.value_types[0]
        if src.ty == MachineValueType(ValueType.F64):
            value = DagValue(dag.add_node(VirtualDagOps.FP_TO_SINT, [
                MachineValueType(ValueType.I64)], *node.operands), 0)
            if value.ty == value_ty:
                return value.node
            return dag.add_node(VirtualDagOps.TRUNCATE, [value_ty], value)
        elif src.ty == MachineValueType(ValueType.F32):
            value = DagValue(dag.add_node(VirtualDagOps.FP_TO_SINT, [
                MachineValueType(ValueType.I32)], *node.operands), 0)
            if value.ty == value_ty:
                return value.node
            return dag.add_node(VirtualDagOps.TRUNCATE, [value_ty], value)
        raise NotImplementedError()

    def get_unpackl(self, value_ty: MachineValueType, v1: DagValue, v2: DagValue, dag: Dag):
        """Build an unpack-low shuffle (UNPCKL-style interleave of the
        low halves of ``v1`` and ``v2``)."""
        def get_unpack_shuffle_mask(value_ty, lo, unary):
            num_elem = value_ty.get_num_vector_elems()
            num_elem_in_lane = 128 / value_ty.get_vector_elem_size_in_bits()
            mask = []
            for i in range(num_elem):
                lane_start = int(int(i / num_elem_in_lane) * num_elem_in_lane)
                # NOTE(review): ``+`` binds tighter than ``>>`` in
                # Python, so this is (i % lane) >> (2 + lane_start) —
                # confirm the intended grouping (LLVM uses
                # (i % lane) / 2 + lane_start here).
                pos = (i % num_elem_in_lane) >> 2 + lane_start
                pos += (0 if unary else (num_elem * (i % 2)))
                pos += (0 if lo else (num_elem_in_lane >> 1))
                mask.append(pos)
            return mask
        shuffle_idx = get_unpack_shuffle_mask(value_ty, True, False)
        return dag.add_shuffle_vector(value_ty, v1, v2, shuffle_idx)

    def lower_uint_to_fp(self, node: DagNode, dag: Dag):
        """Lower UINT_TO_FP.

        i32 sources are zero-extended to i64 and converted signed.  For
        i64 -> f64 this emits what looks like the classic SSE2
        bit-twiddling sequence (split into hi/lo words biased by
        2^52/2^84, subtract the biases, add the halves) — TODO confirm
        against a reference implementation.
        """
        data_layout = dag.mfunc.func_info.func.module.data_layout
        target_lowering = dag.mfunc.target_info.get_lowering()
        ptr_ty = target_lowering.get_frame_index_type(data_layout)
        is_signed = node.opcode == VirtualDagOps.FP_TO_SINT
        src = node.operands[0]
        value_ty = node.value_types[0]

        def int_to_double(value):
            # Reinterpret a 64-bit integer bit pattern as a double.
            from struct import unpack, pack
            bys = pack("q", value)
            return unpack('d', bys)[0]
        if src.ty == MachineValueType(ValueType.I32):
            src = DagValue(dag.add_node(VirtualDagOps.ZERO_EXTEND, [
                MachineValueType(ValueType.I64)], src), 0)
            return dag.add_node(VirtualDagOps.SINT_TO_FP, [value_ty], src)
        if src.ty == MachineValueType(ValueType.I64) and value_ty == MachineValueType(ValueType.F64):
            cv0 = [0x43300000, 0x45300000, 0, 0]
            c0 = ConstantVector(cv0, VectorType("v4i32", i32, 4))
            cp0 = DagValue(dag.add_constant_pool_node(ptr_ty, c0, align=16), 0)
            cv2 = [int_to_double(0x4330000000000000),
                   int_to_double(0x4530000000000000)]
            c2 = ConstantVector(cv2, VectorType("v2f64", f64, 2))
            cp2 = DagValue(dag.add_constant_pool_node(ptr_ty, c2, align=16), 0)
            src_vec = DagValue(dag.add_node(VirtualDagOps.SCALAR_TO_VECTOR, [
                MachineValueType(ValueType.V2I64)], src), 0)
            exp_part_vec = DagValue(dag.add_load_node(
                MachineValueType(ValueType.V4I32), dag.entry, cp0), 0)
            unpack1 = DagValue(dag.add_node(VirtualDagOps.BITCAST, [
                MachineValueType(ValueType.V4I32)], src_vec), 0)
            unpack1 = DagValue(self.get_unpackl(
                unpack1.ty, unpack1, exp_part_vec, dag), 0)
            cst_val2 = DagValue(dag.add_load_node(
                MachineValueType(ValueType.V2F64), dag.entry, cp2), 0)
            unpack1 = DagValue(dag.add_node(VirtualDagOps.BITCAST, [
                MachineValueType(ValueType.V2F64)], unpack1), 0)
            sub_val = DagValue(dag.add_node(VirtualDagOps.FSUB, [
                MachineValueType(ValueType.V2F64)], unpack1, cst_val2), 0)
            shuffle_val = DagValue(dag.add_shuffle_vector(
                MachineValueType(ValueType.V2F64), unpack1, unpack1, [1, -1]), 0)
            add_val = DagValue(dag.add_node(VirtualDagOps.FADD, [
                MachineValueType(ValueType.V2F64)], sub_val, shuffle_val), 0)
            zero_val = DagValue(dag.add_target_constant_node(
                MachineValueType(ValueType.I32), 0), 0)
            return dag.add_node(VirtualDagOps.EXTRACT_VECTOR_ELT, [MachineValueType(ValueType.F64)], add_val, zero_val)
        return node

    def lower(self, node: DagNode, dag: Dag):
        """Dispatch a target-independent DAG node to its x64 lowering;
        unhandled opcodes pass through unchanged."""
        if node.opcode == VirtualDagOps.ENTRY:
            return dag.entry.node
        if node.opcode == VirtualDagOps.BRCOND:
            return self.lower_brcond(node, dag)
        elif node.opcode == VirtualDagOps.SETCC:
            return self.lower_setcc(node, dag)
        elif node.opcode == VirtualDagOps.SUB:
            return self.lower_sub(node, dag)
        elif node.opcode in [VirtualDagOps.SDIV, VirtualDagOps.UDIV]:
            return self.lower_div(node, dag)
        elif node.opcode in [VirtualDagOps.FP_TO_SINT, VirtualDagOps.FP_TO_UINT]:
            return self.lower_fp_to_int(node, dag)
        elif node.opcode == VirtualDagOps.UINT_TO_FP:
            return self.lower_uint_to_fp(node, dag)
        elif node.opcode == VirtualDagOps.GLOBAL_ADDRESS:
            return self.lower_global_address(node, dag)
        elif node.opcode == VirtualDagOps.GLOBAL_TLS_ADDRESS:
            return self.lower_global_tls_address(node, dag)
        elif node.opcode == VirtualDagOps.CONSTANT_FP:
            return self.lower_constant_fp(node, dag)
        elif node.opcode == VirtualDagOps.CONSTANT_POOL:
            return self.lower_constant_pool(node, dag)
        elif node.opcode == VirtualDagOps.BUILD_VECTOR:
            return self.lower_build_vector(node, dag)
        elif node.opcode == VirtualDagOps.SHUFFLE_VECTOR:
            return self.lower_shuffle_vector(node, dag)
        elif node.opcode == VirtualDagOps.INSERT_VECTOR_ELT:
            return self.lower_insert_vector_elt(node, dag)
        elif node.opcode == VirtualDagOps.ATOMIC_FENCE:
            return self.lower_atomic_fence(node, dag)
        else:
            return node

    def lower_arguments(self, func: Function, builder: DagBuilder):
        """Lower incoming function arguments.

        Computes the calling-convention layout, then materializes each
        argument either as a copy-from-register (live-in) or as a load
        from a fixed stack object, and records the value/register
        mapping for the function (including the sret demoted return).
        """
        arg_load_chains = []
        chain = builder.root
        mfunc = builder.mfunc
        calling_conv = mfunc.target_info.get_calling_conv()
        reg_info = mfunc.target_info.get_register_info()
        data_layout = func.module.data_layout
        target_lowering = mfunc.target_info.get_lowering()
        ptr_ty = target_lowering.get_frame_index_type(data_layout)
        # Flatten every argument into per-register CallingConvArg items.
        args = []
        for i, arg in enumerate(func.args):
            vts = compute_value_types(arg.ty, data_layout)
            offset_in_arg = 0
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                for reg_idx in range(reg_count):
                    flags = CCArgFlags()
                    args.append(CallingConvArg(
                        vt, reg_vt, i, offset_in_arg, flags))
                    offset_in_arg += reg_vt.get_size_in_byte()
        ccstate = CallingConvState(calling_conv, mfunc)
        ccstate.compute_arguments_layout(args)
        # Materialize each located argument value.
        arg_vals = []
        for arg_val in ccstate.values:
            arg_vt = arg_val.loc_vt
            if isinstance(arg_val, CCArgReg):
                if arg_vt.value_type == ValueType.I8:
                    regclass = GR8
                elif arg_vt.value_type == ValueType.I16:
                    regclass = GR16
                elif arg_vt.value_type == ValueType.I32:
                    regclass = GR32
                elif arg_vt.value_type == ValueType.I64:
                    regclass = GR64
                elif arg_vt.value_type == ValueType.F32:
                    regclass = FR32
                elif arg_vt.value_type == ValueType.F64:
                    regclass = FR64
                elif arg_vt.value_type == ValueType.V4F32:
                    regclass = VR128
                else:
                    raise ValueError()
                reg = mfunc.reg_info.create_virtual_register(regclass)
                mfunc.reg_info.add_live_in(MachineRegister(arg_val.reg), reg)
                reg_node = DagValue(
                    builder.g.add_register_node(arg_vt, reg), 0)
                arg_val_node = DagValue(
                    builder.g.add_copy_from_reg_node(arg_vt, reg_node), 0)
            else:
                assert(isinstance(arg_val, CCArgMem))
                size = arg_vt.get_size_in_byte()
                offset = arg_val.offset
                # The +32 presumably accounts for the Win64 32-byte
                # shadow space — TODO confirm for the SysV path.
                frame_idx = builder.mfunc.frame.create_fixed_stack_object(
                    size, offset + 32)
                frame_idx_node = DagValue(
                    builder.g.add_frame_index_node(ptr_ty, frame_idx), 0)
                arg_val_node = DagValue(builder.g.add_load_node(
                    arg_vt, builder.root, frame_idx_node, False), 0)
            if arg_val.loc_info == CCArgLocInfo.Indirect:
                # Value was passed by pointer: load through it.
                arg_val_node = DagValue(builder.g.add_load_node(
                    arg_val.vt, builder.root, arg_val_node, False), 0)
            arg_vals.append(arg_val_node)
        # Re-associate located values with the IR arguments and record
        # the register mapping.
        arg_idx = 0
        for i, arg in enumerate(func.args):
            vts = compute_value_types(arg.ty, data_layout)
            offset_in_arg = 0
            arg_parts = []
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                if reg_count > 1:
                    raise NotImplementedError()
                arg_parts.append(arg_vals[arg_idx])
                arg_idx += reg_count
            val = builder.g.add_merge_values(arg_parts)
            if val.node.opcode == VirtualDagOps.COPY_FROM_REG:
                reg = val.node.operands[1].node.reg
                if isinstance(reg, MachineVirtualRegister):
                    builder.func_info.reg_value_map[arg] = [reg]
            else:
                reg_info = builder.reg_info
                for ty, arg_part in zip(vts, arg_parts):
                    reg_vt = reg_info.get_register_type(ty)
                    reg_count = reg_info.get_register_count(ty)
                    regs = []
                    reg_vals = []
                    for idx in range(reg_count):
                        vreg = target_lowering.get_machine_vreg(
                            reg_vt)
                        reg = builder.mfunc.reg_info.create_virtual_register(
                            vreg)
                        regs.append(reg)
                        reg_vals.append(
                            DagValue(builder.g.add_register_node(reg_vt, reg), 0))
                    chain = get_copy_to_parts(
                        arg_part, reg_vals, reg_vt, chain, builder.g)
                builder.func_info.reg_value_map[arg] = regs
            builder.set_inst_value(arg, val)
        builder.root = chain
        # An sret (StructRet) first argument carries the demoted return
        # value; remember its registers for the epilogue.
        has_demote_arg = len(func.args) > 0 and func.args[0].has_attribute(
            AttributeKind.StructRet)
        if has_demote_arg:
            demote_arg = func.args[0]
            builder.func_info.sret_reg = builder.func_info.reg_value_map[demote_arg]
        else:
            builder.func_info.sret_reg = None
        # builder.root = DagValue(DagNode(VirtualDagOps.TOKEN_FACTOR, [
        #     MachineValueType(ValueType.OTHER)], arg_load_chains), 0)

    def is_frame_op(self, inst):
        """Return True for call-frame setup/teardown pseudo instructions."""
        if inst.opcode == X64MachineOps.ADJCALLSTACKDOWN32:
            return True
        if inst.opcode == X64MachineOps.ADJCALLSTACKUP32:
            return True
        return False

    def lower_prolog(self, func: MachineFunction, bb: MachineBasicBlock):
        """Emit the function prologue at the top of ``bb``:
        push rbp; mov rbp, rsp; save callee-saved regs; sub rsp, size.
        """
        inst_info = func.target_info.get_inst_info()
        frame_info = func.target_info.get_frame_lowering()
        reg_info = func.target_info.get_register_info()
        data_layout = func.func_info.func.module.data_layout
        front_inst = bb.insts[0]
        push_rbp_inst = MachineInstruction(X64MachineOps.PUSH64r)
        push_rbp_inst.add_reg(MachineRegister(RBP), RegState.Non)
        push_rbp_inst.add_reg(MachineRegister(RSP), RegState.ImplicitDefine)
        push_rbp_inst.insert_before(front_inst)
        mov_esp_inst = MachineInstruction(X64MachineOps.MOV64rr)
        mov_esp_inst.add_reg(MachineRegister(RBP), RegState.Define)  # To
        mov_esp_inst.add_reg(MachineRegister(RSP), RegState.Non)  # From
        mov_esp_inst.insert_before(front_inst)
        # The stack and base pointer is aligned by 16 bytes here.
        for cs_info in func.frame.calee_save_info:
            reg = cs_info.reg
            regclass = reg_info.get_regclass_from_reg(reg)
            frame_idx = cs_info.frame_idx
            inst_info.copy_reg_to_stack(MachineRegister(
                reg), frame_idx, regclass, front_inst)
        stack_size = func.frame.estimate_stack_size(
            X64MachineOps.ADJCALLSTACKDOWN32, X64MachineOps.ADJCALLSTACKUP32)
        # Round the frame size up to the maximum required alignment.
        max_align = max(func.frame.max_alignment, func.frame.stack_alignment)
        stack_size = int(
            int((stack_size + max_align - 1) / max_align) * max_align)
        sub_esp_inst = MachineInstruction(X64MachineOps.SUB64ri)
        sub_esp_inst.add_reg(MachineRegister(RSP), RegState.Define)
        sub_esp_inst.add_reg(MachineRegister(RSP), RegState.Non)
        sub_esp_inst.add_imm(stack_size)
        sub_esp_inst.insert_before(front_inst)

    def lower_epilog(self, func: MachineFunction, bb: MachineBasicBlock):
        """Emit the function epilogue before the terminator of ``bb``:
        restore callee-saved regs; mov rsp, rbp; pop rbp.
        """
        inst_info = func.target_info.get_inst_info()
        reg_info = func.target_info.get_register_info()
        data_layout = func.func_info.func.module.data_layout
        front_inst = bb.insts[-1]
        for cs_info in func.frame.calee_save_info:
            reg = cs_info.reg
            regclass = reg_info.get_regclass_from_reg(reg)
            frame_idx = cs_info.frame_idx
            inst_info.copy_reg_from_stack(MachineRegister(
                reg), frame_idx, regclass, front_inst)
        restore_esp_inst = MachineInstruction(X64MachineOps.MOV64rr)
        restore_esp_inst.add_reg(MachineRegister(RSP), RegState.Define)  # To
        restore_esp_inst.add_reg(MachineRegister(RBP), RegState.Non)  # From
        restore_esp_inst.insert_before(front_inst)
        pop_rbp_inst = MachineInstruction(X64MachineOps.POP64r)
        pop_rbp_inst.add_reg(MachineRegister(RBP), RegState.Non)
        pop_rbp_inst.add_reg(MachineRegister(RSP), RegState.ImplicitDefine)
        pop_rbp_inst.insert_before(front_inst)

    def eliminate_call_frame_pseudo_inst(self, func, inst: MachineInstruction):
        """Remove ADJCALLSTACK pseudos; the fixed frame already reserves
        the outgoing-argument area."""
        inst.remove()

    def get_machine_vreg(self, ty: MachineValueType):
        """Return the register class used to hold a value of type ``ty``."""
        if ty.value_type == ValueType.I1:
            return GR8
        elif ty.value_type == ValueType.I8:
            return GR8
        elif ty.value_type == ValueType.I16:
            return GR16
        elif ty.value_type == ValueType.I32:
            return GR32
        elif ty.value_type == ValueType.I64:
            return GR64
        elif ty.value_type == ValueType.F32:
            return FR32
        elif ty.value_type == ValueType.F64:
            return FR64
        elif ty.value_type == ValueType.V4F32:
            return VR128
        raise NotImplementedError()

    def lower_optimal_memory_op(self, size, src_op, dst_op, src_align, dst_align, builder: DagBuilder):
        """Expand a memory copy of ``size`` bytes into i32 load/store
        pairs chained through a TOKEN_FACTOR.

        NOTE(review): ``copy_ty`` is always i32 even when
        ``copy_size < 4``, so a tail smaller than 4 bytes is still
        loaded/stored as 4 bytes — confirm tail handling is intended.
        """
        chain = builder.root
        is_volatile = False
        offset = 0
        chains = []
        while offset < size:
            copy_size = min(4, size - offset)
            copy_ty = MachineValueType(ValueType.I32)
            # Advance the source pointer by the current offset.
            if offset != 0:
                src_ty = src_op.ty
                size_node = DagValue(
                    builder.g.add_target_constant_node(src_ty, offset), 0)
                src_ptr = DagValue(builder.g.add_node(
                    VirtualDagOps.ADD, [src_ty], src_op, size_node), 0)
            else:
                src_ptr = src_op
            # Advance the destination pointer likewise.
            if offset != 0:
                dst_ty = dst_op.ty
                size_node = DagValue(
                    builder.g.add_target_constant_node(dst_ty, offset), 0)
                dst_ptr = DagValue(builder.g.add_node(
                    VirtualDagOps.ADD, [dst_ty], dst_op, size_node), 0)
            else:
                dst_ptr = dst_op
            load_op = builder.g.add_load_node(
                copy_ty, chain, src_ptr, is_volatile)
            store_op = builder.g.add_store_node(
                chain, dst_ptr, DagValue(load_op, 0))
            chains.extend([DagValue(store_op, 0)])
            offset += copy_size
        builder.root = DagValue(builder.g.add_node(VirtualDagOps.TOKEN_FACTOR, [
            MachineValueType(ValueType.OTHER)], *chains), 0)
class X64TargetRegisterInfo(TargetRegisterInfo):
    """Register information for the x64 target: reserved, allocatable,
    callee-saved and caller-clobbered register sets per OS ABI."""

    def __init__(self, target_info):
        super().__init__()
        self.target_info = target_info
        self.triple = target_info.triple

    def get_reserved_regs(self):
        # The stack and frame pointers — in every access width — are
        # never available to the allocator.
        return [SPL, BPL, SP, BP, ESP, EBP, RSP, RBP]

    @property
    def allocatable_regs(self):
        """All registers the allocator may hand out, across classes."""
        result = set()
        for regclass in (GR64, GR32, GR16, GR8, FR32, FR64, VR128):
            result.update(regclass.regs)
        return result

    def get_callee_saved_regs(self):
        """Registers the callee must preserve, per the target ABI."""
        if self.triple.arch == ArchType.X86_64:
            if self.triple.os == OS.Windows:
                return [RBX, RDI, RSI, R12, R13, R14, R15, XMM6,
                        XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]
            if self.triple.os == OS.Linux:
                return [RBX, R12, R13, R14, R15, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]
        raise Exception("Unsupporting architecture.")

    def get_callee_clobbered_regs(self):
        """Registers a call may clobber (caller-saved), per the ABI."""
        if self.triple.arch == ArchType.X86_64:
            if self.triple.os == OS.Windows:
                return [RAX, RCX, RDX, R8, R9, R10, R11,
                        XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]
            if self.triple.os == OS.Linux:
                return [RAX, RDI, RSI, RCX, RDX, R8, R9, R10, R11,
                        XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
        raise Exception("Unsupporting architecture.")

    def get_ordered_regs(self, regclass):
        """Allocation order for ``regclass``: its registers in declared
        order, with reserved registers filtered out."""
        blocked = set(self.get_reserved_regs())
        return [reg for reg in regclass.regs if reg not in blocked]

    def get_regclass_for_vt(self, vt):
        """Return the first register class whose types (under the
        current hardware mode) include ``vt``."""
        hwmode = self.target_info.hwmode
        for candidate in x64_regclasses:
            if vt in candidate.get_types(hwmode):
                return candidate
        raise ValueError("Could not find the register class.")
class X64FrameLowering(TargetFrameLowering):
    """Stack frame layout parameters for x64."""

    def __init__(self, alignment):
        super().__init__(alignment)
        # 16 bytes — presumably the return address plus the saved frame
        # pointer (8 + 8); confirm against prologue emission.
        self.frame_spill_size = 16

    @property
    def stack_grows_direction(self):
        # x64 stacks grow toward lower addresses.
        return StackGrowsDirection.Down
class X64Legalizer(Legalizer):
    """Type legalization for x64.

    i1 results/operands are promoted to i8 (the narrowest type a GR8
    register holds); i16 operands of int-to-fp conversions are widened
    to i32 before conversion.
    """

    def __init__(self):
        super().__init__()

    def promote_integer_result_setcc(self, node, dag, legalized):
        # Re-emit the comparison so that it produces i8 instead of i1.
        lhs = get_legalized_op(node.operands[0], legalized)
        rhs = get_legalized_op(node.operands[1], legalized)
        cond = node.operands[2]
        setcc_ty = MachineValueType(ValueType.I8)
        return dag.add_node(node.opcode, [setcc_ty], lhs, rhs, cond)

    def promote_integer_result_bin(self, node, dag, legalized):
        # Binary ops are retyped to the (already promoted) operand type.
        lhs = get_legalized_op(node.operands[0], legalized)
        rhs = get_legalized_op(node.operands[1], legalized)
        assert(lhs.ty == rhs.ty)
        return dag.add_node(node.opcode, [lhs.ty], lhs, rhs)

    def promote_integer_result_truncate(self, node, dag, legalized):
        # Truncations to i1 now truncate to i8 instead.
        new_vt = MachineValueType(ValueType.I8)
        return dag.add_node(VirtualDagOps.TRUNCATE, [new_vt], *node.operands)

    def promote_integer_result_constant(self, node, dag, legalized):
        # i1 constants are materialized directly as i8 constants.
        # BUGFIX: the original had a second, unreachable
        # `return dag.add_node(ZERO_EXTEND, ...)` after this return; removed.
        new_vt = MachineValueType(ValueType.I8)
        return dag.add_constant_node(new_vt, node.value)

    def promote_integer_result(self, node, dag, legalized):
        """Dispatch i1-result promotion by node opcode."""
        if node.opcode == VirtualDagOps.SETCC:
            return self.promote_integer_result_setcc(node, dag, legalized)
        elif node.opcode in [VirtualDagOps.ADD, VirtualDagOps.SUB, VirtualDagOps.AND, VirtualDagOps.OR, VirtualDagOps.XOR]:
            return self.promote_integer_result_bin(node, dag, legalized)
        elif node.opcode == VirtualDagOps.TRUNCATE:
            return self.promote_integer_result_truncate(node, dag, legalized)
        elif node.opcode in [VirtualDagOps.LOAD]:
            # i1 loads become i8 loads from the same address.
            chain = node.operands[0]
            ptr = get_legalized_op(node.operands[1], legalized)
            return dag.add_load_node(MachineValueType(ValueType.I8), chain, ptr, False, mem_operand=node.mem_operand)
        elif node.opcode == VirtualDagOps.CONSTANT:
            return self.promote_integer_result_constant(node, dag, legalized)
        else:
            raise ValueError("No method to promote.")

    def legalize_node_result(self, node: DagNode, dag: Dag, legalized):
        # Only i1 results need promotion on this target.
        for vt in node.value_types:
            if vt.value_type == ValueType.I1:
                return self.promote_integer_result(node, dag, legalized)
        return node

    def promote_integer_operand_brcond(self, node, dag: Dag, legalized):
        # Rebuild the branch with the promoted (i8) condition operand.
        chain_op = node.operands[0]
        cond_op = get_legalized_op(node.operands[1], legalized)
        dst_op = node.operands[2]
        return dag.add_node(VirtualDagOps.BRCOND, node.value_types, chain_op, cond_op, dst_op)

    def promote_integer_operand_zext(self, node, dag: Dag, legalized):
        # zext-from-i1: the promoted source is already wider; either it is
        # the wanted type, or we truncate down to it.
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == node.value_types[0]:
            return src_op.node
        return dag.add_node(VirtualDagOps.TRUNCATE, node.value_types, src_op)

    def promote_integer_operand_uint_to_fp(self, node, dag: Dag, legalized):
        # Widen the i16 source to i32 with a zero-extend before converting.
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == MachineValueType(ValueType.I16):
            promoted_ty = MachineValueType(ValueType.I32)
        else:
            raise NotImplementedError()
        promoted = DagValue(dag.add_node(
            VirtualDagOps.ZERO_EXTEND, [promoted_ty], src_op), 0)
        return dag.add_node(VirtualDagOps.UINT_TO_FP, node.value_types, promoted)

    def promote_integer_operand_sint_to_fp(self, node, dag: Dag, legalized):
        # Widen the i16 source to i32 with a sign-extend before converting.
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == MachineValueType(ValueType.I16):
            promoted_ty = MachineValueType(ValueType.I32)
        else:
            raise NotImplementedError()
        promoted = DagValue(dag.add_node(
            VirtualDagOps.SIGN_EXTEND, [promoted_ty], src_op), 0)
        return dag.add_node(VirtualDagOps.SINT_TO_FP, node.value_types, promoted)

    def legalize_node_operand(self, node, i, dag: Dag, legalized):
        """Legalize operand *i* of *node*; return None when nothing to do."""
        operand = node.operands[i]
        vt = operand.ty
        if vt.value_type == ValueType.I1:
            if node.opcode == VirtualDagOps.BRCOND:
                return self.promote_integer_operand_brcond(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.ZERO_EXTEND:
                return self.promote_integer_operand_zext(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.STORE:
                # i1 stores are re-emitted with the promoted value operand.
                op_chain = node.operands[0]
                op_val = get_legalized_op(node.operands[1], legalized)
                op_ptr = node.operands[2]
                return dag.add_store_node(op_chain, op_ptr, op_val, False, mem_operand=node.mem_operand)
        if vt.value_type == ValueType.I16:
            if node.opcode == VirtualDagOps.SINT_TO_FP:
                return self.promote_integer_operand_sint_to_fp(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.UINT_TO_FP:
                return self.promote_integer_operand_uint_to_fp(
                    node, dag, legalized)
        return None
class X64TargetInfo(TargetInfo):
    """Aggregates the x64 target components (instruction info, lowering,
    register info, calling convention, ISel, legalizer, frame lowering)."""

    def __init__(self, triple, machine):
        super().__init__(triple)
        self.machine = machine

    def get_inst_info(self) -> TargetInstInfo:
        return X64TargetInstInfo()

    def is_64bit_mode(self):
        return self.triple.arch == ArchType.X86_64

    def get_lowering(self) -> TargetLowering:
        return X64TargetLowering()

    def get_register_info(self) -> TargetRegisterInfo:
        return X64TargetRegisterInfo(self)

    def get_calling_conv(self) -> CallingConv:
        return X86CallingConv()

    def get_instruction_selector(self):
        return X64InstructionSelector()

    def get_legalizer(self):
        return X64Legalizer()

    def get_frame_lowering(self) -> TargetFrameLowering:
        # 16-byte stack alignment.
        return X64FrameLowering(16)

    @property
    def hwmode(self) -> MachineHWMode:
        # Only 64-bit x86 is supported.
        if self.triple.arch == ArchType.X86_64:
            return X64
        raise ValueError("Invalid arch type")
class X64TargetMachine(TargetMachine):
    """Top-level target machine for x64 code generation."""

    def __init__(self, triple, options):
        super().__init__(options)
        self.triple = triple

    def get_target_info(self, func: Function):
        return X64TargetInfo(self.triple, self)

    def add_mc_emit_passes(self, pass_manager, mccontext, output, is_asm):
        """Append the MC emission pass to *pass_manager*, writing either
        Intel-syntax assembly text or a COFF/ELF object file to *output*.

        Raises NotImplementedError for any other object format.
        """
        # Imported lazily to avoid import cycles with the printer modules.
        from codegen.x64_asm_printer import X64AsmInfo, MCAsmStream, X64CodeEmitter, X64AsmBackend, X64AsmPrinter, X64IntelInstPrinter
        from codegen.coff import WinCOFFObjectWriter, WinCOFFObjectStream
        from codegen.elf import ELFObjectStream, ELFObjectWriter, X64ELFObjectWriter
        objformat = self.triple.objformat
        mccontext.asm_info = X64AsmInfo()
        if is_asm:
            printer = X64IntelInstPrinter()
            stream = MCAsmStream(mccontext, output, printer)
        else:
            emitter = X64CodeEmitter()
            backend = X64AsmBackend()
            if objformat == ObjectFormatType.COFF:
                writer = WinCOFFObjectWriter(output)
                stream = WinCOFFObjectStream(
                    mccontext, backend, writer, emitter)
            elif objformat == ObjectFormatType.ELF:
                target_writer = X64ELFObjectWriter()
                writer = ELFObjectWriter(output, target_writer)
                stream = ELFObjectStream(mccontext, backend, writer, emitter)
            else:
                # BUGFIX: previously fell through with `stream` unbound,
                # producing a confusing NameError below.
                raise NotImplementedError(
                    "Unsupported object format: {}".format(objformat))
        pass_manager.passes.append(X64AsmPrinter(stream))
|
nilq/baby-python
|
python
|
import pathlib
from setuptools import setup, find_packages

# Load the long description from the README next to this file.
_PROJECT_ROOT = pathlib.Path(__file__).parent
_LONG_DESCRIPTION = (_PROJECT_ROOT / "README.md").read_text()

setup(
    name='pylateral',
    version='1.0.0',
    description='Intuitive multi-threaded task processing in python.',
    long_description=_LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url='https://boxysean.github.io/pylateral/',
    author='Sean McIntyre',
    author_email='boxysean@gmail.com',
    license='MIT',
    tests_require=[
        'pytest',
    ],
    packages=find_packages(),
)
|
nilq/baby-python
|
python
|
import numpy as np
class BaseAgent:
    """Baseline agent that picks a uniformly random pawn action.

    Attributes:
        name: display name of the agent.
        environment: object exposing ``valid_actions`` (iterable of ints);
            may be None until attached.
    """

    def __init__(self, name, environment=None):
        self.name = name
        self.environment = environment

    def choose_action(self):
        """Return a random pawn action (id < 12) among the valid actions.

        BUGFIX: the original drew a random action from all valid actions,
        discarded it, then reassigned via a duplicated ``action = action =``
        statement; only the pawn-filtered draw is kept.

        Raises:
            ValueError: if no valid action has id < 12 (np.random.choice on
                an empty sequence), matching the original behavior.
        """
        # Only pawn moves (action ids below 12) are considered.
        pawn_actions = [a for a in self.environment.valid_actions if a < 12]
        action = np.random.choice(pawn_actions)
        print("Choosing action {action}".format(action=action))
        return action
|
nilq/baby-python
|
python
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates ExtensionPermission2 enum in histograms.xml file with values read
from permission_message.h.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import os
from update_histogram_enum import UpdateHistogramEnum
if __name__ == '__main__':
    # Regenerate the ExtensionPermission2 histogram enum from the enum ID
    # definition in extensions/common/permissions/permission_message.h.
    UpdateHistogramEnum(histogram_enum_name='ExtensionPermission2',
                        source_enum_path=os.path.join('..', '..', '..',
                                                      'extensions', 'common',
                                                      'permissions',
                                                      'permission_message.h'),
                        start_marker='^enum ID {',
                        end_marker='^kEnumBoundary')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import pkg_resources
import pytest
import torch
from flsim.common.pytest_helper import assertEqual
from flsim.data.csv_dataset import FLCSVDataset
from flsim.data.data_sharder import ColumnSharderConfig
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from hydra.utils import instantiate
@pytest.fixture(scope="class")
def prepare_dataset_data_loader_with_batch(request):
    # Class-scoped fixture: attach the shared test parameters (CSV path,
    # row count, per-split batch sizes) to the requesting test class.
    request.cls.test_csv_path = "test_resources/data.csv"
    request.cls.total_data_count = 15
    request.cls.train_batch_size = 1
    request.cls.eval_batch_size = 3
    request.cls.test_batch_size = 5
class TestDataset(FLCSVDataset):
    """Minimal CSV-backed dataset exposing 'userid' and 'label' columns as
    one-element tensors."""

    def _get_processed_row_from_single_raw_row(self, raw_row: Any) -> Dict[str, Any]:
        return {
            "userid": torch.Tensor([raw_row["userid"]]),
            "label": torch.Tensor([raw_row["label"]]),
        }
@pytest.mark.usefixtures("prepare_dataset_data_loader_with_batch")
class TestDatasetDataLoaderWithBatch:
    """Checks that FLDatasetDataLoaderWithBatch yields the expected number of
    batches per split for the configured batch sizes."""

    def test_batch_size(self) -> None:
        # pyre-ignore[16]: for pytest fixture
        file_path = pkg_resources.resource_filename(__name__, self.test_csv_path)
        dataset = TestDataset(file_path)
        fl_data_sharder = instantiate(ColumnSharderConfig(sharding_col="userid"))
        # Same dataset is reused as train/eval/test split.
        data_loader = FLDatasetDataLoaderWithBatch(
            dataset,
            dataset,
            dataset,
            fl_data_sharder,
            # pyre-ignore[16]: for pytest fixture
            self.train_batch_size,
            # pyre-ignore[16]: for pytest fixture
            self.eval_batch_size,
            # pyre-ignore[6]
            self.test_batch_size,
        )
        # NOTE(review): total_data_count / batch_size is a float; the
        # comparisons below rely on int == float equality (e.g. 15 == 15.0)
        # and on the counts dividing evenly.
        assertEqual(
            len(list(data_loader.fl_train_set())),
            # pyre-ignore[16]: for pytest fixture
            self.total_data_count / self.train_batch_size,
        )
        assertEqual(
            len(list(data_loader.fl_eval_set())),
            self.total_data_count / self.eval_batch_size,
        )
        assertEqual(
            len(list(data_loader.fl_test_set())),
            self.total_data_count / self.test_batch_size,
        )
|
nilq/baby-python
|
python
|
import click
import json
from pathlib import Path
from PIL import Image
import pycountry
import re
import shlex
import subprocess
import time
import traceback
import youtube_dl
# Subtitle languages to request, in preference order ('un' = undetermined).
SUB_LANGUAGES = ['en', 'en-US', 'en-UK', 'en-us', 'en-uk', 'de', 'de-DE', 'de-de', 'un']
# Shared youtube-dl options; the '{}' in outtmpl is filled with the item
# index by download().
ytops = {
    'outtmpl': '{}.%(ext)s',
    'hls_prefer_native': True,
    'nocheckcertificate': True,
    'writesubtitles': True,
    'subtitleslangs': SUB_LANGUAGES,
    'subtitlesformat': 'srt/vtt',
    'keepvideo': True,
    'skip_unavailable_fragments': False,
    'writethumbnail': True,
    'fixup': 'never',
    'socket_timeout': 10,
    'postprocessors': [
        {
            # Convert any downloaded subtitles to SRT.
            'key': 'FFmpegSubtitlesConvertor',
            'format': 'srt',
        },
    ],
}
# NOTE(review): this module-level instance appears unused — download()
# constructs its own YoutubeDL per call; confirm before removing.
ytdl = youtube_dl.YoutubeDL(ytops)
@click.command()
@click.argument('file', type=click.Path(exists=True))
@click.option('-o', '--output',
              default='.',
              type=click.Path(file_okay=False))
@click.option('-v', '--verbose', is_flag=True)
def main(file, output, verbose):
    """Download and mux every link listed in FILE into *output*.

    Progress is tracked in '<file>.done' (one processed index per line);
    optional metadata overrides are read from '<file>.forced.json' and
    merged over each item's extracted info.
    """
    file_path = Path(file)
    output_path = Path(output)
    with file_path.open('r') as f:
        links = f.read().splitlines()
    # '<file>.done' records the indices of links already processed.
    done_path = file_path.with_suffix('.done')
    if not done_path.is_file():
        done_path.touch()
    with done_path.open('r') as f:
        done = f.read().splitlines()
    done = [int(i) for i in done if i.isdigit()]
    # Optional per-run metadata overrides.
    path_forced_info = file_path.with_suffix('.forced.json')
    if path_forced_info.is_file():
        with path_forced_info.open('r') as f:
            forced_info = json.load(f)
    else:
        forced_info = {}
    for index, link in enumerate(links):
        if index in done:
            continue
        if not link or not link.startswith('http'):
            continue
        print('[Queue] Processing link #{}'.format(index))
        # path_video = Path('{}.mp4'.format(index))
        # path_subs = Path('{}.en.srt'.format(index))
        # path_thumb_jpg = Path('{}.jpg'.format(index))
        # path_thumb_png = Path('{}.png'.format(index))
        # if path_video.is_file() and path_subs.is_file() and (path_thumb_jpg.is_file() or path_thumb_png.is_file()):
        #     print('[Queue] All files for #{} already exist, proceed to muxing')
        # else:
        # NOTE(review): this retries forever when download() keeps failing
        # (download() itself sleeps 30s per failure) — confirm intended.
        result = False
        while not result:
            info = download(index, link, verbose)
            if info:
                info = {**info, **forced_info}
                result = mux(index, info, output_path)
        if result:
            with done_path.open('a+') as f:
                f.write(str(index) + '\n')
        # source_files = Path('.').glob('{}.*'.format(id))
        # for file in source_files:
        #     file.unlink()
def download(id, url, verbose=False, rewrite_info=False):
    """Download item *id* from *url* with youtube-dl.

    The extracted info dict is cached in '<id>.info.json' and reused unless
    *rewrite_info* is set.  Returns the info dict on success, or None after
    a failure (having slept 30 seconds so the caller can retry).

    BUGFIX: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit and made the caller's retry loop
    impossible to interrupt; narrowed to ``except Exception``.
    """
    ytdl = youtube_dl.YoutubeDL(ytops)
    # Fill the '{}' placeholder so output files are named '<id>.<ext>'.
    ytdl.params['outtmpl'] = ytops['outtmpl'].format(id)
    try:
        info_path = Path('{}.info.json'.format(id))
        if not rewrite_info and info_path.is_file():
            with info_path.open('r') as f:
                info = json.load(f)
        else:
            info = ytdl.extract_info(url, download=False)
            with info_path.open('w') as f:
                json.dump(info, f)
        ytdl.process_info(info)
    except Exception:
        if verbose:
            traceback.print_exc()
        print("[Queue] Download of item #{} failed. Try again in 30".format(id))
        time.sleep(30)
        return None
    return info
def alpha3(alpha2):
    # Map an ISO 639-1 two-letter code (region suffixes like 'en-US' are
    # trimmed) to its ISO 639-2 three-letter code, e.g. 'en' -> 'eng'.
    return pycountry.languages.get(alpha_2=alpha2[0:2]).alpha_3
def mux(id, info, output):
    """Mux the downloaded files for item *id* into a single MKV under *output*.

    Collects the video/audio streams, converted subtitles, thumbnail and
    chapter metadata described by *info* (a youtube-dl info dict, possibly
    with forced overrides) and drives ffmpeg to produce either
    '<title>.mkv' (movies) or '<season>/<series> - SxEE - <episode>.mkv'.

    Returns True on success, False when inputs are missing or ffmpeg fails.

    BUGFIX: the failure log line printed '[Queue]]' (doubled bracket);
    fixed to '[Queue]'.
    """
    fix_aac = False
    paths_video = []
    paths_audio = []
    if 'requested_formats' in info and info['requested_formats']:
        for format in info['requested_formats']:
            path = Path('{}.f{}.{}'.format(id, format['format_id'], format['ext']))
            if format['vcodec'] != 'none':
                paths_video.append(path)
            if format['acodec'] != 'none':
                lang = format.get('language') or info.get('language') or 'en'
                # Audio muxed from a video file is recorded as (None, lang).
                paths_audio.append((path if path not in paths_video else None, alpha3(lang)))
                if format['acodec'].startswith('mp4a'):
                    fix_aac = True
    else:
        # Single-file download: one combined video/audio input.
        # NOTE(review): here the language is NOT passed through alpha3(),
        # unlike the branch above — confirm whether that is intended.
        paths_video.append(Path('{}.{}'.format(id, info['ext'])))
        paths_audio.append((None, info.get('language') or 'en'))
        if 'acodec' in info and info['acodec'].startswith('mp4a'):
            fix_aac = True
    if not paths_video and not paths_audio:
        print('[Queue] Muxing failed because no video/audio files were found.')
        return False
    # Determine which subtitles are available for muxing
    available_subs = [i for i in info['subtitles'] if i in SUB_LANGUAGES] if 'subtitles' in info else []
    paths_sub = []
    for lang in available_subs:
        path_sub = Path('{}.{}.vtt'.format(id, lang))
        if not path_sub.is_file():
            path_sub = Path('{}.{}.srt'.format(id, lang))
            if path_sub.is_file():
                pass  # fix_srt(path_sub)
            else:
                path_sub = None
        if path_sub:
            # Undetermined subtitle language is assumed to be English.
            if lang == 'un':
                lang = 'en'
            paths_sub.append((path_sub, alpha3(lang)))
    # Check if thumbnail was downloaded
    orig_path_thumb = Path('{}.jpg'.format(id))
    if not orig_path_thumb.is_file():
        orig_path_thumb = Path('{}.png'.format(id))
        if not orig_path_thumb.is_file():
            orig_path_thumb = None
    # Check if the thumbnail's resolution is high enough
    if orig_path_thumb:
        with Image.open(orig_path_thumb) as img:
            if img.size[1] < 480:
                orig_path_thumb = None
    # Determine mime type of thumbnail
    if orig_path_thumb:
        if orig_path_thumb.suffix == '.jpg':
            thumb_mime = 'image/jpeg'
        elif orig_path_thumb.suffix == '.png':
            thumb_mime = 'image/png'
        else:
            orig_path_thumb = None
    # Rename thumbnail file to 'thumbnail'
    if orig_path_thumb:
        path_thumb = Path('thumbnail{}'.format(orig_path_thumb.suffix))
        orig_path_thumb.rename(path_thumb)
    else:
        path_thumb = None
    if 'movie' in info and info['movie'] == True:
        title = info['title']
        path_final = Path('{}.mkv'.format(title))
    else:
        # Derive series/season/episode metadata, prompting where missing.
        if 'episode' not in info or not info['episode']:
            if 'title' in info and info['title']:
                info['episode'] = info['title']
            else:
                info['episode'] = 'EPISODE'
        if 'season_number' not in info or not info['season_number']:
            print('\a')
            info['season_number'] = click.prompt(
                'No season number found. Please specify for episode "{}"'.format(info['episode']),
                default=0)
        r = re.match(r'(?:(?:Episode|Folge|Part) )*(?P<nr>\d+)(?:/\d)*', info['episode'])
        if r and info['title'] != info['episode']:
            info['episode'] = info['title']
        if 'series' not in info or not info['series']:
            info['series'] = 'SERIES'
        # Strip counters, season markers and series name from the episode.
        info['episode'] = re.sub(r'( \(?\d+/\d+\)?)$', '', info['episode'])
        info['episode'] = info['episode'].replace('Season {}'.format(info['season_number']), '')
        info['episode'] = info['episode'].replace(info['series'], '')
        info['episode'] = info['episode'].replace(' - ', '')
        info['episode'] = info['episode'].strip()
        if 'episode_number' not in info or not info['episode_number']:
            if r and r['nr'].isdigit():
                info['episode_number'] = int(r['nr'])
            else:
                print('\a')
                info['episode_number'] = click.prompt(
                    'No episode number found. Please specify for episode "{}"'.format(info['episode']),
                    default=100 + id)
        if 'episode_offset' in info and info['episode_offset'] is not None:
            info['episode_number'] += info['episode_offset']
        title = '{series} - {season_number}x{episode_number:02d} - {episode}'.format(**info)
        path_final = Path('{}/{}/{}.mkv'.format(output.absolute(), info['season_number'], title))
    if path_final.is_file():
        path_final.unlink()
    path_final.parent.mkdir(exist_ok=True, parents=True)
    # Build the ffmpeg command line: inputs first, then per-stream options.
    cmd = 'ffmpeg'
    for path_video in paths_video:
        cmd += ' -i {}'.format(shlex.quote(str(path_video.absolute())))
    for path_audio in paths_audio:
        if path_audio[0]:
            cmd += ' -i {}'.format(shlex.quote(str(path_audio[0].absolute())))
    if paths_sub:
        for path_sub in paths_sub:
            cmd += ' -i {}'.format(shlex.quote(str(path_sub[0].absolute())))
    chapters = info.get('chapters', [])
    if chapters:
        # Write chapters as an FFMETADATA1 file and add it as a metadata input.
        path_chapters = Path('{}.meta'.format(id))
        with path_chapters.open('w') as f:
            def ffmpeg_escape(txt):
                return re.sub(r'(=|;|#|\\|\n)', r'\\\1', txt)
            metadata_file_content = ';FFMETADATA1\n'
            for chapter in chapters:
                metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
                metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
                metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
                chapter_title = chapter.get('title')
                if chapter_title:
                    metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
            f.write(metadata_file_content)
        cmd += ' -i "{}" -map_metadata 1'.format(path_chapters.absolute())
    # Copy streams; strip SEI NAL units from the video bitstream.
    cmd += ' -c:v copy -bsf:v "filter_units=remove_types=6" -c:a copy'
    if fix_aac:
        cmd += ' -bsf:a aac_adtstoasc'
    if paths_sub:
        cmd += ' -c:s copy'
    for index, path_video in enumerate(paths_video):
        cmd += ' -disposition:v:{} +default'.format(index)
    for index, path_audio in enumerate(paths_audio):
        cmd += ' -metadata:s:a:{} language={}'.format(index, path_audio[1])
        cmd += ' -disposition:a:{} +default'.format(index)
    if paths_sub:
        for index, path_sub in enumerate(paths_sub):
            cmd += ' -metadata:s:s:{} language={}'.format(index, path_sub[1])
    if path_thumb:
        cmd += ' -attach {} -metadata:s:t mimetype={}'.format(shlex.quote(str(path_thumb.absolute())), thumb_mime)
    cmd += ' -metadata title={}'.format(shlex.quote(title))
    if 'description' in info:
        cmd += ' -metadata description={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata comment={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata summary={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata synopsis={}'.format(shlex.quote(info['description']))
    cmd += ' -y {}'.format(shlex.quote(str(path_final.absolute())))
    print('[Queue] Mux #{}: "{}"'.format(id, cmd))
    proc = subprocess.run(cmd, shell=True)
    # Restore the thumbnail's original name regardless of the outcome.
    if path_thumb:
        path_thumb.rename(orig_path_thumb)
    if proc.returncode != 0:
        print('[Queue] Muxing #{} failed.'.format(id))
        return False
    print('[Queue] #{} successfully muxed.'.format(id))
    return True
def fix_srt(path):
    """Re-join multi-line subtitle cues in the SRT file at *path*, in place.

    Consecutive non-empty lines following a cue's timestamp line are merged
    into one line separated by '<br />' tags.

    BUGFIX: the original indexed ``srt[i+1]`` (and ``srt[i]`` after a
    trailing digit line) without bounds checks, raising IndexError when the
    file did not end with a blank line; both accesses are now guarded.
    """
    print("[Queue] Fix corrupted SRT conversion")
    with path.open('r', encoding='utf-8-sig') as f:
        srt = f.read().split('\n')
    i = 0
    while i < len(srt):
        if srt[i]:
            if srt[i].isdigit():
                i += 1
            if i < len(srt) and re.match(r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9] --> [0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9]', srt[i]):
                i += 1
                # Merge the cue's continuation lines into the first text line.
                while i + 1 < len(srt) and srt[i+1]:
                    srt[i] += '<br />' + srt[i+1]
                    del srt[i+1]
        i += 1
    with path.open('w', encoding='utf-8-sig') as f:
        f.write('\n'.join(srt))
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
@dataclass
class SpotifyConfig:
    """OAuth client credentials for the Spotify Web API."""
    client_id: str      # application client id
    client_secret: str  # application client secret
    refresh_token: str  # long-lived token used to obtain fresh access tokens
|
nilq/baby-python
|
python
|
"""
Application load balancer stack for running ConsoleMe on ECS
"""
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as lb
from aws_cdk import core as cdk
class ALBStack(cdk.NestedStack):
    """
    Application load balancer stack for running ConsoleMe on ECS
    """

    def __init__(
        self,
        scope: cdk.Construct,
        id: str,
        vpc: ec2.Vpc,
        consoleme_sg: ec2.SecurityGroup,
        **kwargs
    ) -> None:
        """Create an internet-facing ALB in the VPC's public subnets and
        attach the provided ConsoleMe security group to it."""
        super().__init__(scope, id, **kwargs)

        # ECS Load Balancer
        consoleme_alb = lb.ApplicationLoadBalancer(
            self,
            "ConsoleMeALB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            internet_facing=True,
        )
        # Re-import the security group as immutable so CDK does not add
        # rules to it on our behalf.
        consoleme_alb.add_security_group(
            ec2.SecurityGroup.from_security_group_id(
                self,
                "ImportedConsoleMeLBSG",
                security_group_id=consoleme_sg.security_group_id,
                mutable=False,
            )
        )
        # Exposed for sibling stacks (e.g. listeners/target groups).
        self.consoleme_alb = consoleme_alb
|
nilq/baby-python
|
python
|
# --------------------------------------------------
# Gene class
# Authors: Thomas Schwarzl, schwarzl@embl.de
# --------------------------------------------------
import gzip
import logging
from collections import OrderedDict, defaultdict
from HTSeq import GenomicArray, GenomicArrayOfSets, GenomicPosition, GenomicFeature
from .GeneRegion import GeneRegion
from .GTxFeature import GTxFeature
from .output import Output
"""
The Class 'Gene' stores genomic gene location information
and flattens gene info.
It sores the Exon information of different transcripts
and calculates the Introns from the inbetween spaces.
Also, the CDS and UTR information is stored and flatten.
"""
class Gene:
# Region-type labels used as keys into the per-type GenomicArray maps.
__CDS__ = "CDS"
__3UTR__ = "3UTR"
__5UTR__ = "5UTR"
__EXON__ = "exon"
__INTRON__ = "intron"
"""
'Gene': init
"""
def __init__(self, feature, splitExons=True, processGeneOnlyInformation=True):
    """Initialize an empty Gene around a gene-level GTxFeature.

    Args:
        feature: the gene-level GTxFeature this object describes.
        splitExons: if True, exons are later split into CDS/UTR regions.
        processGeneOnlyInformation: if True, a gene without any exon
            features gets a single whole-gene exon synthesized during
            process().
    """
    logging.debug("Initializing new gene")
    self.splitExons = splitExons
    self.processGeneOnlyInformation = processGeneOnlyInformation
    self.stranded = True
    self.forwardSymbol = "+"
    self.reverseSymbol = "-"
    # Order in which stored CDS/UTR regions are flattened in; earlier
    # entries take priority.
    self.regionPriorityOrder = (self.__CDS__, self.__3UTR__, self.__5UTR__)
    # Per-type genomic interval sets for flattening overlapping features.
    self.features = {
        self.__CDS__: GenomicArrayOfSets('auto', stranded=self.stranded),
        self.__3UTR__: GenomicArrayOfSets('auto', stranded=self.stranded),
        self.__5UTR__: GenomicArrayOfSets('auto', stranded=self.stranded),
        self.__EXON__: GenomicArrayOfSets('auto', stranded=self.stranded)
    }
    self.details = GenomicArrayOfSets('auto', stranded=self.stranded)
    # Start/end positions per region type; used later to detect
    # alternative isoform boundaries.
    self.startSites = {
        self.__CDS__: GenomicArray('auto', stranded=self.stranded),
        self.__3UTR__: GenomicArray('auto', stranded=self.stranded),
        self.__5UTR__: GenomicArray('auto', stranded=self.stranded),
        self.__EXON__: GenomicArray('auto', stranded=self.stranded)
    }
    self.endSites = {
        self.__CDS__: GenomicArray('auto', stranded=self.stranded),
        self.__3UTR__: GenomicArray('auto', stranded=self.stranded),
        self.__5UTR__: GenomicArray('auto', stranded=self.stranded),
        self.__EXON__: GenomicArray('auto', stranded=self.stranded)
    }
    # Raw CDS/UTR features held back until all exons are known.
    self.storage = {
        self.__CDS__: [],
        self.__3UTR__: [],
        self.__5UTR__: []
    }
    self.exonTotal = 0
    self.intronTotal = 0
    # List of unprocessed gene regions
    self.rawRegionList = None
    # List of GeneRegions containing the processed exons and introns
    self.regionList = list()
    # List of GeneRegions containing the processed CDS, UTRs, exons and introns
    self.detailedRegionList = None
    # gene symbol and gene type
    self.symbol = "NA"
    self.geneType = "NA"
    # Determines if Exons should be split into CDS and UTR regions
    self.feature = feature
"""
'Gene': Getters for convenience
"""
def getGeneInterval(self):
    """Genomic interval of the whole gene (delegates to the gene feature)."""
    return self.feature.getInterval()
def getGeneType(self):
    """Gene biotype as reported by the gene feature."""
    return self.feature.getGeneType()
def getGeneName(self):
    """Gene name as reported by the gene feature."""
    return self.feature.getGeneName()
def getStrand(self):
    """Strand symbol of the gene ('+' or '-')."""
    return self.feature.getStrand()
def getId(self):
    """Identifier of the gene (the feature's name)."""
    return self.feature.getName()
"""
'Gene': Basic functions
"""
def isForwardStrand(self):
return self.getStrand() == self.forwardSymbol
def isReverseStrand(self):
return self.getStrand() == self.reverseSymbol
"""
Returns true if the object was processed, which is required for returning output
"""
def isProcessed(self):
return len(self.regionList) > 0
"""
Calculating total exon and total intron number
"""
def calculateTotalExonAndIntronCount(self):
    """Derive exon/intron totals from the flattened raw region list.

    The raw list alternates exon/gap steps, so with n entries there are
    (n + 1) / 2 exons and one fewer introns.
    """
    logging.debug("processing gene")
    self.exonTotal = int((len(self.rawRegionList) + 1) / 2)
    self.intronTotal = int(self.exonTotal - 1)
    logging.debug("exon total: {}, intron total: {}".format(str(self.exonTotal), str(self.intronTotal)))
"""
Merge/Flatten the exons and store into regionList
"""
def mergeExons(self):
    """Flatten all added exon intervals over the gene span into raw steps
    (exon steps alternate with empty gap steps that become introns)."""
    self.rawRegionList = list(self.features[self.__EXON__][self.getGeneInterval()].steps())
def exonsWereAdded(self):
    """True when at least one exon was added (more than the single empty
    step an untouched GenomicArrayOfSets reports)."""
    return len(list(self.features[self.__EXON__].steps())) > 1
"""
Gene annotation:
Calculates the exon, intron regions and their corresponding flags
The processed annotation is stored in the variable self.regionList
"""
def process(self):
    """Flatten the gene annotation into exon/intron (and CDS/UTR) regions.

    If no exons were added and processGeneOnlyInformation is set, a single
    whole-gene exon is synthesized and process() recurses once; otherwise
    an exception is raised.
    """
    logging.debug("processing gene")
    if self.exonsWereAdded():
        self.mergeExons()
        # in this function the flattened gene definition is created, also all start and end sites
        # of exons and introns are stored so alternative isoforms of exons and introns can be assigned.
        # those are used to calculate flags, (no alternative isoform, or 5' or 3' isoform, or 5' and 3'
        # isoform variants
        self.calculateExonsAndIntrons()
        self.processStoredRegions()
        if self.splitExons:
            self.splitExonsIntoUTRandCDSRegions()
    else:
        if self.processGeneOnlyInformation:
            logging.debug("adding an exon for a gene without exon information.")
            feature = GTxFeature(GenomicFeature("name", self.__EXON__, self.feature.feature.iv))
            self.addRegion(feature, self.__EXON__)
            # Recurse once: the synthesized exon makes exonsWereAdded() True.
            self.process()
        else:
            raise Exception("The gene annotation file provides a gene without any exon information. " +
                            "Either add exon information to the annotation or use the processGeneOnlyInformation " +
                            "of htseq-clip.")
"""
Gene.getGeneLength:
returns length of gene (sum of all gene regions).
"""
def length(self):
length = 0
for region in self.regionList:
length += region.length()
return length
"""
Gene: Checks if the preprocessing was done, which is an essential step
before providing output
"""
def checkIfProcessed(self):
    """Lazily run process() if the gene has not been flattened yet."""
    if not self.isProcessed():
        self.process()
"""
Gene: Calculates exon and intron regions and their corresponding flags
and number, as well as the total count (e.g. exon 1/10).
"""
def calculateExonsAndIntrons(self):
    """Turn the raw step list into numbered GeneRegion exons and introns.

    Regions are numbered along transcription direction (1..total on the
    forward strand, total..1 on the reverse strand).  Exon isoform flags
    are set here; intron flags are filled in afterwards from their
    neighbors.
    """
    self.calculateTotalExonAndIntronCount()
    logging.debug("calculating exons and introns and their corresponding flags, number, and total count")
    exonIndex = 1 if self.isForwardStrand() else self.exonTotal
    intronIndex = 1 if self.isForwardStrand() else self.intronTotal
    regionIndex = 0
    for (regionInterval, regionInfo) in self.rawRegionList:
        logging.debug("processing region {} - {}".format(str(regionInterval), str(regionInfo)))
        region = GeneRegion(self, regionInterval)
        # if exon: a non-empty step set means exon annotation covers it
        if len(regionInfo) > 0:  # == "exon":
            logging.debug("processing exon")
            upstreamFlag = self.getExonUpstreamFlag(regionInterval)
            downstreamFlag = self.getExonDownstreamFlag(regionInterval)
            region.type = self.__EXON__
            region.index = exonIndex
            region.total = self.exonTotal
            region.upstreamFlag = upstreamFlag
            region.downstreamFlag = downstreamFlag
            exonIndex = self.nextIndex(exonIndex)
        # else intron (an empty gap step between exons)
        else:
            logging.debug("processing intron")
            region.type = self.__INTRON__
            region.index = intronIndex
            region.total = self.intronTotal
            # intron flags will be determined after all exon flags have been assigned
            intronIndex = self.nextIndex(intronIndex)
        # update regionList
        self.regionList.append(region)
        regionIndex += 1
    # calculate all intron flags
    self._regionListSanityCheck()
    self.calculateIntronFlags()
def _regionListSanityCheck(self):
'''
Sanity check for region list, make sure that the first and last indices are always exons and
two regions of the same type are never next to each other
'''
removeIndices = list() # indices to remove from the region list
prevType = None
for i,rd in enumerate(self.regionList):
if (i==0 or i==len(self.regionList)-1) and rd.type != self.__EXON__:
removeIndices.append(i)
elif prevType == rd.type:
removeIndices.append(i)
prevType = rd.type
if len(removeIndices)>0:
for ri in removeIndices:
del self.regionList[ri]
"""
Calculates flags for Introns by retrieving flags from the neighboring regions.
"""
def calculateIntronFlags(self):
    """Copy isoform flags onto each intron from its neighboring exon.

    Introns sit at the odd positions of the alternating exon/intron list,
    hence the step of 2.  previousIndex() is strand-aware, so "previous"
    means the upstream neighbor in transcription direction.
    """
    logging.debug("calculating intron flags directional")
    regionIndex = 1
    while regionIndex < len(self.regionList):
        self.regionList[regionIndex].upstreamFlag = self.regionList[self.previousIndex(regionIndex)].downstreamFlag
        # NOTE(review): the downstream flag is also taken from the *previous*
        # region (its upstream flag) rather than from the following region —
        # confirm this matches the intended flag semantics.
        self.regionList[regionIndex].downstreamFlag = self.regionList[self.previousIndex(regionIndex)].upstreamFlag
        regionIndex += 2
"""
Splits exon regions into UTR and CDS regions and stores all regions
to 'detailedRegionList'
"""
def splitExonsIntoUTRandCDSRegions(self):
logging.debug("calculate UTR and CDS regions")
self.detailedRegionList = []
for region in self.regionList:
for newRegion in region.split():
self.detailedRegionList.append(newRegion)
"""
Gene: These functions increments or decrements the index,
depending on strandness of Gene
"""
def nextIndex(self, index):
    """Next region index in transcription direction (strand-aware)."""
    return self.indexStep(index, 1)
def previousIndex(self, index):
    """Previous region index in transcription direction (strand-aware)."""
    return self.indexStep(index, -1)
def indexStep(self, index, step):
if self.isForwardStrand():
index += step
elif self.isReverseStrand():
index -= step
else:
raise Exception("Sorry, but htseq-clip cannot work with unstranded data yet.")
return index
"""
Gene: Get the strand specific exon upstream flag
"""
def getExonUpstreamFlag(self, interval):
    # A single step means the exon start-site array is uniform across this
    # interval — presumably "no alternative 5' isoform boundary inside";
    # confirm against the startSites bookkeeping in addRegion().
    length = len(list(self.startSites[self.__EXON__][interval].steps()))
    logging.debug("Get exon start sites in interval: %s" % length)
    return length == 1
"""
Gene: Get the strand specific exon downstream flag
"""
def getExonDownstreamFlag(self, interval):
    # Mirror of getExonUpstreamFlag() for exon end sites — a single step
    # presumably means no alternative 3' isoform boundary inside; confirm.
    length = len(list(self.endSites[self.__EXON__][interval].steps()))
    logging.debug("Get exon end sites in interval: %s" % length)
    return length == 1
"""
Gene: Adding a gff/gtf feature to the gene.
"""
def add(self, feature):
    """Add a GTF/GFF feature to this gene.

    Exons are flattened immediately; CDS/UTR features are stored and
    flattened later (in processStoredRegions) once all exons are known.
    Unknown feature types are ignored.

    Raises:
        Exception: if the gene was already processed.
        SyntaxError: if the feature belongs to a different gene.
    """
    if self.isProcessed():
        raise Exception("Gene already has been processed, you cannot add additional regions.")
    logging.debug("adding info %s " % feature)
    self.assertFeatureBelongsToGene(feature)
    if feature.isExon():
        logging.debug("invoking addRegion %s %s" % (feature, self.__EXON__))
        self.addRegion(feature, self.__EXON__)
    elif feature.isCDS():
        self.storeRegion(feature, self.__CDS__)
    elif feature.is5UTR():
        self.storeRegion(feature, self.__5UTR__)
    elif feature.is3UTR():
        self.storeRegion(feature, self.__3UTR__)
    else:
        logging.debug("ignoring feature %s" % feature)
"""
Assert that the feature belongs to the gene
"""
def assertFeatureBelongsToGene(self, feature):
if not self.feature.getGeneId() == feature.getGeneId():
raise SyntaxError("The order of gene and gene features in the input file are incorrect. The current feature does not belong to the gene being processed.")
"""
Gene: Adds a region to genes and adds the corresponding
start and end sites. Start and end sites are later used to
figure out alternative isoforms for a given exon or intron
"""
def addRegion(self, feature, name):
logging.debug("adding {} {} {}".format(name, feature, feature.getInterval()))
self.features[ name ][ feature.getInterval() ] = name
self.startSites[ name ][ GenomicPosition( feature.getInterval().chrom,
feature.getInterval().start_d,
strand = feature.getStrand() ) ] = True
self.endSites[ name ][ GenomicPosition( feature.getInterval().chrom,
feature.getInterval().end_d,
strand = feature.getStrand() ) ] = True
self.details[ feature.getInterval() ] = name
logging.debug("finished adding %s %s" % (name, feature))
"""
Gene: stores the region for processing.
Processing can be only started once all the regions are stored.
"""
def storeRegion(self, feature, name):
logging.debug("storing %s %s" % (str(name), str(feature)))
self.storage[ name ].append(feature)
"""
This function adds regions according to the region priority order
"""
def processStoredRegions(self):
logging.debug("processing stored regions")
for regionName in self.regionPriorityOrder:
for feature in self.storage[ regionName ]:
self.addRegion(feature, regionName)
""""
Gene: Outputs the genomic location info of
exons and introns to a bed format
"""
def toBed(self):
self.checkIfProcessed()
for region in self.regionList:
yield region.toBed()
""""
Gene: Outputs the genomic location info of
CDS, UTRs, introns and remaining exons to
a bed format
"""
def toBedDetailed(self):
# calculates all the coordinates if not calculated
self.checkIfProcessed()
regionList = None
if self.splitExons:
regionList = self.detailedRegionList
else:
regionList = self.regionList
# write the individual regions to the output
for region in regionList:
yield region.toBed()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import subprocess
import shlex
from getpass import getuser
from distutils.command.build import build # type: ignore
from setuptools import (
find_packages,
setup,
Command
)
import numpy as np
# System packages installed at build time by CustomCommands (run as root
# on the worker during `pip install`).
CUSTOM_COMMANDS = [
    shlex.split(command_line) for command_line in [
        'apt-get update',
        'apt-get --assume-yes install libxml2',
        'apt-get --assume-yes install poppler-utils',
        'apt-get --assume-yes install libgl1'
    ]
]
# One requirement spec per line; passed to install_requires verbatim.
with open(os.path.join('requirements.txt'), 'r', encoding='utf-8') as f:
    REQUIRED_PACKAGES = f.readlines()
packages = find_packages()
# This class handles the pip install mechanism.
class CustomBuild(build):
    """Build command that chains in the custom setup step.

    The package built with this setup.py is staged and later installed
    on a worker via `pip install`; during that install this build class
    is instantiated and triggers the `CustomCommands` sub-command, which
    performs the system-level setup.
    """

    # Append our sub-command so it runs after the standard build steps.
    sub_commands = build.sub_commands + [('CustomCommands', None)]
class CustomCommands(Command):
    """A setuptools Command class able to run arbitrary commands."""
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def _run_custom_command(self, command_list):
        # Skipped when not root OR when SCIENCEBEAM_GYM_NO_APT is set;
        # note the printed message only mentions the "not root" case.
        if getuser() != 'root' or os.environ.get('SCIENCEBEAM_GYM_NO_APT'):
            print('Skipping setup command (not root): %s' % command_list)
            return
        print('Running command: %s' % command_list)
        # stderr is merged into stdout so the full output is captured below.
        p = subprocess.Popen(
            command_list,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        # Can use communicate(input='y\n'.encode()) if the command run requires
        # some confirmation.
        stdout_data, _ = p.communicate()
        print('Command output: %s' % stdout_data)
        # A non-zero exit code aborts the whole install with the output attached.
        if p.returncode != 0:
            raise RuntimeError(
                'Command %s failed: exit code: %s (output: %s)' %
                (command_list, p.returncode, stdout_data)
            )
    def run(self):
        # Execute each apt-get command declared at module level, in order.
        for command in CUSTOM_COMMANDS:
            self._run_custom_command(command)
# Package metadata; CustomBuild/CustomCommands hook the apt-get setup
# into the `pip install` build step via cmdclass.
setup(
    name='sciencebeam_gym',
    version='0.0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=packages,
    include_package_data=True,
    description='ScienceBeam Gym',
    # NOTE(review): include_dirs is normally an Extension() option, not a
    # setup() keyword; setup() likely ignores it here -- confirm intent.
    include_dirs=[np.get_include()],
    cmdclass={
        'build': CustomBuild,
        'CustomCommands': CustomCommands
    }
)
|
nilq/baby-python
|
python
|
# Copyright 2004-present, Facebook. All Rights Reserved.
from django.contrib.auth.decorators import login_required
from django.urls import path
from . import views
# Store/product routes; every view is wrapped in login_required, so
# anonymous requests are redirected to the login page.
urlpatterns = [
    # products
    path(
        "store/<int:storeId>/products/create",
        login_required(views.createProduct),
        name="createProduct",
    ),
    path(
        "store/<int:storeId>/product/<str:productId>/update",
        login_required(views.updateProduct),
        name="updateProduct",
    ),
    path(
        "store/<int:storeId>/products",
        login_required(views.viewProducts),
        name="viewProducts",
    ),
    path(
        "store/<int:storeId>/product/<str:productId>",
        login_required(views.viewProduct),
        name="viewProduct",
    ),
    # catalog
    path(
        "store/<int:storeId>/catalog/sync",
        login_required(views.syncCatalog),
        name="syncCatalog",
    ),
    # dummy products
    path(
        "store/<int:storeId>/create_dummy_products",
        login_required(views.createDummyProducts),
        name="createDummyProducts",
    ),
]
|
nilq/baby-python
|
python
|
from detective import functions
import math
# Attribute payload shaped like a temperature-sensor state; shared
# fixture for the accessor tests below.
MOCK_ATTRIBUTE = {
    "battery_level": 61,
    "unit_of_measurement": "°C",
    "friendly_name": "Living room sensor temperature",
    "device_class": "temperature",
}
def test_device_class():
    """get_device_class returns the attribute value, or UNKNOWN when absent."""
    expected = MOCK_ATTRIBUTE["device_class"]
    assert functions.get_device_class(MOCK_ATTRIBUTE) == expected
    assert functions.get_device_class({}) == functions.UNKNOWN
def test_get_unit_of_measurement():
    """get_unit_of_measurement returns the attribute value, or UNKNOWN when absent."""
    expected = MOCK_ATTRIBUTE["unit_of_measurement"]
    assert functions.get_unit_of_measurement(MOCK_ATTRIBUTE) == expected
    assert functions.get_unit_of_measurement({}) == functions.UNKNOWN
def test_get_friendly_name():
    """get_friendly_name returns the attribute value, or UNKNOWN when absent."""
    expected = MOCK_ATTRIBUTE["friendly_name"]
    assert functions.get_friendly_name(MOCK_ATTRIBUTE) == expected
    assert functions.get_friendly_name({}) == functions.UNKNOWN
|
nilq/baby-python
|
python
|
from argparse import ArgumentParser
import examples.example02.tasks
from cline.cli import ArgumentParserCli, RegisteredTasks
class ExampleCli(ArgumentParserCli):
    """Example CLI wiring two optional operands to sum/subtract tasks."""

    def make_parser(self) -> ArgumentParser:
        """Build the argument parser for the example CLI."""
        parser = ArgumentParser()
        # Two optional positional operands, in order.
        for operand, description in (("a", "first number"), ("b", "second number")):
            parser.add_argument(operand, help=description, nargs="?")
        parser.add_argument("--sub", help="subtracts", action="store_true")
        parser.add_argument("--sum", help="sums", action="store_true")
        return parser

    def register_tasks(self) -> RegisteredTasks:
        """Expose the tasks this CLI can dispatch to."""
        return [
            examples.example02.tasks.SumTask,
            examples.example02.tasks.SubtractTask,
        ]
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from pandas.compat import StringIO
import sys
import re
import os
import ntpath
def file_to_df(filename):
    """Parse a benchmark output file into a single DataFrame.

    The file is expected to contain a "Run Configurations" section with a
    Python-literal list of per-configuration dicts, followed by a
    whitespace-separated data table whose header starts with "config".
    Each data row is joined (inner) with its configuration via the
    'config' column.
    """
    # io.StringIO: `pandas.compat.StringIO` was removed in pandas >= 0.25.
    from io import StringIO

    with open(filename, 'r') as file:
        contents = file.read()
    # Configuration list starts at the first '[' after the section header.
    start = contents.find("[", contents.find("Run Configurations"))
    # The data table begins at its "config ..." header line.
    end = contents.find("config")
    # WARNING: eval() executes arbitrary code from the input file; only
    # use on trusted benchmark output (ast.literal_eval would be safer).
    config = pd.DataFrame(eval(contents[start:end]))
    # Whitespace-separated table; sep=r"\s+" replaces the deprecated
    # delim_whitespace=True and is behaviorally equivalent here.
    data = pd.read_csv(StringIO(contents[end:]), sep=r"\s+")
    # Attach each row's configuration by matching 'config' to the
    # configuration frame's index.
    return data.join(config, on='config', how='inner')
def _is_nr(str):
return str.find("NR") != -1
is_nr = np.vectorize(_is_nr)
def _gap(arr):
if len(arr) < 2:
raise Exception('length 0 or 1 pattern')
return arr[1]
gap = np.vectorize(_gap)
def _pct(name):
return int(re.findall('\d+', name)[0])
pct = np.vectorize(_pct)
# Line colors per prefetch state.
colors = {'on': '#26CAD3', 'off': 'black'}


def prefetch(name):
    """Derive the prefetch state ('on'/'off') from a result file name.

    The state token is expected between the first and last underscore of
    the file's base name, e.g. 'bench_on_skylake.dat' -> 'on'.
    """
    stem = ntpath.basename(os.path.splitext(name)[0])
    token = stem[stem.find("_") + 1:stem.rfind("_")]
    for state in ("on", "off"):
        if state in token:
            return state
    raise Exception("could not determine prefetch")
def get_arch(name):
    """Return the architecture suffix: text after the last underscore of
    the file's base name, e.g. 'bench_on_skylake.dat' -> 'skylake'."""
    stem = ntpath.basename(os.path.splitext(name)[0])
    return stem[stem.rfind("_") + 1:]
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python3 {} input.dat".format(sys.argv[0]))
exit(1)
#Read files
dfs = []
for f in sys.argv[1:]:
tmp = file_to_df(f)
tmp['arch'] = get_arch(f)
tmp['gap'] = gap(tmp['name'])
tmp['prefetch'] = prefetch(f)
tmp['norm_local'] = tmp['bw(MB/s)'] / max(tmp['bw(MB/s)'])
dfs.append(tmp)
df = pd.concat(dfs)
df['norm_global'] = df['bw(MB/s)'] / max(df['bw(MB/s)'])
df['bw(GB/s)'] = df['bw(MB/s)'] / 1000
all_arch = ""
for key, _ in df.groupby(['arch']):
all_arch = all_arch + key
SMALL_SIZE = 15
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
#Plot against global max
print("Making plot 1")
fix, ax = plt.subplots()
#ax = plt.subplot(1, 2, 1) #plot 1
for key, grp in df.groupby(['prefetch']):
ax = grp.plot(ax=ax, kind='line', x='config', y='bw(GB/s)', label=key, color=colors[key])
print(key)
ax.set_xlabel("Stride (Doubles)")
ax.set_ylabel("Bandwidth (GB/s)")
ax.get_legend().remove()
ax2 = ax.twinx()
for key, grp in df.groupby(['prefetch']):
ax2 = grp.plot(ax=ax2, kind='line', x='config', y='norm_global', label=key, color=colors[key], linewidth=4)
print(key)
MODE="normal"
print(f"Mode is {MODE}")
ax2.set_ylabel("Normalized bandwidth")
ax2.get_legend().remove()
if MODE == "opt":
ax2.axhline(y=1, linestyle=":", color="black")
ax2.axhline(y=.5, linestyle=":", color="black", xmin=1/7)
ax2.axhline(y=.25, linestyle=":", color="black", xmin=2/7)
ax2.axhline(y=.125, linestyle=":", color="black", xmin=3/7)
ax2.axhline(y=.0625, linestyle=":", color="black", xmin=4/7)
else:
ax2.axhline(y=1, linestyle=":", color="black")
ax2.axhline(y=.5, linestyle=":", color="black")
ax2.axhline(y=.25, linestyle=":", color="black")
ax2.axhline(y=.125, linestyle=":", color="black")
ax2.axhline(y=.0625, linestyle=":", color="black")
plt.yticks([.0625, .125, .25, .5, 1], ['1/16', '1/8', '1/4', '1/2', '1'])
ax2.set_xticklabels([])
# ax.set_xticklabels([7, 7, 7, 7, 7, 7, 7, 7])
#def format_func(value, tick_number):
# return r"$2^{}$".format(value)
#ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.set_xticklabels(["$2^{{{}}}$".format(x) for x in range(0,8)])
#ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
#ax.ticklabel_format(axis='x', useMathText=True)
ax.tick_params(axis=u'y', which=u'both',length=0)
ax2.tick_params(axis=u'both', which=u'both',length=0)
if MODE == "opt":
for a in [ax, ax2]:
a.spines["top"].set_visible(False)
a.spines["right"].set_visible(False)
a.spines["bottom"].set_visible(False)
a.spines["left"].set_visible(False)
plt.legend(loc='best', title='Prefetch')
# Change figure size
fig = plt.gcf()
fig.set_size_inches(6, 6)
outname = "prefetch_{}_{}.png".format(all_arch, MODE)
plt.savefig(outname, transparent=True, bbox_inches='tight')
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.set_xticklabels(["$2^{{{}}}$".format(x) for x in range(0,8)])
print("Saved plot to {}".format(outname))
handles,labels = ax.get_legend_handles_labels()
handles = [handles[1], handles[0]]
labels = [labels[1], labels[0]]
plt.clf()
exit(0)
#Plot against a local max
fig, ax = plt.subplots()
for key, grp in df.groupby(['arch']):
ax = grp.plot(ax=ax, kind='line', x='gap', y='norm_local', label=key)
print(key)
plt.legend(loc='best', title='log2(gap)')
outname = "ustride_local.png"
plt.savefig(outname)
plt.clf()
|
nilq/baby-python
|
python
|
import os
import socket
import struct
import sys
import select
os.system("")
UDP_PORT = 13117
MESSAGE_LEN = 1024
GAME_TIME = 10
sockUDP = None
sockTCP = None
# Colors for prints
class Colors:
    # ANSI escape codes used to color terminal output.
    GREEN = '\033[32m'
    BLUE = '\033[34m'
    PINK = '\033[35m'
def printMessageOrRead():
    """Relay one round of game I/O over the global TCP socket.

    Blocks until either the server sends data (printed in pink) or the
    user types a character (forwarded to the server); after forwarding
    user input it recurses once so the server's answer gets printed.
    """
    # wait for read or write from client or server
    readers, _, _ = select.select([sys.stdin, sockTCP], [], [])
    for reader in readers:
        if reader is sockTCP:
            data = sockTCP.recv(MESSAGE_LEN).decode()
            print(Colors.PINK + data)
        else:
            # Forward a single typed character to the server.
            ch = sys.stdin.read(1)
            sockTCP.sendall(ch.encode())
            printMessageOrRead() # because will need to print server answer
def start_udp():
    """Listen for a broadcast offer on UDP_PORT.

    Returns
    -------
    tuple
        (server_ip_address, raw offer message bytes).
    """
    global sockUDP
    # create UDP socket with the variables we need
    sockUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # init UDP socket
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # NOTE(review): SO_REUSEPORT is not available on all platforms
    # (e.g. Windows) -- confirm the target OS.
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sockUDP.bind(("", UDP_PORT))
    # assume server started and need to connect
    message, location = sockUDP.recvfrom(MESSAGE_LEN)
    server_ip_address = str(location[0])
    return server_ip_address, message
def start_tcp(ip, tcp_port):
    """Open the global TCP game connection to the offering server.

    Parameters
    ----------
    ip : str
        Server address taken from the UDP offer.
    tcp_port : int
        TCP port announced in the offer message.
    """
    global sockTCP
    # create custom TCP socket
    sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sockTCP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sockTCP.connect((ip, tcp_port))
def print_start(first_print):
    """Print the startup banner only while the caller's counter is zero."""
    if not first_print:
        print(Colors.GREEN + "Client started, listening for offer requests...")
first_print = 0
# Main client loop: wait for a UDP offer, play one TCP game, repeat.
while True:
    print_start(first_print)
    first_print += 1
    # waits for server suggestion
    # part 1 get udp message
    ip, message = start_udp()
    try:
        # part 2 connect to server
        printBool = True
        # Offer layout: 4-byte cookie, 1-byte type, 2-byte TCP port.
        cookie, msg_type, tcp_port = struct.unpack('IBH', message)
        # part 3 make sure it's the correct server
        if cookie == 0xabcddcba and msg_type == 0x2: # check if message is as expected
            print(Colors.GREEN + "Received offer from " + ip + " attempting to connect...")
            start_tcp(ip, tcp_port)
            # part 4 start game with group name
            group_name = input(Colors.PINK + 'Enter your group name: ')
            sockTCP.sendall(group_name.encode()) # send team's name to server
            # part 5 start game
            print(sockTCP.recv(MESSAGE_LEN).decode()) # the game begin message
            print(sockTCP.recv(MESSAGE_LEN).decode()) # math question
            # part 6 play game, win or lost
            printMessageOrRead()
            print(Colors.BLUE + "Server disconnected, listening for offer requests...")
            first_print = 0
        else:
            print(Colors.GREEN + "Bad UDP Message Format")
            # got message not in the expected format
    # NOTE(review): this silently swallows every error (bad unpack,
    # connect failure, dropped socket) -- consider at least logging.
    except Exception as e:
        pass
|
nilq/baby-python
|
python
|
from tests.testcases import TestCaseUsingRealAPI
from vortexasdk import Products, Geographies, Corporations, Vessels
endpoints_and_searchterms = [
(Products(), "Gasoil"),
(Geographies(), "China"),
(Corporations(), "Oil"),
(Vessels(), "Ocean"),
]
class TestSearchReal(TestCaseUsingRealAPI):
    def test_search_exact_match_yields_fewer_results_than_non_exact_match(
        self,
    ):
        """Exact-match search should return a subset of loose-match search.

        NOTE(review): the strict `<` assumes the loose search always finds
        extra results for every term; `<=` would be more robust -- confirm.
        """
        for endpoint, term in endpoints_and_searchterms:
            result_loose_match = endpoint.search(
                term=term, exact_term_match=False
            )
            result_exact_match = endpoint.search(
                term=term, exact_term_match=True
            )
            assert len(result_exact_match) < len(result_loose_match)
    def test_search_exact_match_yields_exact_matches_only(self):
        """Every exact-match hit must be named exactly like the search term."""
        for endpoint, term in endpoints_and_searchterms:
            result_exact_match = endpoint.search(
                term=term, exact_term_match=True
            )
            actual_names = {e["name"] for e in result_exact_match}
            # result must be the exact term, or must contain no results if there's no reference objects with that names
            assert actual_names == {term} or actual_names == set()
|
nilq/baby-python
|
python
|
from app import app
from flask import request, session
from helpers.database import *
from helpers.hashpass import *
from helpers.mailer import *
from bson import json_util, ObjectId
import json
def checkloginusername():
    """Report whether the posted username exists in the users collection."""
    username = request.form["username"]
    record = db.users.find_one({"username": username})
    return "No User" if record is None else "User exists"
def checkloginpassword():
    """Validate the posted password and open a session on success.

    SECURITY(review): the password is compared in plain text (hashing is
    commented out) and the comparison is not constant-time; also
    find_one may return None for an unknown user, which would raise
    here -- confirm and fix before production use.
    """
    username = request.form["username"]
    check = db.users.find_one({"username": username})
    password = request.form["password"]
    # hashpassword = getHashed(password)
    if password == check["password"]:
        # Notify the user and persist the login in the session cookie.
        sendmail(subject="Login on Flask Admin Boilerplate", sender="Flask Admin Boilerplate", recipient=check["email"], body="You successfully logged in on Flask Admin Boilerplate")
        session["username"] = username
        return "correct"
    else:
        return "wrong"
def checkusername():
    """Availability check for a requested username."""
    username = request.form["username"]
    existing = db.users.find_one({"username": username})
    return "Available" if existing is None else "Username taken"
def registerUser():
    """Store the posted registration form as a new user document.

    SECURITY(review): the password (and its confirmation) are stored in
    plain text -- the hashing lines are commented out; confirm and fix.
    """
    # Round-trip the form through BSON-safe JSON before inserting.
    fields = [k for k in request.form]
    values = [request.form[k] for k in request.form]
    data = dict(zip(fields, values))
    user_data = json.loads(json_util.dumps(data))
    # user_data["password"] = getHashed(user_data["password"])
    # user_data["confirmpassword"] = getHashed(user_data["confirmpassword"])
    # NOTE(review): Collection.insert was removed in pymongo 4
    # (use insert_one) -- confirm the pinned pymongo version.
    db.users.insert(user_data)
    sendmail(subject="Registration for Flask Admin Boilerplate", sender="Flask Admin Boilerplate", recipient=user_data["email"], body="You successfully registered on Flask Admin Boilerplate")
    print("Done")
|
nilq/baby-python
|
python
|
"""Hyperparameters from paper """
import numpy as np
import torch.optim as optim
from .model import DQN, DuelingDQN
class AtariHyperparams:
    """Class-level hyperparameter table for Atari DQN training.

    All settings live as mutable class attributes; set_mode() rewrites
    them in place for the selected algorithm variant, and
    get_all_hyperparams() snapshots them for logging.
    """
    ALGO = "DQN"
    SEED = 2
    LOG_DISPLAY_FREQ = 10
    # Image sizing
    WIDTH = 84
    HEIGHT = 84
    # Number of most recent frames given as input to Q-network
    AGENT_HISTORY = 4
    # NOTE(review): STATE_DIMS is channel-first while INPUT_DIMS below is
    # channel-last -- confirm which one each consumer expects.
    STATE_DIMS = (AGENT_HISTORY, WIDTH, HEIGHT)
    NORMALIZE = False
    DISCOUNT = 0.99
    MINIBATCH_SIZE = 32
    REPLAY_SIZE = int(1e6)
    REPLAY_S_DTYPE = np.uint8
    # Number of network updates between target network updates
    # TARGET_NETWORK_UPDATE_FREQ = 10000
    TARGET_NETWORK_UPDATE_FREQ = 2500  # every 10000 frames
    # Number of times an action is repeated, i.e. number of frames skipped
    ACTION_REPEAT = 4
    # Num actions (ignoring repeats) performed before Gradient descent update
    NETWORK_UPDATE_FREQUENCY = 4
    # Parameters for network learning
    OPTIMIZER = optim.RMSprop
    LEARNING_RATE = 0.00025
    GRADIENT_MOMENTUM = 0.95
    SQUARED_GRADIENT_MOMENTUM = 0.95
    MIN_SQUARED_GRADIENT = 0.01
    OPTIMIZER_KWARGS = {
        "lr": LEARNING_RATE,
        "momentum": GRADIENT_MOMENTUM,
        "eps": MIN_SQUARED_GRADIENT
    }
    GRAD_CLIP = [-1, 1]
    # for reward
    R_CLIP = [-1, 1]
    # Exploration
    EXPLORATION_SCHEDULE = "Linear"
    INITIAL_EXPLORATION = 1.0
    FINAL_EXPLORATION = 0.1
    FINAL_EXPLORATION_FRAME = 1000000
    # Number of frames to run random policy and before learning starts
    REPLAY_START_SIZE = 50000
    # Max number of "do nothing" actions to be performed at start of episode
    NO_OP_MAX = 30
    # Network architecture
    INPUT_DIMS = (WIDTH, HEIGHT, AGENT_HISTORY)
    LAYER_1 = {"type": "convolutional",
               "filters": 32, "kernel_size": (8, 8),
               "stride": 4, "activation": "relu"}
    LAYER_2 = {"type": "convolutional",
               "filters": 64, "kernel_size": (4, 4),
               "stride": 2, "activation": "relu"}
    LAYER_3 = {"type": "convolutional",
               "filters": 64, "kernel_size": (3, 3),
               "stride": 1, "activation": "relu"}
    LAYER_4 = {"type": "fully_connected",
               "size": 512, "activation": "relu"}
    OUTPUT = {"type": "fully_connected"}
    MODEL = DQN
    # training duration (50 million)
    TRAINING_FRAMES = int(5e7)
    # Other hyperparams not related to paper
    # Model Save Freq
    MODEL_SAVE_FREQ = int(1e6)
    # Evaluation
    EVAL_FREQ = int(1e6)
    EVAL_STEPS = 125000
    EVAL_EPSILON = 0.05
    @classmethod
    def set_seed(cls, seed):
        """Override the class-level RNG seed."""
        cls.SEED = seed
    @classmethod
    def set_mode(cls, mode='dqn'):
        """Mutate the class attributes for the chosen variant.

        Note: changes are applied in place on the class, so calling this
        more than once accumulates (e.g. ALGO += "_test").
        """
        if mode == "testing":
            print("WARNING: using test hyperparams")
            # Interactive guard; blocks until the user confirms.
            input("Press any key to continue..")
            cls.ALGO += "_test"
            cls.REPLAY_SIZE = int(1e4)
            cls.REPLAY_START_SIZE = 100
            cls.INITIAL_EXPLORATION = 0.1
            cls.TARGET_NETWORK_UPDATE_FREQ = 1000
            cls.EVAL_FREQ = 2000
            cls.EVAL_STEPS = 1000
            cls.MODEL_SAVE_FREQ = 2500
            cls.LOG_DISPLAY_FREQ = 1
            cls.MINIBATCH_SIZE = 12
        elif mode == "eval":
            cls.ALGO += "_eval"
            cls.REPLAY_SIZE = int(1e4)
        elif mode == "ddqn":
            print("Using DDQN hyperparams")
            cls.ALGO = "DDQN"
        elif mode == "ddqn-tuned":
            print("Using DDQN-Tuned hyperparams")
            cls.ALGO = "DDQN-Tuned"
            cls.TARGET_NETWORK_UPDATE_FREQ = 30000
            cls.FINAL_EXPLORATION = 0.01
            cls.EVAL_EPSILON = 0.001
        elif mode == "dqn":
            # Defaults already match plain DQN; nothing to change.
            print("Using DQN hyperparams")
            pass
        elif mode == "duelingdqn":
            print("Using Dueling DQN hyperparams")
            cls.ALGO = "DuelingDQN"
            cls.MODEL = DuelingDQN
        elif mode == "normalized":
            print("Using normalized observations")
            cls.NORMALIZE = True
            cls.REPLAY_S_DTYPE = np.float16
        elif mode == "pong_tuned":
            print("Using pong tuned hyperparams")
            cls.REPLAY_SIZE = 100000
            cls.REPLAY_START_SIZE = 10000
            cls.INITIAL_EXPLORATION = 1.0
            cls.FINAL_EXPLORATION = 0.02
            cls.FINAL_EXPLORATION_FRAME = 100000
            # this corresponds to updating every 1000 frames
            cls.TARGET_NETWORK_UPDATE_FREQ = 250
            cls.OPTIMIZER = optim.Adam
            cls.OPTIMIZER_KWARGS = {"lr": 1e-4}
        else:
            raise ValueError("Unsupported Hyper param mode")
    @classmethod
    def get_all_hyperparams(cls):
        """Return all non-dunder, non-classmethod class attributes as a dict."""
        all_kwargs = {}
        for k, v in cls.__dict__.items():
            if not any([k.startswith("__"),
                        isinstance(v, classmethod)]):
                all_kwargs[k] = v
        return all_kwargs
|
nilq/baby-python
|
python
|
"""
Queue backend abstraction manager.
"""
import json
import logging
import sched
import socket
import time
import uuid
from typing import Any, Callable, Dict, List, Optional, Union
from pydantic import BaseModel, validator
import qcengine as qcng
from qcfractal.extras import get_information
from ..interface.data import get_molecule
from .adapters import build_queue_adapter
from .compress import compress_results
__all__ = ["QueueManager"]
class QueueStatistics(BaseModel):
    """
    Queue Manager Job statistics
    """
    # Dynamic quantities
    total_successful_tasks: int = 0
    total_failed_tasks: int = 0
    total_worker_walltime: float = 0.0
    total_task_walltime: float = 0.0
    maximum_possible_walltime: float = 0.0  # maximum_workers * time_delta, experimental
    active_task_slots: int = 0
    # Static Quantities
    max_concurrent_tasks: int = 0
    cores_per_task: int = 0
    memory_per_task: float = 0.0
    # NOTE(review): annotated float but defaults to None; __init__ below
    # always fills it, so pydantic never validates the None -- confirm.
    last_update_time: float = None
    def __init__(self, **kwargs):
        # Default the timestamp to "now" when the caller did not supply one.
        if kwargs.get("last_update_time", None) is None:
            kwargs["last_update_time"] = time.time()
        super().__init__(**kwargs)
    @property
    def total_completed_tasks(self) -> int:
        """Total finished tasks, successful and failed combined."""
        return self.total_successful_tasks + self.total_failed_tasks
    @property
    def theoretical_max_consumption(self) -> float:
        """In Core Hours"""
        return self.max_concurrent_tasks * self.cores_per_task * (time.time() - self.last_update_time) / 3600
    @property
    def active_cores(self) -> int:
        """CPU cores currently committed to active task slots."""
        return self.active_task_slots * self.cores_per_task
    @property
    def active_memory(self) -> float:
        """Memory (GiB) currently committed to active task slots."""
        return self.active_task_slots * self.memory_per_task
    @validator("cores_per_task", pre=True)
    def cores_per_tasks_none(cls, v):
        # Treat "no cores specified" as one core per task.
        if v is None:
            v = 1
        return v
class QueueManager:
"""
This object maintains a computational queue and watches for finished tasks for different
queue backends. Finished tasks are added to the database and removed from the queue.
Attributes
----------
client : FractalClient
A FractalClient connected to a server.
queue_adapter : QueueAdapter
The DBAdapter class for queue abstraction
errors : dict
A dictionary of current errors
logger : logging.logger. Optional, Default: None
A logger for the QueueManager
"""
def __init__(
self,
client: "FractalClient",
queue_client: "BaseAdapter",
logger: Optional[logging.Logger] = None,
max_tasks: int = 200,
queue_tag: Optional[Union[str, List[str]]] = None,
manager_name: str = "unlabeled",
update_frequency: Union[int, float] = 2,
verbose: bool = True,
server_error_retries: Optional[int] = 1,
stale_update_limit: Optional[int] = 10,
cores_per_task: Optional[int] = None,
memory_per_task: Optional[float] = None,
nodes_per_task: Optional[int] = None,
cores_per_rank: Optional[int] = 1,
scratch_directory: Optional[str] = None,
retries: Optional[int] = 2,
configuration: Optional[Dict[str, Any]] = None,
):
"""
Parameters
----------
client : FractalClient
A FractalClient connected to a server
queue_client : BaseAdapter
The DBAdapter class for queue abstraction
logger : Optional[logging.Logger], optional
A logger for the QueueManager
max_tasks : int, optional
The maximum number of tasks to hold at any given time
queue_tag : str, optional
Allows managers to pull from specific tags
manager_name : str, optional
The cluster the manager belongs to
update_frequency : Union[int, float], optional
The frequency to check for new tasks in seconds
verbose : bool, optional
Whether or not to have the manager be verbose (logger level debug and up)
server_error_retries : Optional[int], optional
How many times finished jobs are attempted to be pushed to the server in
in the event of a server communication error.
After number of attempts, the failed jobs are dropped from this manager and considered "stale"
Set to `None` to keep retrying
stale_update_limit : Optional[int], optional
Number of stale update attempts to keep around
If this limit is ever hit, the server initiates as shutdown as best it can
since communication with the server has gone wrong too many times.
Set to `None` for unlimited
cores_per_task : Optional[int], optional
How many CPU cores per computation task to allocate for QCEngine
None indicates "use however many you can detect"
memory_per_task : Optional[float], optional
How much memory, in GiB, per computation task to allocate for QCEngine
None indicates "use however much you can consume"
nodes_per_task : Optional[int], optional
How many nodes to use per task. Used only for node-parallel tasks
cores_per_rank: Optional[int], optional
How many CPUs per rank of an MPI application. Used only for node-parallel tasks
scratch_directory : Optional[str], optional
Scratch directory location to do QCEngine compute
None indicates "wherever the system default is"'
retries : Optional[int], optional
Number of retries that QCEngine will attempt for RandomErrors detected when running
its computations. After this many attempts (or on any other type of error), the
error will be raised.
configuration : Optional[Dict[str, Any]], optional
A JSON description of the settings used to create this object for the database.
"""
# Setup logging
if logger:
self.logger = logger
else:
self.logger = logging.getLogger("QueueManager")
self.name_data = {"cluster": manager_name, "hostname": socket.gethostname(), "uuid": str(uuid.uuid4())}
self._name = self.name_data["cluster"] + "-" + self.name_data["hostname"] + "-" + self.name_data["uuid"]
self.client = client
self.cores_per_task = cores_per_task
self.memory_per_task = memory_per_task
self.nodes_per_task = nodes_per_task or 1
self.scratch_directory = scratch_directory
self.retries = retries
self.cores_per_rank = cores_per_rank
self.configuration = configuration
self.queue_adapter = build_queue_adapter(
queue_client,
logger=self.logger,
cores_per_task=self.cores_per_task,
memory_per_task=self.memory_per_task,
nodes_per_task=self.nodes_per_task,
scratch_directory=self.scratch_directory,
cores_per_rank=self.cores_per_rank,
retries=self.retries,
verbose=verbose,
)
self.max_tasks = max_tasks
self.queue_tag = queue_tag
self.verbose = verbose
self.statistics = QueueStatistics(
max_concurrent_tasks=self.max_tasks,
cores_per_task=(cores_per_task or 0),
memory_per_task=(memory_per_task or 0),
update_frequency=update_frequency,
)
self.scheduler = None
self.update_frequency = update_frequency
self.periodic = {}
self.active = 0
self.exit_callbacks = []
# Server response/stale job handling
self.server_error_retries = server_error_retries
self.stale_update_limit = stale_update_limit
self._stale_updates_tracked = 0
self._stale_payload_tracking = []
self.n_stale_jobs = 0
# QCEngine data
self.available_programs = qcng.list_available_programs()
self.available_procedures = qcng.list_available_procedures()
# Display a warning if there are non-node-parallel programs and >1 node_per_task
if self.nodes_per_task > 1:
for name in self.available_programs:
program = qcng.get_program(name)
if not program.node_parallel:
self.logger.warning(
"Program {} is not node parallel," " but manager will use >1 node per task".format(name)
)
# Print out configuration
self.logger.info("QueueManager:")
self.logger.info(" Version: {}\n".format(get_information("version")))
if self.verbose:
self.logger.info(" Name Information:")
self.logger.info(" Cluster: {}".format(self.name_data["cluster"]))
self.logger.info(" Hostname: {}".format(self.name_data["hostname"]))
self.logger.info(" UUID: {}\n".format(self.name_data["uuid"]))
self.logger.info(" Queue Adapter:")
self.logger.info(" {}\n".format(self.queue_adapter))
if self.verbose:
self.logger.info(" QCEngine:")
self.logger.info(" Version: {}".format(qcng.__version__))
self.logger.info(" Task Cores: {}".format(self.cores_per_task))
self.logger.info(" Task Mem: {}".format(self.memory_per_task))
self.logger.info(" Task Nodes: {}".format(self.nodes_per_task))
self.logger.info(" Cores per Rank: {}".format(self.cores_per_rank))
self.logger.info(" Scratch Dir: {}".format(self.scratch_directory))
self.logger.info(" Programs: {}".format(self.available_programs))
self.logger.info(" Procedures: {}\n".format(self.available_procedures))
# DGAS Note: Note super happy about how this if/else turned out. Looking for alternatives.
if self.connected():
# Pull server info
self.server_info = client.server_information()
self.server_name = self.server_info["name"]
self.server_version = self.server_info["version"]
self.server_query_limit = self.server_info["query_limit"]
if self.max_tasks > self.server_query_limit:
self.max_tasks = self.server_query_limit
self.logger.warning(
"Max tasks was larger than server query limit of {}, reducing to match query limit.".format(
self.server_query_limit
)
)
self.heartbeat_frequency = self.server_info["heartbeat_frequency"]
# Tell the server we are up and running
payload = self._payload_template()
payload["data"]["operation"] = "startup"
payload["data"]["configuration"] = self.configuration
self.client._automodel_request("queue_manager", "put", payload)
if self.verbose:
self.logger.info(" Connected:")
self.logger.info(" Version: {}".format(self.server_version))
self.logger.info(" Address: {}".format(self.client.address))
self.logger.info(" Name: {}".format(self.server_name))
self.logger.info(" Queue tag: {}".format(self.queue_tag))
self.logger.info(" Username: {}\n".format(self.client.username))
else:
self.logger.info(" QCFractal server information:")
self.logger.info(" Not connected, some actions will not be available")
def _payload_template(self):
meta = {
**self.name_data.copy(),
# Version info
"qcengine_version": qcng.__version__,
"manager_version": get_information("version"),
# User info
"username": self.client.username,
# Pull info
"programs": self.available_programs,
"procedures": self.available_procedures,
"tag": self.queue_tag,
# Statistics
"total_worker_walltime": self.statistics.total_worker_walltime,
"total_task_walltime": self.statistics.total_task_walltime,
"active_tasks": self.statistics.active_task_slots,
"active_cores": self.statistics.active_cores,
"active_memory": self.statistics.active_memory,
}
return {"meta": meta, "data": {}}
## Accessors
def name(self) -> str:
"""
Returns the Managers full name.
"""
return self._name
def connected(self) -> bool:
"""
Checks the connection to the server.
"""
return self.client is not None
def assert_connected(self) -> None:
"""
Raises an error for functions that require a server connection.
"""
if self.connected() is False:
raise AttributeError("Manager is not connected to a server, this operations is not available.")
## Start/stop functionality
def start(self) -> None:
"""
Starts up all IOLoops and processes.
"""
self.assert_connected()
self.scheduler = sched.scheduler(time.time, time.sleep)
heartbeat_time = int(0.4 * self.heartbeat_frequency)
def scheduler_update():
self.update()
self.scheduler.enter(self.update_frequency, 1, scheduler_update)
def scheduler_heartbeat():
self.heartbeat()
self.scheduler.enter(heartbeat_time, 1, scheduler_heartbeat)
self.logger.info("QueueManager successfully started.\n")
self.scheduler.enter(0, 1, scheduler_update)
self.scheduler.enter(0, 2, scheduler_heartbeat)
self.scheduler.run()
def stop(self, signame="Not provided", signum=None, stack=None) -> None:
"""
Shuts down all IOLoops and periodic updates.
"""
self.logger.info("QueueManager received shutdown signal: {}.\n".format(signame))
# Cancel all events
if self.scheduler is not None:
for event in self.scheduler.queue:
self.scheduler.cancel(event)
# Push data back to the server
self.shutdown()
# Close down the adapter
self.close_adapter()
# Call exit callbacks
for func, args, kwargs in self.exit_callbacks:
func(*args, **kwargs)
self.logger.info("QueueManager stopping gracefully.\n")
    def close_adapter(self) -> bool:
        """
        Closes down the underlying adapter.

        Returns the adapter's own success flag.
        """
        return self.queue_adapter.close()
## Queue Manager functions
def heartbeat(self) -> None:
"""
Provides a heartbeat to the connected Server.
"""
self.assert_connected()
payload = self._payload_template()
payload["data"]["operation"] = "heartbeat"
try:
self.client._automodel_request("queue_manager", "put", payload)
self.logger.debug("Heartbeat was successful.")
except IOError:
self.logger.warning("Heartbeat was not successful.")
def shutdown(self) -> Dict[str, Any]:
"""
Shutdown the manager and returns tasks to queue.
"""
self.assert_connected()
self.update(new_tasks=False, allow_shutdown=False)
payload = self._payload_template()
payload["data"]["operation"] = "shutdown"
try:
response = self.client._automodel_request("queue_manager", "put", payload, timeout=5)
response["success"] = True
shutdown_string = "Shutdown was successful, {} tasks returned to master queue."
except IOError:
# TODO something as we didnt successfully add the data
self.logger.warning("Shutdown was not successful. This may delay queued tasks.")
response = {"nshutdown": 0, "success": False}
shutdown_string = "Shutdown was not successful, {} tasks not returned."
nshutdown = response["nshutdown"]
if self.n_stale_jobs:
shutdown_string = shutdown_string.format(
f"{min(0, nshutdown-self.n_stale_jobs)} active and {nshutdown} stale"
)
else:
shutdown_string = shutdown_string.format(nshutdown)
self.logger.info(shutdown_string)
response["info"] = shutdown_string
return response
def add_exit_callback(self, callback: Callable, *args: List[Any], **kwargs: Dict[Any, Any]) -> None:
"""Adds additional callbacks to perform when closing down the server.
Parameters
----------
callback : callable
The function to call at exit
*args
Arguments to call with the function.
**kwargs
Kwargs to call with the function.
"""
self.exit_callbacks.append((callback, args, kwargs))
    def _post_update(self, payload_data, allow_shutdown=True):
        """Internal function to post payload update

        Sends ``payload_data`` as the body of a ``post`` to the server's
        queue_manager endpoint. IOErrors propagate to the caller (which
        handles retries); any other exception triggers a best-effort
        shutdown and is re-raised.
        """
        payload = self._payload_template()
        # Update with data
        payload["data"] = payload_data
        try:
            self.client._automodel_request("queue_manager", "post", payload, full_return=True)
        except IOError:
            # Trapped behavior elsewhere
            raise
        except Exception as fatal:
            # Non IOError, something has gone very wrong
            self.logger.error(
                "An error was detected which was not an expected requests-type error. The manager "
                "will attempt shutdown as best it can. Please report this error to the QCFractal "
                "developers as this block should not be "
                "seen outside of debugging modes. Error is as follows\n{}".format(fatal)
            )
            try:
                # allow_shutdown=False guards against shutdown -> update ->
                # _post_update recursion.
                if allow_shutdown:
                    self.shutdown()
            finally:
                raise fatal
    def _update_stale_jobs(self, allow_shutdown=True):
        """
        Attempt to post the previous payload failures

        Each deferred payload is retried once per call; payloads that exceed
        ``server_error_retries`` attempts are counted as stale and dropped.
        Exceeding ``stale_update_limit`` aborts the manager entirely.
        """
        clear_indices = []
        for index, (results, attempts) in enumerate(self._stale_payload_tracking):
            try:
                self._post_update(results)
                self.logger.info(f"Successfully pushed jobs from {attempts+1} updates ago")
                self.logger.info(f"Tasks pushed: " + str(list(results.keys())))
                clear_indices.append(index)
            except IOError:
                # Tried and failed
                attempts += 1
                # Case: Still within the retry limit
                if self.server_error_retries is None or self.server_error_retries > attempts:
                    self._stale_payload_tracking[index][-1] = attempts
                    self.logger.warning(f"Could not post jobs from {attempts} updates ago, will retry on next update.")
                # Case: Over limit
                else:
                    self.logger.warning(
                        f"Could not post jobs from {attempts} ago and over attempt limit, marking " f"jobs as stale."
                    )
                    self.n_stale_jobs += len(results)
                    clear_indices.append(index)
                    self._stale_updates_tracked += 1
        # Cleanup clear indices
        # Pop from the back so earlier indices remain valid while popping.
        for index in clear_indices[::-1]:
            self._stale_payload_tracking.pop(index)
        # Check stale limiters
        if (
            self.stale_update_limit is not None
            and (len(self._stale_payload_tracking) + self._stale_updates_tracked) > self.stale_update_limit
        ):
            self.logger.error("Exceeded number of stale updates allowed! Attempting to shutdown gracefully...")
            # Log all not-quite stale jobs to stale
            for (results, _) in self._stale_payload_tracking:
                self.n_stale_jobs += len(results)
            try:
                if allow_shutdown:
                    self.shutdown()
            finally:
                raise RuntimeError("Exceeded number of stale updates allowed!")
    def update(self, new_tasks: bool = True, allow_shutdown=True) -> bool:
        """Examines the queue for completed tasks and adds successful completions to the database
        while unsuccessful are logged for future inspection.

        Parameters
        ----------
        new_tasks: bool, optional, Default: True
            Try to get new tasks from the server
        allow_shutdown: bool, optional, Default: True
            Allow function to attempt graceful shutdowns in the case of stale job or fatal error limits.
            Does not prevent errors from being raise, but mostly used to prevent infinite loops when update is
            called from `shutdown` itself
        """
        self.assert_connected()
        # Retry previously-failed result posts before handling fresh ones.
        self._update_stale_jobs(allow_shutdown=allow_shutdown)
        results = self.queue_adapter.acquire_complete()
        # Compress the stdout/stderr/error outputs
        results = compress_results(results)
        # Stats fetching for running tasks, as close to the time we got the jobs as we can
        last_time = self.statistics.last_update_time
        now = self.statistics.last_update_time = time.time()
        time_delta_seconds = now - last_time
        try:
            self.statistics.active_task_slots = self.queue_adapter.count_active_task_slots()
            log_efficiency = True
        except NotImplementedError:
            # Adapter cannot report live slot counts; skip efficiency reporting.
            log_efficiency = False
        timedelta_worker_walltime = time_delta_seconds * self.statistics.active_cores / 3600
        timedelta_maximum_walltime = (
            time_delta_seconds * self.statistics.max_concurrent_tasks * self.statistics.cores_per_task / 3600
        )
        self.statistics.total_worker_walltime += timedelta_worker_walltime
        self.statistics.maximum_possible_walltime += timedelta_maximum_walltime
        # Process jobs
        n_success = 0
        n_fail = 0
        n_result = len(results)
        task_cpu_hours = 0
        # NOTE(review): error_payload is assigned but never used below.
        error_payload = []
        if n_result:
            # For logging
            failure_messages = {}
            try:
                self._post_update(results, allow_shutdown=allow_shutdown)
                task_status = {k: "sent" for k in results.keys()}
            except IOError:
                if self.server_error_retries is None or self.server_error_retries > 0:
                    self.logger.warning("Post complete tasks was not successful. Attempting again on next update.")
                    self._stale_payload_tracking.append([results, 0])
                    task_status = {k: "deferred" for k in results.keys()}
                else:
                    self.logger.warning("Post complete tasks was not successful. Data may be lost.")
                    self.n_stale_jobs += len(results)
                    task_status = {k: "unknown_error" for k in results.keys()}
            self.active -= n_result
            for key, result in results.items():
                wall_time_seconds = 0
                if result.success:
                    n_success += 1
                    if hasattr(result.provenance, "wall_time"):
                        wall_time_seconds = float(result.provenance.wall_time)
                    task_status[key] += " / success"
                else:
                    task_status[key] += f" / failed: {result.error.error_type}"
                    failure_messages[key] = result.error
                    # Try to get the wall time in the most fault-tolerant way
                    try:
                        wall_time_seconds = float(result.input_data.get("provenance", {}).get("wall_time", 0))
                    except AttributeError:
                        # Trap the result.input_data is None, but let other attribute errors go
                        if result.input_data is None:
                            wall_time_seconds = 0
                        else:
                            raise
                    except TypeError:
                        # Trap wall time corruption, e.g. float(None)
                        # Other Result corruptions will raise an error correctly
                        wall_time_seconds = 0
                task_cpu_hours += wall_time_seconds * self.statistics.cores_per_task / 3600
            n_fail = n_result - n_success
            # Now print out all the info
            self.logger.info(f"Processed {len(results)} tasks: {n_success} succeeded / {n_fail} failed).")
            self.logger.info(f"Task ids, submission status, calculation status below")
            for task_id, status_msg in task_status.items():
                self.logger.info(f"    Task {task_id} : {status_msg}")
            if n_fail:
                self.logger.info("The following tasks failed with the errors:")
                for task_id, error_info in failure_messages.items():
                    self.logger.info(f"Error message for task id {task_id}")
                    self.logger.info("    Error type: " + str(error_info.error_type))
                    self.logger.info("    Backtrace: \n" + str(error_info.error_message))
        open_slots = max(0, self.max_tasks - self.active)
        # Crunch Statistics
        self.statistics.total_failed_tasks += n_fail
        self.statistics.total_successful_tasks += n_success
        self.statistics.total_task_walltime += task_cpu_hours
        na_format = ""
        float_format = ",.2f"
        if self.statistics.total_completed_tasks == 0:
            task_stats_str = "Task statistics unavailable until first tasks return"
            worker_stats_str = None
        else:
            success_rate = self.statistics.total_successful_tasks / self.statistics.total_completed_tasks * 100
            success_format = float_format
            task_stats_str = (
                f"Task Stats: Processed={self.statistics.total_completed_tasks}, "
                f"Failed={self.statistics.total_failed_tasks}, "
                f"Success={success_rate:{success_format}}%"
            )
            worker_stats_str = (
                f"Worker Stats (est.): Core Hours Used={self.statistics.total_worker_walltime:{float_format}}"
            )
            # Handle efficiency calculations
            if log_efficiency:
                # Efficiency calculated as:
                #   sum_task(task_wall_time * nthread / task)
                #   -------------------------------------------------------------
                if self.statistics.total_task_walltime == 0 or self.statistics.maximum_possible_walltime == 0:
                    efficiency_of_running = "(N/A yet)"
                    efficiency_of_potential = "(N/A yet)"
                    efficiency_format = na_format
                else:
                    efficiency_of_running = (
                        self.statistics.total_task_walltime / self.statistics.total_worker_walltime * 100
                    )
                    efficiency_of_potential = (
                        self.statistics.total_worker_walltime / self.statistics.maximum_possible_walltime * 100
                    )
                    efficiency_format = float_format
                worker_stats_str += f", Core Usage Efficiency: {efficiency_of_running:{efficiency_format}}%"
                if self.verbose:
                    worker_stats_str += (
                        f", Core Usage vs. Max Resources Requested: " f"{efficiency_of_potential:{efficiency_format}}%"
                    )
        self.logger.info(task_stats_str)
        if worker_stats_str is not None:
            self.logger.info(worker_stats_str)
        if (new_tasks is False) or (open_slots == 0):
            return True
        # Get new tasks
        payload = self._payload_template()
        payload["data"]["limit"] = open_slots
        try:
            new_tasks = self.client._automodel_request("queue_manager", "get", payload)
        except IOError:
            # TODO something as we didnt successfully get data
            self.logger.warning("Acquisition of new tasks was not successful.")
            return False
        self.logger.info("Acquired {} new tasks.".format(len(new_tasks)))
        # Add new tasks to queue
        self.queue_adapter.submit_tasks(new_tasks)
        self.active += len(new_tasks)
        return True
    def await_results(self) -> bool:
        """A synchronous method for testing or small launches
        that awaits task completion.

        Returns
        -------
        bool
            Return True if the operation completed successfully
        """
        self.assert_connected()
        # Pull new work, block until the adapter drains, then push results
        # back without acquiring more tasks.
        self.update()
        self.queue_adapter.await_results()
        self.update(new_tasks=False)
        return True
    def list_current_tasks(self) -> List[Any]:
        """Provides a list of tasks currently in the queue along
        with the associated keys.

        Returns
        -------
        ret : list of tuples
            All tasks currently still in the database
        """
        # Delegates entirely to the adapter; no server round-trip involved.
        return self.queue_adapter.list_tasks()
def test(self, n=1) -> bool:
"""
Tests all known programs with simple inputs to check if the Adapter is correctly instantiated.
"""
from qcfractal import testing
self.logger.info("Testing requested, generating tasks")
task_base = json.dumps(
{
"spec": {
"function": "qcengine.compute",
"args": [
{
"molecule": get_molecule("hooh.json").dict(encoding="json"),
"driver": "energy",
"model": {},
"keywords": {},
},
"program",
],
"kwargs": {},
},
"parser": "single",
}
)
programs = {
"rdkit": {"method": "UFF", "basis": None},
"torchani": {"method": "ANI1", "basis": None},
"psi4": {"method": "HF", "basis": "sto-3g"},
}
tasks = []
found_programs = []
for program, model in programs.items():
if testing.has_module(program):
self.logger.info("Found program {}, adding to testing queue.".format(program))
else:
self.logger.warning("Could not find program {}, skipping tests.".format(program))
continue
for x in range(n):
task = json.loads(task_base)
program_id = program + str(x)
task["id"] = program_id
task["spec"]["args"][0]["model"] = model
task["spec"]["args"][0]["keywords"] = {"e_convergence": (x * 1.0e-6 + 1.0e-6)}
task["spec"]["args"][1] = program
tasks.append(task)
found_programs.append(program_id)
self.queue_adapter.submit_tasks(tasks)
self.logger.info("Testing tasks submitting, awaiting results.\n")
self.queue_adapter.await_results()
results = self.queue_adapter.acquire_complete()
self.logger.info("Testing results acquired.")
missing_programs = results.keys() - set(found_programs)
if len(missing_programs):
self.logger.error("Not all tasks were retrieved, missing programs {}.".format(missing_programs))
raise ValueError("Testing failed, not all tasks were retrieved.")
else:
self.logger.info("All tasks retrieved successfully.")
failures = 0
fail_report = {}
for k, result in results.items():
if result.success:
self.logger.info(" {} - PASSED".format(k))
else:
self.logger.error(" {} - FAILED!".format(k))
failed_program = "Return Mangled!" # This should almost never be seen, but is in place as a fallback
for program in programs.keys():
if program in k:
failed_program = program
break
if failed_program not in fail_report:
fail_report[failed_program] = (
f"On test {k}:"
f"\nException Type: {result.error.error_type}"
f"\nException Message: {result.error.error_message}"
)
failures += 1
if failures:
self.logger.error("{}/{} tasks failed!".format(failures, len(results)))
self.logger.error(
f"A sample error from each program to help:\n" + "\n".join([e for e in fail_report.values()])
)
return False
else:
self.logger.info("All tasks completed successfully!")
return True
|
nilq/baby-python
|
python
|
import logging
from importlib import import_module
from .groups import Groups
log = logging.getLogger(__name__)
class ConfigHelper:
    """Parse the bot's raw config dict into per-category configs and groups,
    and import/instantiate/register the cogs listed in the remaining config."""

    @classmethod
    def cog_name(cls, key):
        """Convert a snake_case config key into the CamelCase cog class name."""
        return ''.join(map(str.capitalize, key.split('_')))

    # (config_key_at_root, group_type, group_name): root-level config entries
    # that are folded into the corresponding Groups container.
    CONFIG_GROUP_MAPPINGS = [
        ('sudo', 'user', 'sudo'),
        ('sysbot_channels', 'channel', 'sysbots')
    ]

    def __init__(self, bot, config):
        self.bot = bot
        self.configs = {
            'guild': config.pop('guilds', {}),
            'channel': config.pop('channels', {}),
            'user': config.pop('users', {})
        }
        self.groups = {
            'guild': Groups(config.pop('guild_groups', {}),
                            config.pop('guild_groups_save', None)),
            'channel': Groups(config.pop('channel_groups', {}),
                              config.pop('channel_groups_save', None)),
            # Default aligned with the other group types (was `{}`, which
            # would pass an empty dict as the save target instead of "none").
            'user': Groups(config.pop('user_groups', {}),
                           config.pop('user_groups_save', None)),
        }
        # Map some config from root to user/channel groups
        for name, group_type, map_to in self.CONFIG_GROUP_MAPPINGS:
            self.groups[group_type].update({map_to: config.pop(name, {})})
        self.motd = config.pop('motd', 'motd.txt')
        # The remaining configs are used to load cogs
        self.cog_config = config
        self.cog_list = set()

    def get_config(self, category, key=None):
        """Return the merged config for `category`: global (non-int-keyed)
        entries overlaid with the entry for `key` (a guild/channel/user id)."""
        raw_config = self.configs[category]
        # Filter all non-int keys as global config.
        # BUG FIX: iterating a dict yields keys only; .items() is required to
        # unpack (k, v) pairs (the original raised ValueError on any entry).
        config = {k: v for k, v in raw_config.items() if not isinstance(k, int)}
        # apply guild specific configs
        if key in raw_config:
            config.update(raw_config[key])
        return config

    def get_cog(self, key):
        """Look up a loaded cog instance on the bot by its config key."""
        return self.bot.get_cog(self.cog_name(key))

    def get_motd(self):
        """Return the stripped MOTD file contents, or None if disabled or missing."""
        if not self.motd:
            return
        try:
            with open(self.motd, 'r') as f:
                motd = f.read().strip()
                return motd
        except FileNotFoundError:
            log.info(f'{self.motd} not found, will not print MOTD.')

    def template_variables_base(self, ctx):
        """Base variables made available to message templates for a command context."""
        result = {'ctx': ctx}
        if hasattr(ctx, 'author'):
            result.update(
                name=ctx.author.name,
                mention=ctx.author.mention)
        return result

    def register_all_cogs(self):
        """Import, instantiate (optionally with a nested Config), and register
        every cog listed in the remaining config sections."""
        # Load the cogs from config file
        for pkg, configs in self.cog_config.items():
            for cog_key, args in configs.items():
                module_name = f"{pkg}.{cog_key}"
                cls_name = self.cog_name(cog_key)
                module = import_module(module_name)
                if not hasattr(module, cls_name):
                    # logging's warn() alias is deprecated; use warning().
                    log.warning('Unable to load cog %s from package %s!', cls_name, module_name)
                    continue
                cls = getattr(module, cls_name)
                # Create a cog instance (with config) and add to the bot
                if hasattr(cls, 'Config'):
                    log.info('Load cog with config: %s', cls_name)
                    cls_config = getattr(cls, 'Config')
                    # Dispatch on the shape of args: mapping -> kwargs,
                    # sequence -> positional, scalar -> single argument.
                    if isinstance(args, dict):
                        instance = cls(self.bot, cls_config(**args))
                    elif isinstance(args, list):
                        instance = cls(self.bot, cls_config(*args))
                    else:
                        instance = cls(self.bot, cls_config(args))
                else:
                    log.info('Load cog: %s', cls_name)
                    instance = cls(self.bot)
                self.bot.add_cog(instance)
                self.cog_list.add(cls_name)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.apps import apps, AppConfig
class PanopticonConfig(AppConfig):
    """Django AppConfig that wires up Panopticon health checks at startup."""

    name = "panopticon.django"
    label = "panopticon"
    verbose_name = "Panopticon"

    def ready(self):
        """Discover healthcheck modules in every installed app, then configure DataDog."""
        # Lazy generator over the module names of all installed apps.
        installed = (app_config.module.__name__ for app_config in apps.get_app_configs())
        from panopticon.loader import load_healthcheck_modules
        load_healthcheck_modules(installed)
        from django.conf import settings
        from panopticon.datadog import DataDog
        DataDog.configure_settings(settings)
nilq/baby-python
|
python
|
import numpy as np
from numba import jit
from scipy.sparse.construct import random
from ..tools import compute_dist
from ._utils import _CheckInputs
from .base import IndependenceTest, IndependenceTestOutput
from scipy.stats import rankdata
class HHG(IndependenceTest):
    r"""
    Heller Heller Gorfine (HHG) test statistic and p-value.

    This is a powerful test for independence based on calculating pairwise
    Euclidean distances and associations between these distance matrices. The
    test statistic is a function of ranks of these distances, and is
    consistent against similar tests
    :footcite:p:`hellerConsistentMultivariateTest2013`. It can also operate on multiple
    dimensions :footcite:p:`hellerConsistentMultivariateTest2013`.

    Parameters
    ----------
    compute_distance : str, callable, or None, default: "euclidean"
        A function that computes the distance among the samples within each
        data matrix.
        Valid strings for ``compute_distance`` are, as defined in
        :func:`sklearn.metrics.pairwise_distances`,

        - From scikit-learn: [``"euclidean"``, ``"cityblock"``, ``"cosine"``,
          ``"l1"``, ``"l2"``, ``"manhattan"``] See the documentation for
          :mod:`scipy.spatial.distance` for details
          on these metrics.
        - From scipy.spatial.distance: [``"braycurtis"``, ``"canberra"``,
          ``"chebyshev"``, ``"correlation"``, ``"dice"``, ``"hamming"``,
          ``"jaccard"``, ``"kulsinski"``, ``"mahalanobis"``, ``"minkowski"``,
          ``"rogerstanimoto"``, ``"russellrao"``, ``"seuclidean"``,
          ``"sokalmichener"``, ``"sokalsneath"``, ``"sqeuclidean"``,
          ``"yule"``] See the documentation for :mod:`scipy.spatial.distance` for
          details on these metrics.

        Set to ``None`` or ``"precomputed"`` if ``x`` and ``y`` are already distance
        matrices. To call a custom function, either create the distance matrix
        before-hand or create a function of the form ``metric(x, **kwargs)``
        where ``x`` is the data matrix for which pairwise distances are
        calculated and ``**kwargs`` are extra arguments to send to your custom
        function.
    **kwargs
        Arbitrary keyword arguments for ``compute_distance``.

    Notes
    -----
    The statistic can be derived as follows
    :footcite:p:`hellerConsistentMultivariateTest2013`:

    Let :math:`x` and :math:`y` be :math:`(n, p)` samples of random variables
    :math:`X` and :math:`Y`. For every sample :math:`j \neq i`, calculate the
    pairwise distances in :math:`x` and :math:`y` and denote this as
    :math:`d_x(x_i, x_j)` and :math:`d_y(y_i, y_j)`. The indicator function is
    denoted as :math:`\mathbb{1} \{ \cdot \}`. The cross-classification
    between these two random variables can be calculated as

    .. math::

        A_{11} = \sum_{k=1, k \neq i,j}^n
        \mathbb{1} \{ d_x(x_i, x_k) \leq d_x(x_i, x_j) \}
        \mathbb{1} \{ d_y(y_i, y_k) \leq d_y(y_i, y_j) \}

    and :math:`A_{12}`, :math:`A_{21}`, and :math:`A_{22}` are defined
    similarly. This is organized within the following table:

    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    |                                            | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` |                           |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` | :math:`A_{11} (i,j)`                       | :math:`A_{12} (i,j)`                       | :math:`A_{1 \cdot} (i,j)` |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    | :math:`d_x(x_i, \cdot) > d_x(x_i, x_j)`    | :math:`A_{21} (i,j)`                       | :math:`A_{22} (i,j)`                       | :math:`A_{2 \cdot} (i,j)` |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    |                                            | :math:`A_{\cdot 1} (i,j)`                  | :math:`A_{\cdot 2} (i,j)`                  | :math:`n - 2`             |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+

    Here, :math:`A_{\cdot 1}` and :math:`A_{\cdot 2}` are the column sums,
    :math:`A_{1 \cdot}` and :math:`A_{2 \cdot}` are the row sums, and
    :math:`n - 2` is the number of degrees of freedom. From this table, we can
    calculate the Pearson's chi squared test statistic using,

    .. math::

        S(i, j) = \frac{(n-2) (A_{12} A_{21} - A_{11} A_{22})^2}
        {A_{1 \cdot} A_{2 \cdot} A_{\cdot 1} A_{\cdot 2}}

    and the HHG test statistic is then,

    .. math::

        \mathrm{HHG}_n (x, y) = \sum_{i=1}^n \sum_{j=1, j \neq i}^n S(i, j)

    The p-value returned is calculated using a permutation test using
    :math:`hyppo.tools.perm_test`.

    The fast version of this test performs a multivariate independence test
    based on univariate test statistics :footcite:p:`hellerMultivariateTestsOfAssociation2016`.
    The univariate test statistic used is Hoeffding's independence test, derived as follows
    :footcite:p:`sasHoeffdingDependenceCoefficient`:

    Let :math:`x` and :math:`y` be :math:`(n, p)` samples of random variables
    :math:`X` and :math:`Y`. A center point - the center of mass of points in 'X' and 'Y'
    - is chosen. For every sample :math:`i`, calculate the distances from the center point
    in :math:`x` and :math:`y` and denote this as :math:`d_x(x_i)`
    and :math:`d_y(y_i)`. This will create a 1D collection of distances for each
    sample group.

    From these distances, we can calculate the Hoeffding's dependence score between
    the two groups - denoted as :math:`D` - using,

    .. math::

        D &= \frac{(n-2) (n-3) D_{1} + D_{2} - 2(n-2) D_{3}}
        {n (n-1) (n-2) (n-3) (n-4)}

        D_{1} &= \sum_{i} (Q_{i}-1) (Q_{i}-2)

        D_{2} &= \sum_{i} (R_{i} - 1) (R_{i} - 2) (S_{i} - 1) (S_{i} - 2)

        D_{3} &= \sum_{i} {R_{i} - 2} (S_{i} - 2) (Q_{i}-1)

    where :math:`R_{i}` is the rank of :math:`x_{i}`,
    :math:`D_{i}` is the rank of :math:`y_{i}`,
    :math:`Q_{i}` is the bivariate rank = 1 plus the number of points with both x and y
    values less than the :math:`i`-th point.

    :math:`D` is notably sensitive to ties and gets smaller the more pairs of variables with identical values.
    If there are no ties in the data,D ranges between -0.5 and 1, with 1 indicating complete dependence.
    :footcite:p:`sasHoeffdingDependenceCoefficient`

    The p-value returned is calculated using a permutation test using
    :meth:`hyppo.tools.perm_test`.

    References
    ----------
    .. footbibliography::
    """

    def __init__(self, compute_distance="euclidean", **kwargs):
        # is_distance is True when x/y are given as precomputed distances
        # (compute_distance=None or "").
        self.is_distance = False
        if not compute_distance:
            self.is_distance = True
        # Fast (Hoeffding-based) mode flag; toggled by test(auto=...).
        self.auto = False
        IndependenceTest.__init__(self, compute_distance=compute_distance, **kwargs)

    def statistic(self, x, y):
        r"""
        Helper function that calculates the HHG test statistic.

        Parameters
        ----------
        x,y : ndarray of float
            Input data matrices. ``x`` and ``y`` must have the same number of
            samples. That is, the shapes must be ``(n, p)`` and ``(n, q)`` where
            `n` is the number of samples and `p` and `q` are the number of
            dimensions.
            Alternatively, ``x`` and ``y`` can be distance matrices,
            where the shapes must both be ``(n, n)``.
            For fast version, ``x`` and ``y`` can be 1D collections of distances
            from a chosen center point, where the shapes must be ``(n,1)`` or ``(n-1,1)``
            depending on choice of center point.

        Returns
        -------
        stat : float
            The computed HHG statistic.
        """
        distx = x
        disty = y
        if self.auto:
            # Fast path: Hoeffding's D on 1D center-point distances.
            # PERF FIX: compute center-point distances directly from x/y
            # instead of first building (and discarding) the full pairwise
            # distance matrices as before; results are unchanged.
            if not self.is_distance:
                distx, disty = _centerpoint_dist(
                    x, y, metric=self.compute_distance, **self.kwargs
                )
            stat = hoeffdings(distx, disty)
        else:
            if not self.is_distance:
                distx, disty = compute_dist(
                    x, y, metric=self.compute_distance, **self.kwargs
                )
            # Sum the chi-squared table statistic over all off-diagonal pairs.
            S = _pearson_stat(distx, disty)
            mask = np.ones(S.shape, dtype=bool)
            np.fill_diagonal(mask, 0)
            stat = np.sum(S[mask])
        self.stat = stat
        return stat

    def test(self, x, y, reps=1000, workers=1, auto=False, random_state=None):
        r"""
        Calculates the HHG test statistic and p-value.

        Parameters
        ----------
        x,y : ndarray of float
            Input data matrices. ``x`` and ``y`` must have the same number of
            samples. That is, the shapes must be ``(n, p)`` and ``(n, q)`` where
            `n` is the number of samples and `p` and `q` are the number of
            dimensions.
            Alternatively, ``x`` and ``y`` can be distance matrices,
            where the shapes must both be ``(n, n)``.
            For fast version, ``x`` and ``y`` can be 1D collections of distances
            from a chosen center point, where the shapes must be ``(n,1)`` or ``(n-1,1)``
            depending on choice of center point.
        reps : int, default: 1000
            The number of replications used to estimate the null distribution
            when using the permutation test used to calculate the p-value.
        workers : int, default: 1
            The number of cores to parallelize the p-value computation over.
            Supply ``-1`` to use all cores available to the Process.
        auto : boolean, default: False
            Automatically use fast approximation of HHG test. :class:`hyppo.tools.perm_test`
            will still be run.

        Returns
        -------
        stat : float
            The computed HHG statistic.
        pvalue : float
            The computed HHG p-value.

        Examples
        --------
        >>> import numpy as np
        >>> from hyppo.independence import HHG
        >>> x = np.arange(7)
        >>> y = x
        >>> stat, pvalue = HHG().test(x, y)
        >>> '%.1f, %.2f' % (stat, pvalue)
        '160.0, 0.00'

        In addition, the inputs can be distance matrices. Using this is the,
        same as before, except the ``compute_distance`` parameter must be set
        to ``None``.

        >>> import numpy as np
        >>> from hyppo.independence import HHG
        >>> x = np.ones((10, 10)) - np.identity(10)
        >>> y = 2 * x
        >>> hhg = HHG(compute_distance=None)
        >>> stat, pvalue = hhg.test(x, y)
        >>> '%.1f, %.2f' % (stat, pvalue)
        '0.0, 1.00'
        """
        # NOTE(review): random_state is accepted but not forwarded anywhere --
        # confirm whether it should reach the permutation test.
        check_input = _CheckInputs(x, y, reps=reps)
        x, y = check_input()
        self.auto = auto
        # Fast HHG Test
        if self.auto:
            distx, disty = _centerpoint_dist(
                x, y, metric=self.compute_distance, **self.kwargs
            )
            self.is_distance = True
            stat, pvalue = super(HHG, self).test(
                distx, disty, reps, workers, is_distsim=False
            )
        else:
            x, y = compute_dist(x, y, metric=self.compute_distance, **self.kwargs)
            self.is_distance = True
            stat, pvalue = super(HHG, self).test(x, y, reps, workers)
        return IndependenceTestOutput(stat, pvalue)
@jit(nopython=True, cache=True)
def _pearson_stat(distx, disty): # pragma: no cover
    """Calculate the Pearson chi square stats

    For every ordered pair (i, j), build the 2x2 cross-classification of the
    remaining points by whether they lie within distance d(i, j) of point i
    in each space, and accumulate the chi-squared statistic S(i, j).
    """
    n = distx.shape[0]
    S = np.zeros((n, n))
    # iterate over all samples in the distance matrix
    for i in range(n):
        for j in range(n):
            if i != j:
                # Indicator vectors: within-radius in x-space and in y-space.
                a = distx[i, :] <= distx[i, j]
                b = disty[i, :] <= disty[i, j]
                # Subtract 2 from the joint count to drop the degenerate
                # k == i and k == j entries (assumes a zero diagonal so both
                # indicators are always true there -- confirm for custom metrics).
                t11 = np.sum(a * b) - 2
                t12 = np.sum(a * (1 - b))
                t21 = np.sum((1 - a) * b)
                t22 = np.sum((1 - a) * (1 - b))
                # Guard against zero marginals (denominator of the chi-square).
                denom = (t11 + t12) * (t21 + t22) * (t11 + t21) * (t12 + t22)
                if denom > 0:
                    S[i, j] = ((n - 2) * (t12 * t21 - t11 * t22) ** 2) / denom
    return S
def hoeffdings(x, y):
    """For fast HHG, calculates the Hoeffding's dependence statistic"""
    # Rank each 1D distance collection, then delegate the D computation to
    # the jit-compiled kernel (which needs the shape tuple for the count).
    x_ranks = rankdata(x)
    y_ranks = rankdata(y)
    shape = x.shape
    return _hoeffdings_d_calc(x_ranks, y_ranks, shape)
@jit(nopython=True, cache=True)
def _hoeffdings_d_calc(R, S, N): # pragma: no cover
    """Compute Hoeffding's D from rank vectors R and S.

    N is the shape tuple of the input sample (N[0] is the sample count).
    Ties receive the standard fractional corrections (1/4 for ties in both
    coordinates, 1/2 for ties in one) when computing the bivariate rank Q.
    """
    Q = np.ones(N[0])
    for i in range(0, N[0]):
        # Q[i]: bivariate rank = 1 + number of points strictly below point i
        # in both coordinates, plus tie corrections.
        Q[i] = Q[i] + np.sum(np.bitwise_and(R < R[i], S < S[i]))
        Q[i] = Q[i] + 1 / 4 * (np.sum(np.bitwise_and(R == R[i], S == S[i])) - 1)
        Q[i] = Q[i] + 1 / 2 * (np.sum(np.bitwise_and(R == R[i], S < S[i])))
        Q[i] = Q[i] + 1 / 2 * (np.sum(np.bitwise_and(R < R[i], S == S[i])))
    # D1, D2, D3 terms of the Hoeffding formula.
    D1 = np.sum(np.multiply((Q - 1), (Q - 2)))
    D2 = np.sum(
        np.multiply(np.multiply((R - 1), (R - 2)), np.multiply((S - 1), (S - 2)))
    )
    D3 = np.sum(np.multiply(np.multiply((R - 2), (S - 2)), (Q - 1)))
    D = (
        30
        * ((N[0] - 2) * (N[0] - 3) * D1 + D2 - 2 * (N[0] - 2) * D3)
        / (N[0] * (N[0] - 1) * (N[0] - 2) * (N[0] - 3) * (N[0] - 4))
    )
    return D
def _centerpoint_dist(x, y, metric, **kwargs):
    """Calculate the distance of x and y from center of mass"""
    # Center of mass of each sample cloud, shaped as a single "point" row.
    center_x = np.array(np.mean(x, axis=0)).reshape(1, -1)
    center_y = np.array(np.mean(y, axis=0)).reshape(1, -1)
    # Prepend the centers so row 0 of each distance matrix holds the
    # center-to-sample distances.
    distx, disty = compute_dist(
        np.concatenate((center_x, x)),
        np.concatenate((center_y, y)),
        metric=metric,
        **kwargs
    )
    # take first row of distance matrix = distance of sample points from center point
    # (drop entry 0, the center-to-center distance) and return as columns.
    distx = distx[0, 1:].reshape(-1, 1)
    disty = disty[0, 1:].reshape(-1, 1)
    return distx, disty
|
nilq/baby-python
|
python
|
from django.contrib import admin
from spaweb.models import Customer, ProductCategory, City
from spaweb.models import Product, Order, OrderItem
from spaweb.models import BusinessDirection, Topic
# Simple registrations: the default ModelAdmin is sufficient for these models.
admin.site.register(ProductCategory)
admin.site.register(City)
admin.site.register(BusinessDirection)
admin.site.register(Topic)
class OrderItemInline(admin.TabularInline):
    """Inline editor for the items belonging to an order."""
    model = OrderItem
    # No extra blank item rows.
    extra = 0
    # NOTE(review): `list_display` is not an InlineModelAdmin option (inlines
    # use `fields`); as written this attribute has no effect -- confirm intent.
    list_display = ['product', 'quantity', 'order_cost']
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """Order change list with its items editable inline."""
    # NOTE(review): 'registrated_at' matches the model field name as spelled.
    list_display = ['registrated_at','is_complete', 'is_digital', 'customer']
    inlines = [OrderItemInline]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Product change list, filterable by category."""
    list_filter = ['category']
class OrderInline(admin.TabularInline):
    """Inline showing a customer's orders."""
    model = Order
    # NOTE(review): `list_display` is not an InlineModelAdmin option (inlines
    # use `fields`); as written this attribute has no effect -- confirm intent.
    list_display = ['registrated_at', 'is_complete']
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    """Customer change view with the customer's orders inline."""
    inlines = [OrderInline]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Pype custom errors."""
class PypeError(Exception):
    """Base exception type for Pype-specific errors."""
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.