id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5194430 | # Functions
# Write a function that converts a floating point number to a string
# with a specific number of digits past the decimal place.
def solution(f, places):
    """Format the float *f* with exactly *places* digits after the decimal point."""
    return format(f, f".{places}f")
# Write a function that returns true if a number is between 2 and 7, exclusive.
def solution(n):
    """Return True if n is strictly between 2 and 7 (exclusive on both ends).

    Idiom fix: the comparison already yields a bool, so the if/else that
    returned True/False explicitly is replaced by a chained comparison.
    """
    return 2 < n < 7
# Return the larger of two characters
def solution(a, b):
    """Return the larger of the two characters; *b* wins ties (as before)."""
    return b if b >= a else a
# This function will analyze its input and return a string based on it
def solution(n):
    """Map 0/1/2 to "none"/"one"/"two"; any other value maps to "some"."""
    names = {0: "none", 1: "one", 2: "two"}
    return names.get(n, "some")
# Return the larger of three numbers
def solution(a, b, c):
    """Return the largest of three numbers.

    Bug fix: the original strict-comparison chain failed on ties — e.g.
    solution(1, 2, 2) hit no branch (2 > 2 is False both ways) and fell
    through to `return a`, the smallest value. `max` handles ties correctly.
    """
    return max(a, b, c)
# Return the largest of six numbers
def solution(a, b, c, d, e, f):
    """Return the largest of six numbers."""
    return max((a, b, c, d, e, f))
# Return the largest of six numbers
def solution(a, b, c, d, e, f):
    """Return the largest of six numbers (running-maximum scan)."""
    best = a
    for candidate in (b, c, d, e, f):
        if candidate > best:
            best = candidate
    return best
# This function will run a set of tests on three numbers.
# If the test passes, the function will return the string "OK".
# Otherwise it returns "NOK"
def solution(a, b, c):
    """Run the three-number test; return "OK" when it passes, else "NOK".

    The two original OK branches shared `a > b and b <= c`, differing only
    in their third condition, so they are merged with a disjunction.
    """
    passes = a > b and b <= c and (3 * a > c or c <= a)
    return "OK" if passes else "NOK"
# If there isn't enough in the account to make the withdrawal, return -1.
# Otherwise return the remaining balance after the withdrawal.
def solution(amount, balance):
    """Return the balance remaining after withdrawing *amount*, or -1 when
    the account does not hold enough to cover the withdrawal."""
    return balance - amount if amount <= balance else -1
#
| StarcoderdataPython |
3384223 | """Defines class for json schema version 2019-09."""
# builtin
from datetime import datetime
# package
from .version import SchemaVersion
from ..types import JSONSchema
class JSONSchema2019_09(SchemaVersion):
    """Class representing JSON schema version 2019-09.

    Registers accessor callbacks for the standard top-level keywords of a
    2019-09 schema document with the SchemaVersion base class.
    """

    def __init__(self) -> None:
        """Wire the 2019-09 accessors into the SchemaVersion base class."""
        super().__init__(
            name="2019-09",
            published=datetime(2019, 9, 16),
            getSchemaVersion=self._getSchemaVersion,
            getId=self._getId,
            getTitle=self._getTitle,
            getDescription=self._getDescription,
        )

    def _getSchemaVersion(self, schema: JSONSchema) -> str:
        """Return the "$schema" URI of *schema* (KeyError if absent)."""
        ver: str = schema["$schema"]
        return ver

    def _getId(self, schema: JSONSchema) -> str:
        """Return the "$id" of *schema* (KeyError if absent)."""
        # Renamed local from ``id`` to avoid shadowing the ``id`` builtin.
        schema_id: str = schema["$id"]
        return schema_id

    def _getTitle(self, schema: JSONSchema) -> str:
        """Return the "title" of *schema* (KeyError if absent)."""
        title: str = schema["title"]
        return title

    def _getDescription(self, schema: JSONSchema) -> str:
        """Return the "description" of *schema* (KeyError if absent)."""
        desc: str = schema["description"]
        return desc
| StarcoderdataPython |
6672508 | <filename>ensemble.py
""" Ensemble을 수행하는 코드입니다. """
import json
import os.path as p
from collections import defaultdict
import numpy as np
from tqdm.auto import tqdm
from utils.tools import get_args, update_args
from utils.prepare import get_retriever, get_reader, get_dataset
TOPK = 5
MAX_ANSWER_LENGTH = 30
OFFSET_DEFAULT = 0
SPAN_DEFAULT = 0
def postprocess(predictions, key="sp"):
    """Shift the non-default entries of every *key* score array in place.

    (Original Korean docstring: "make the minimum of the non-zero values 1".)

    :param predictions: nested dict predictions[que_id][doc_id][key] -> ndarray
    :param key: which score array to shift ("sp", "ep" or "span")
    """
    # First pass: collect the per-document minima across all questions.
    min_value_list = []
    for que_id in predictions.keys():
        for doc_id in predictions[que_id].keys():
            doc_min_score = predictions[que_id][doc_id][key].min()
            min_value_list.append(doc_min_score)
    # NOTE(review): ``min + 1`` only produces a positive shift when the global
    # minimum is non-negative; for negative standardized logits ``-min + 1``
    # may have been intended -- confirm against logit_list_standardization.
    # Also raises ValueError when ``predictions`` is empty (min of []).
    best_min = min(min_value_list) + 1
    # Second pass: shift only slots that were actually written to (comparison
    # uses OFFSET_DEFAULT even for the "span" key; both defaults are 0).
    for que_id in predictions.keys():
        for doc_id in predictions[que_id].keys():
            f_idxs = np.where(predictions[que_id][doc_id][key] != OFFSET_DEFAULT)
            predictions[que_id][doc_id][key][f_idxs] += best_min
def offset_postprocess(predictions):
    """Apply the score shift to both the start ("sp") and end ("ep") arrays."""
    for score_key in ("sp", "ep"):
        postprocess(predictions, key=score_key)
def span_postprocess(predictions):
    """Apply the score shift to the accumulated span score arrays."""
    postprocess(predictions, key="span")
def logit_list_standardization(logit_list):
    """Standardize all start/end logits in place to zero mean / unit variance.

    Start and end logits are standardized independently, using statistics
    computed over every logit in every sub-list. Returns the (mutated) input.
    """
    start_vals = np.array([lg["start_logit"] for logits in logit_list for lg in logits])
    end_vals = np.array([lg["end_logit"] for logits in logit_list for lg in logits])
    s_mean, s_std = start_vals.mean(), start_vals.std()
    e_mean, e_std = end_vals.mean(), end_vals.std()
    for logits in logit_list:
        for lg in logits:
            lg["start_logit"] = (lg["start_logit"] - s_mean) / s_std
            lg["end_logit"] = (lg["end_logit"] - e_mean) / e_std
    return logit_list
def update_hard_offsets(start_scores, end_scores, logits):
    """Hard-voting update: keep the maximum logit seen at each offset.

    Bug fix: the end-score update previously read from ``start_scores``
    (``max(start_scores[...], logit["end_logit"])``) — a copy/paste error
    that polluted end scores with start scores. It now reads ``end_scores``.

    :param start_scores: per-offset start scores, mutated in place
    :param end_scores: per-offset end scores, mutated in place
    :param logits: iterable of dicts with "offsets" (start, end) and
        "start_logit"/"end_logit" values
    """
    for logit in logits:
        s, e = logit["offsets"]
        start_scores[s] = max(start_scores[s], logit["start_logit"])
        end_scores[e] = max(end_scores[e], logit["end_logit"])
def update_soft_offsets(start_scores, end_scores, logits):
    """Soft-voting update: accumulate each logit at its start/end offset.

    Both score containers are mutated in place.
    """
    for logit in logits:
        s, e = logit["offsets"]
        start_scores[s] += logit["start_logit"]
        end_scores[e] += logit["end_logit"]
def update_spans(span_scores, logits):
    """Add each logit's combined start+end score over its [start, end) span.

    ``span_scores`` must support slice broadcasting (a NumPy array); it is
    mutated in place.
    """
    for logit in logits:
        s, e = logit["offsets"]
        span_scores[s:e] += logit["start_logit"] + logit["end_logit"]
def soft_voting_use_offset(predictions, logits, contexts, document_ids, question_ids):
    """Accumulate every model's start/end logits per (question, document).

    ``predictions[que_id][doc_id]`` holds "sp"/"ep" score arrays (one slot
    per context character plus one) and the raw context string; arrays are
    allocated on first sight of a document and summed into across models.
    """
    for logit, context, doc_id, que_id in tqdm(
        zip(logits, contexts, document_ids, question_ids), desc="Soft Voting Use Offset"
    ):
        if que_id not in predictions:
            predictions[que_id] = dict()
        if doc_id not in predictions[que_id]:
            # First time this document is seen: allocate score arrays.
            predictions[que_id][doc_id] = dict()
            predictions[que_id][doc_id]["sp"] = np.zeros(len(context) + 1) + OFFSET_DEFAULT
            predictions[que_id][doc_id]["ep"] = np.zeros(len(context) + 1) + OFFSET_DEFAULT
            predictions[que_id][doc_id]["context"] = context
        start_scores = predictions[que_id][doc_id]["sp"]
        end_scores = predictions[que_id][doc_id]["ep"]
        update_soft_offsets(start_scores, end_scores, logit)
def hard_voting_use_offset(predictions, logits, contexts, document_ids, question_ids):
    """Track the per-offset maximum start/end logit per (question, document).

    Same bookkeeping as :func:`soft_voting_use_offset`, but updates keep the
    elementwise maximum (hard vote) via :func:`update_hard_offsets` instead
    of summing.

    Bug fix: the tqdm progress description was copy/pasted from the
    soft-voting variant and wrongly read "Soft Voting Use Offset".
    """
    for logit, context, doc_id, que_id in tqdm(
        zip(logits, contexts, document_ids, question_ids), desc="Hard Voting Use Offset"
    ):
        if que_id not in predictions:
            predictions[que_id] = dict()
        if doc_id not in predictions[que_id]:
            # First time this document is seen: allocate score arrays.
            predictions[que_id][doc_id] = dict()
            predictions[que_id][doc_id]["sp"] = np.zeros(len(context) + 1) + OFFSET_DEFAULT
            predictions[que_id][doc_id]["ep"] = np.zeros(len(context) + 1) + OFFSET_DEFAULT
            predictions[que_id][doc_id]["context"] = context
        start_scores = predictions[que_id][doc_id]["sp"]
        end_scores = predictions[que_id][doc_id]["ep"]
        update_hard_offsets(start_scores, end_scores, logit)
def soft_voting_use_span(predictions, logits, contexts, document_ids, question_ids):
    """Accumulate every model's combined span scores per (question, document).

    ``predictions[que_id][doc_id]["span"]`` gets the summed start+end logit
    over each predicted [start, end) character range (via update_spans).
    """
    for logit, context, doc_id, que_id in tqdm(
        zip(logits, contexts, document_ids, question_ids), desc="Soft Voting Use Span"
    ):
        if que_id not in predictions:
            predictions[que_id] = dict()
        if doc_id not in predictions[que_id]:
            # First time this document is seen: allocate the span score array.
            predictions[que_id][doc_id] = dict()
            predictions[que_id][doc_id]["span"] = np.zeros(len(context) + 1) + SPAN_DEFAULT
            predictions[que_id][doc_id]["context"] = context
        span_scores = predictions[que_id][doc_id]["span"]
        update_spans(span_scores, logit)
def save_offset_ensemble(args, predictions, filename):
    """Decode one answer per question from the voted offset arrays and save.

    For each question: pick the document with the highest peak start score,
    take the argmax start offset, then search for the best end offset within
    ``args.data.max_answer_length`` characters after it. The decoded context
    substrings are written as a JSON dict to ``args.path.info/filename``.
    """
    ensemble_results = {}
    for que_id in predictions.keys():
        # Select the document whose start-score peak is highest.
        used_doc = None
        best_score = float("-inf")
        for doc_id in predictions[que_id].keys():
            max_score = predictions[que_id][doc_id]["sp"].max()
            if best_score < max_score:
                best_score = max_score
                used_doc = doc_id
        s_offset, e_offset = None, None
        s_offset = predictions[que_id][used_doc]["sp"].argmax()
        # End offset is constrained to lie within max_answer_length of the start.
        e_offset_start = s_offset + 1
        e_offset_end = e_offset_start + args.data.max_answer_length + 1
        e_offset = e_offset_start + predictions[que_id][used_doc]["ep"][e_offset_start:e_offset_end].argmax()
        ensemble_results[que_id] = predictions[que_id][used_doc]["context"][s_offset:e_offset]
    save_path = p.join(args.path.info, filename)
    with open(save_path, "w") as f:
        f.write(json.dumps(ensemble_results, indent=4, ensure_ascii=False) + "\n")
def save_span_ensemble(args, predictions, filename, percent=75):
    """Decode one answer per question from the voted span arrays and save.

    For each question: pick the document with the highest peak span score,
    threshold the scores in a MAX_ANSWER_LENGTH-wide window around the peak
    at the given percentile, and take the surviving range as the answer.
    Results are written as a JSON dict to ``args.path.info/filename``.

    Bug fixes versus the original:
      * the span array was read via the stale inner loop variable ``doc_id``
        (the last document iterated) instead of the selected ``used_doc``;
      * the window offset used a hard-coded ``peak - 15`` that both desynced
        from MAX_ANSWER_LENGTH and went negative for peaks near the start —
        it now reuses the clamped window start;
      * ``s_offset``/``e_offset`` were unbound (NameError) when the window or
        the thresholded sample was empty — a single-character fallback at the
        peak is used instead.
    """
    ensemble_results = {}
    for que_id in predictions.keys():
        # Select the document whose span-score peak is highest.
        used_doc = None
        best_score = float("-inf")
        for doc_id in predictions[que_id].keys():
            max_score = predictions[que_id][doc_id]["span"].max()
            if best_score < max_score:
                best_score = max_score
                used_doc = doc_id
        span_scores = predictions[que_id][used_doc]["span"]
        peak = int(np.argmax(span_scores))
        window_start = max(peak - MAX_ANSWER_LENGTH // 2, 0)
        sample = span_scores[window_start : peak + MAX_ANSWER_LENGTH // 2]
        # Fallback: a single character at the peak if thresholding yields nothing.
        s_offset, e_offset = peak, peak + 1
        if len(sample) != 0:
            # Zero out everything below the requested percentile, then keep the
            # outermost surviving indices as the answer span.
            sample_percentile = np.percentile(sample, percent)
            sample = np.where(sample < sample_percentile, 0, sample)
            sample_index = np.where(sample > 0)[0] + window_start
            if len(sample_index) != 0:
                s_offset, e_offset = sample_index[0], sample_index[-1] + 1
        ensemble_results[que_id] = predictions[que_id][used_doc]["context"][s_offset:e_offset]
    save_path = p.join(args.path.info, filename)
    with open(save_path, "w") as f:
        f.write(json.dumps(ensemble_results, indent=4, ensure_ascii=False) + "\n")
def run(args, models, eval_answers, datasets):
    """Run the three ensembling strategies over every model and save results.

    1. Soft Voting Use Offset
    2. Soft Voting Use Span
    3. Hard Voting Use Offset

    Bug fix: the span-ensemble file ("soft_span_predictions.json") was
    previously written from ``hard_offset_predictions`` instead of
    ``soft_span_predictions``, so the span-voting results were never saved.
    """
    soft_offset_predictions = defaultdict(dict)
    soft_span_predictions = defaultdict(dict)
    hard_offset_predictions = defaultdict(dict)
    for model_path, strategy in models:
        args.model_name_or_path = model_path
        args.model.reader_name = "DPR"
        if strategy is not None:
            args = update_args(args, strategy)
        args.retriever.topk = TOPK
        reader = get_reader(args, eval_answers=eval_answers)
        reader.set_dataset(eval_dataset=datasets["validation"])
        trainer = reader.get_trainer()
        logit_list, (contexts, document_ids, question_ids) = trainer.get_logits_with_keys(
            reader.eval_dataset, datasets["validation"], keys=["context", "context_id", "id"]
        )
        # Put every model's logits on a comparable scale before voting.
        logit_list = logit_list_standardization(logit_list)
        soft_voting_use_offset(soft_offset_predictions, logit_list, contexts, document_ids, question_ids)
        hard_voting_use_offset(hard_offset_predictions, logit_list, contexts, document_ids, question_ids)
        soft_voting_use_span(soft_span_predictions, logit_list, contexts, document_ids, question_ids)
    # Shift scores into a positive range before decoding answers.
    offset_postprocess(soft_offset_predictions)
    offset_postprocess(hard_offset_predictions)
    span_postprocess(soft_span_predictions)
    save_offset_ensemble(args, soft_offset_predictions, "soft_offset_predictions.json")
    save_offset_ensemble(args, hard_offset_predictions, "hard_offset_predictions.json")
    save_span_ensemble(args, soft_span_predictions, "soft_span_predictions.json")
def model_ensemble(args):
    """Configure the checkpoint list, retriever and dataset, then ensemble.

    (Original Korean docstring: "enter the models and strategies directly!")
    Each MODELS entry is (checkpoint_path, strategy_name_or_None); a strategy
    name triggers an args override via update_args inside run().
    """
    MODELS = [
        ("../input/model_ensemble_checkpoint/gunmo/RD_G04_C01_KOELECTRA_BASE_V3_FINETUNED_95/checkpoint-6000/", None),
        (
            "../input/model_ensemble_checkpoint/suyeon/KOELECTRA_FINETUNED_TRAIN_KOELECTRA_FINETUNED_95/checkpoint-5400/",
            None,
        ),
        ("../input/model_ensemble_checkpoint/suyeon/ST05_AtireBM25_95/checkpoint-5000/", None),
        ("../input/model_ensemble_checkpoint/jonghun/ST101_CNN_95/checkpoint-15100/", "ST101"),
        ("../input/model_ensemble_checkpoint/jonghun/ST103_CNN_LSTM_95/checkpoint-5500/", "ST103"),
        ("../input/model_ensemble_checkpoint/jonghun/ST104_CCNN_v2_95/checkpoint-15100/", "ST104"),
        ("../input/model_ensemble_checkpoint/jonghun/ST106_LSTM_95/checkpoint-1500/", "ST106"),
    ]
    args.retriever.topk = TOPK
    args.data.max_answer_length = MAX_ANSWER_LENGTH
    args.retriever.model_name = "ATIREBM25_DPRBERT"
    args.train.do_predict = True
    # Retrieve top-k passages for each validation question, keeping the
    # original answers aside for the reader.
    datasets = get_dataset(args, is_train=False)
    retriever = get_retriever(args)
    eval_answers = datasets["validation"]
    datasets["validation"] = retriever.retrieve(datasets["validation"], topk=args.retriever.topk)["validation"]
    run(args, MODELS, eval_answers, datasets)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the model ensemble.
    args = get_args()
    model_ensemble(args)
| StarcoderdataPython |
1618312 | <gh_stars>10-100
import falcon
def test_get_returns_hello_name(client):
    """GET /hello/bob should return HTTP 200 with the greeting JSON payload."""
    doc = {"message": "Hello, Bob!"}
    result = client.simulate_get("/hello/bob")
    assert result.status == falcon.HTTP_OK
    assert result.json == doc
| StarcoderdataPython |
226240 | <reponame>samysweb/dnnv<filename>dnnv/nn/transformers/simplifiers/squeeze_gemms.py
import numpy as np
from .base import Simplifier
from ... import operations
from ...graph import OperationGraph
class SqueezeGemms(Simplifier):
    """Simplifier pass that fuses adjacent Gemm operations.

    Two patterns are rewritten:
      * Gemm(Gemm(x)) -> one Gemm with pre-multiplied weights and bias;
      * Gemm(Flatten(Conv(x))) with a 1x1 convolution -> Gemm(Flatten(x))
        with the convolution folded into the weight matrix.
    """

    def visit_Gemm(self, operation: operations.Gemm) -> operations.Gemm:
        """Return a fused Gemm when a known pattern matches, otherwise the
        operation unchanged."""
        if isinstance(operation.a, operations.Gemm) and not operation.transpose_a:
            input_op = operation.a
            # Only fuse when the inner Gemm feeds from another Operation and
            # uses default scaling (alpha == beta == 1) so the algebra holds.
            if (
                not isinstance(input_op.a, operations.Operation)
                or input_op.alpha != 1.0
                or input_op.beta != 1.0
            ):
                return operation
            a = input_op.a
            # Normalize both weights to non-transposed form, then combine:
            # (a @ b0 + c0) @ b1 + c1 == a @ (b0 @ b1) + (c0 @ b1 + c1).
            b_0 = input_op.b.T if input_op.transpose_b else input_op.b
            b_1 = operation.b.T if operation.transpose_b else operation.b
            b = np.matmul(b_0, b_1)
            if input_op.c is not None and operation.c is not None:
                c = np.matmul(input_op.c, b_1) + operation.c
            elif operation.c is not None:
                c = operation.c
            elif input_op.c is not None:
                c = np.matmul(input_op.c, b_1)
            else:
                c = None
            return operations.Gemm(
                a,
                b,
                c,
                transpose_a=input_op.transpose_a,
                alpha=operation.alpha,
                beta=operation.beta,
            )
        elif isinstance(operation.b, operations.Gemm):
            # TODO : reduce when operation.b is Gemm
            return operation
        elif isinstance(operation.a, operations.Flatten) and isinstance(
            operation.a.x, operations.Conv
        ):
            if operation.transpose_a:
                return operation
            flatten_op = operation.a
            conv_op = flatten_op.x
            # Only square channel mixes (in-channels == out-channels) are handled,
            # so the flattened shape is preserved.
            if conv_op.w.shape[0] != conv_op.w.shape[1]:
                return operation
            # NOTE(review): this guard reads ``conv_op.shape[2]`` rather than
            # ``conv_op.w.shape[2]``, and the fold below only reads w[i, j, 0, 0],
            # which is valid solely for 1x1 kernels -- confirm the intended check.
            if conv_op.w.shape[2] != conv_op.w.shape[3] and conv_op.shape[2] != 1:
                # TODO : handle this case
                return operation
            input_shape = OperationGraph([conv_op]).output_shape[0]
            flat_input_shape = np.product(input_shape[1:])
            # Build a (flat x flat) matrix that applies the 1x1 channel mix
            # independently at every (batch, spatial) position.
            W = np.zeros((flat_input_shape, flat_input_shape)).astype(operation.b.dtype)
            for (b, i, h, w) in np.ndindex(input_shape):
                for j in range(input_shape[1]):
                    k = np.ravel_multi_index((b, i, h, w), input_shape)
                    l = np.ravel_multi_index((b, j, h, w), input_shape)
                    W[k, l] = conv_op.w[i, j, 0, 0]
            op_b = operation.b
            if operation.transpose_b:
                op_b = op_b.T
            W = W @ op_b
            # Fold the conv bias (repeated once per spatial position) through
            # the Gemm weights and add the Gemm's own bias.
            bias = np.tile(conv_op.b, np.product(input_shape[2:]))
            bias = bias @ op_b + operation.c
            new_flatten_op = operations.Flatten(conv_op.x, axis=flatten_op.axis)
            gemm_op = operations.Gemm(new_flatten_op, W, bias)
            return gemm_op
        return operation
| StarcoderdataPython |
# Count pairs of input strings that are anagrams of each other: two strings
# are anagrams iff their sorted character sequences are equal, so group by
# that signature and sum C(count, 2) per group.
N = int(input())
signatures = [''.join(sorted(input())) for _ in range(N)]
counts = {}
for sig in signatures:
    counts[sig] = counts.get(sig, 0) + 1
print(sum(c * (c - 1) // 2 for c in counts.values()))
| StarcoderdataPython |
6542222 | <gh_stars>1-10
#imports
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
from torchvision import transforms, utils
from torchvision.transforms import ToTensor
from torchvision.io import read_image
from torch.utils.data import DataLoader
from PIL import Image
import matplotlib.pyplot as plt
import constants
from constants import IMG_DIR
class OverlapMNISTNDF(Dataset):
    '''
    Dataset of Overlapping MNIST Images for use in NDFs.
    Images returned as (h,w,3) size images.

    Each item is ((image, pos), intensity): the whole image plus one pixel
    coordinate, with the normalized value at that pixel as the target.
    '''
    def __init__(self,
                 directory: str,
                 transforms: ToTensor(),
                 set_name: str,
                 ):
        '''
        :param directory: path to directory of data
        :param transforms: transform applied to each loaded PIL image (may be None)
        :param set_name: set name (test, train, val)
        '''
        assert set_name in ['test', 'train', 'val'], "choose valid set name"
        self.img_dir = directory+"/"+set_name
        self.transforms = transforms
        self.set_name = set_name
        # Per-split label-folder names come from the project's constants module.
        if set_name == 'test':
            self.set_list = constants.TEST_NAMES
        elif set_name == 'train':
            self.set_list = constants.TRAIN_NAMES
        elif set_name == 'val':
            self.set_list = constants.VAL_NAMES

    def __len__(self):
        # 1024 pixel positions per 32x32 image; 10 images per label folder for
        # train, 2 for test/val.
        if self.set_name == 'train':
            return len(self.set_list)*10*1024
        else:
            return len(self.set_list)*2*1024

    def __getitem__(self, idx: int):
        # Decompose the flat index into (label folder, image index, pixel position).
        if self.set_name == 'train':
            img_label, rem = divmod(idx, 1024*10) #returns the img label and file index
        else:
            img_label, rem = divmod(idx, 1024*2) #returns the img label and file index
        folder_name = self.set_list[img_label]
        index, pos = divmod(rem,1024) #index of image
        file_path = self.img_dir+'/'+folder_name+'/'+str(index)+'_'+folder_name+'.png'
        img = Image.open(file_path)
        # Split the flat pixel position into coordinates of the 32x32 grid.
        # NOTE(review): the quotient is used as x and the remainder as y both
        # here and in getpixel -- internally consistent, but confirm the
        # intended axis order against the NDF consumer.
        x_coord, y_coord = divmod(pos, 32) #tuple used in NDF
        pos = torch.tensor([x_coord,y_coord]).reshape(2,-1)
        # Normalize the 8-bit pixel value into [0, 1].
        intensity = torch.tensor(img.getpixel((x_coord,y_coord)))/255
        transforms = self.transforms
        if transforms is not None:
            img = transforms(img)
        return ((img, pos), intensity)
if __name__ == '__main__':
    # Smoke test: load one training sample and print the image tensor's size.
    data = OverlapMNISTNDF(IMG_DIR, ToTensor(),'train')
    print(data[0][0][0].size())
| StarcoderdataPython |
5043148 | <reponame>DsCodeStudio98m0f/DTia-Nugrahab
"""param
Request Parameter
PROJECT: BaoAI Backend
AUTHOR: henry <<EMAIL>>
WEBSITE: http://www.baoai.co
COPYRIGHT: Copyright © 2016-2020 广州源宝网络有限公司 Guangzhou Yuanbao Network Co., Ltd. ( http://www.ybao.org )
LICENSE: Apache-2.0
"""
from flask_marshmallow import base_fields
from marshmallow import validate, ValidationError
from flask_restplus_patched import Parameters, PostFormParameters, JSONParameters
from .schema import *
def validate_length(value):
    """Marshmallow validator: reject values whose length is outside [3, 30]."""
    if not (3 <= len(value) <= 30):
        raise ValidationError('Length [3-30]')
class AdminLoginParameters(JSONParameters):
    # Admin login request body: username (3-30 chars) and password.
    username = base_fields.String(required=True, validate=validate_length)
    password = base_fields.String(required=True)
class AdminParameters(JSONParameters, AdminSchema):
    # Full admin payload; all field definitions are inherited from AdminSchema.
    class Meta(AdminSchema.Meta):
        pass
class FindPassParameters(JSONParameters):
    # Password-recovery request: must be a syntactically valid e-mail address.
    email = base_fields.String(required=True, validate=validate.Email(error='Format Error'))
class RefleshTokenParameters(JSONParameters):
    # Token-refresh request: refresh token plus the owning username.
    # NOTE(review): the "Reflesh" misspelling is kept -- external callers may
    # reference this class name.
    rftoken = base_fields.String(required=True)
    username = base_fields.String(required=True)
class UidsRidsParameters(JSONParameters):
    # Bulk role-assignment payload: lists of role ids and user ids.
    rids = base_fields.List(base_fields.Integer())
    uids = base_fields.List(base_fields.Integer())
| StarcoderdataPython |
8032960 | <filename>setup.py
#!/usr/bin/env python3
from setuptools import setup
# Packaging metadata for the pub-sub-demo Flask service.
setup(
    name="pub-sub-demo",
    version="1.0",
    description="",
    packages=["pub_sub_demo"],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'flask',
        'gunicorn',
        'requests',
        'pyjwt[crypto]',
    ]
)
| StarcoderdataPython |
1817595 | <filename>pytket/pytket/routing/__init__.py
# Copyright 2019-2022 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The routing module provides access to the tket :py:class:`Architecture` structure and
methods for modifying circuits to satisfy the architectural constraints. It also
provides acess to the :py:class:`Placement` constructors for relabelling Circuit qubits
and has some methods for routing circuits. This module is provided in binary form during
the PyPI installation.
"""
from pytket._tket.routing import * # type: ignore
| StarcoderdataPython |
8166264 | """
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: <NAME>, Cisco Systems, Inc.
"""
import os
import glob
import logging
import lxml.etree as ET
from django.conf import settings
from explorer.models import User, UserProfile
from explorer.utils.yang import Compiler
from explorer.utils.dygraph import DYGraph
from explorer.utils.misc import ServerSettings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ModuleAdmin:
    """Static helpers for managing a user's YANG modules: listing, locating,
    and the subscribe/unsubscribe/delete admin actions."""

    @staticmethod
    def get_modules(username):
        """
        Return list of modules available to user + subscribed

        Builds a <modulelist> etree element with one <module> child per .yang
        file in the user's yang directory; subscribed modules are flagged
        with subscribed="true".
        """
        logger.debug("ModuleAdmin.get_modules: enter")
        modules = ET.Element('modulelist')
        user = User.objects.filter(username=username)
        mlist = list()
        for _file in glob.glob(os.path.join(ServerSettings.yang_path(username), '*.yang')):
            mlist.append(os.path.basename(_file))
        mlist.sort()
        for fname in mlist:
            module = ET.Element('module')
            module.text = os.path.basename(fname)
            name = module.text.split('.yang')[0]
            if UserProfile.objects.filter(user=user, module=name).exists():
                module.set('subscribed', 'true')
            modules.append(module)
        logger.info("ModuleAdmin.get_modules: returning (%d) modules .. exit" % len(modules))
        return modules

    @staticmethod
    def find_matching(target, directory, modules):
        """Return the path of *target*'s .yang file in *directory*, matching
        either an exact name or a revision-qualified name (target@rev.yang);
        None if not found. *modules* may pre-list the candidate filenames."""
        logger.debug('Searching target %s in %s' % (target, directory))
        if not modules:
            modules = [os.path.basename(_file) for _file in glob.glob(os.path.join(directory, '*.yang'))]
        for module in modules:
            if module == target + '.yang':
                return os.path.join(directory, module)
            if module.startswith(target + '@'):
                return os.path.join(directory, module)
        return None

    @staticmethod
    def cxml_path(username, modulename):
        """Return the path of the user's compiled cxml (.xml) file for
        *modulename* (exact or revision-qualified match), or None."""
        _dir = ServerSettings.cxml_path(username)
        modules = [os.path.basename(_file) for _file in glob.glob(os.path.join(_dir, '*.xml'))]
        for module in modules:
            if module == modulename + '.xml':
                return os.path.join(_dir, module)
            if module.startswith(modulename + '@'):
                return os.path.join(_dir, module)
        return None

    @staticmethod
    def get_modulelist(username):
        """
        Return list of modules available to user

        Only modules the user is subscribed to (has a UserProfile row for)
        are returned.
        """
        users = User.objects.filter(username=username)
        if not users:
            logger.warning("ModuleAdmin.admin_action: Invalid user " + username)
            return []
        modules = []
        files = glob.glob(os.path.join(ServerSettings.cxml_path(username), '*.xml'))
        for _file in files:
            module = os.path.basename(_file).split('.xml')[0]
            if UserProfile.objects.filter(user=users[0], module=module).exists():
                modules.append(module)
        return modules

    @staticmethod
    def admin_action(username, payload, request):
        """Apply an admin action (graph | delete | subscribe | unsubscribe)
        to the modules listed in the XML *payload*.

        Returns (True, None) on success or (False, error_message).
        """
        logger.info("ModuleAdmin.admin_action: enter (%s -> %s)" % (username, request))
        if payload is None:
            logger.error('ModuleAdmin.admin_action: invalid payload in request !!')
            return False, "Invalid payload !!"
        modified = False
        modules = ET.fromstring(payload)
        if request == 'graph':
            return dependencies_graph(username, modules)
        users = User.objects.filter(username=username)
        if not users:
            logger.error("ModuleAdmin.admin_action: invalid user " + username)
            return False, 'Unknown User %s !!' % username
        user = users[0]
        # In shared (non user-aware) mode, deleting requires the Django
        # delete_yangmodel permission.
        if not ServerSettings.user_aware():
            if (request == 'delete') and not user.has_perm('explorer.delete_yangmodel'):
                return False, 'User %s does not have permission to delete models!!' % username
        for module in modules:
            name = module.text.split('.yang')[0]
            logger.debug("ModuleAdmin.admin_action: %s -> %s" % (request, name))
            # delete modules from user profile
            if request in ['delete', 'unsubscribe']:
                if UserProfile.objects.filter(user=user, module=name).exists():
                    profile = UserProfile.objects.filter(user=user, module=name)
                    profile.delete()
                    logger.debug('Module %s deleted for user %s' % (module.text, username))
                # delete yang and cxml files for delete request
                if request == 'delete':
                    for _type in [('cxml', '.xml'), ('yang', '.yang')]:
                        _file = os.path.join('data', 'users', username, _type[0], name + _type[1])
                        if os.path.exists(_file):
                            os.remove(_file)
                            modified = True
                            # NOTE(review): uses the root ``logging`` module
                            # here rather than the module ``logger`` -- confirm
                            # whether that is intentional.
                            logging.debug('Deleted %s (user: %s)' % (_file, username))
            if request == 'subscribe':
                # Only modules whose cxml parsed successfully can be subscribed.
                if not is_browsable(username, name):
                    logger.debug('Module %s can not be subscribed ' % (module.text))
                    continue
                if not UserProfile.objects.filter(user=user, module=name).exists():
                    profile = UserProfile(user=user, module=name)
                    profile.save()
                    logger.debug('User %s subscribed to %s module ..' % (username, module.text))
                else:
                    logger.debug('User %s already subscribed to %s module ' % (username, module.text))
        # if any yang model modified, delete dependency file so it is rebuilt
        if modified:
            _file = os.path.join(ServerSettings.yang_path(username), 'dependencies.xml')
            if os.path.exists(_file):
                os.remove(_file)
                logger.debug('Deleted dependency file %s (user: %s)' % (_file, username))
        return True, None
def dependencies_graph(username, modules=()):
    """Render the user's YANG dependency graph to static/graph.

    Compiles the dependency file if missing, builds a graphviz digraph for
    the given module elements, and renders it. Returns (True, comment) on
    success or (False, error_message).

    Fixes: the mutable default argument (``modules=[]``) is replaced with an
    immutable tuple, and the bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    depfile = os.path.join(ServerSettings.yang_path(username), 'dependencies.xml')
    if not os.path.exists(depfile):
        (rc, msg) = Compiler.compile_pyimport(username, None)
        if not rc:
            return rc, msg
    dgraph = DYGraph(depfile)
    g = dgraph.digraph([m.text.split('.yang')[0] for m in modules])
    if g is None:
        return (False, """Failed to generate dependency graph, please make sure that grapviz
python package is installed !!""")
    try:
        g.render(filename=os.path.join(settings.BASE_DIR, 'static', 'graph'))
    except Exception:
        return (False, """Failed to render dependency graph, please make sure that grapviz
binaries (http://www.graphviz.org/Download.php) are installed on
the server !!""")
    return True, g.comment
def is_browsable(username, module):
    """Return True if the user's cxml file for *module* parses and contains
    a truthy <node> child.

    Fixes: the bare ``except:`` is narrowed to ``Exception`` so
    KeyboardInterrupt/SystemExit propagate instead of being logged away.
    """
    cxml_path = os.path.join(ServerSettings.cxml_path(username), module + '.xml')
    browsable = False
    if os.path.exists(cxml_path):
        try:
            root = ET.parse(cxml_path).getroot()
            # NOTE(review): lxml element truthiness reflects child count, so an
            # empty <node/> is treated as not browsable; use ``is not None`` if
            # mere presence should be enough -- behavior kept as-is here.
            if root.find('node'):
                browsable = True
        except Exception:
            logger.error('is_browsable: Exception in parse -> ' + cxml_path)
    return browsable
| StarcoderdataPython |
6439566 | """
Tests for the cache management code.
"""
import pathlib
import os
import appdirs
import stdpopsim
import tests
class TestSetCacheDir(tests.CacheWritingTest):
    """
    Tests the set_cache_dir function.
    """
    # Absolute and relative paths, with and without trailing separators.
    paths = [
        "/somefile", "/some/other/file/", "relative/path", "relative/path/"]

    def test_paths(self):
        # Both str and pathlib.Path arguments must be accepted and normalized.
        for test in self.paths:
            stdpopsim.set_cache_dir(test)
            self.assertEqual(stdpopsim.get_cache_dir(), pathlib.Path(test))
            stdpopsim.set_cache_dir(pathlib.Path(test))
            self.assertEqual(stdpopsim.get_cache_dir(), pathlib.Path(test))

    def test_none(self):
        # With None, the platform-specific appdirs default location is used.
        stdpopsim.set_cache_dir(None)
        cache_dir = pathlib.Path(appdirs.user_cache_dir("stdpopsim", "popgensims"))
        self.assertEqual(stdpopsim.get_cache_dir(), cache_dir)

    def test_environment_var(self):
        # The STDPOPSIM_CACHE environment variable overrides the default;
        # the finally block guarantees it is unset for later tests.
        try:
            for test in self.paths:
                os.environ["STDPOPSIM_CACHE"] = test
                stdpopsim.set_cache_dir()
                self.assertEqual(stdpopsim.get_cache_dir(), pathlib.Path(test))
        finally:
            os.environ.pop("STDPOPSIM_CACHE")
| StarcoderdataPython |
# Databricks notebook source
# MAGIC %pip install great-expectations==0.14.4

# COMMAND ----------

# Notebook parameter: the ETL load id used to select this run's interim rows.
dbutils.widgets.text("loadid", "", "Load Id")
loadid = dbutils.widgets.get("loadid")

# COMMAND ----------

import datetime
import os
from pyspark.sql.functions import col, lit
import ddo_transform.transform as t
import ddo_transform.util as util

load_id = loadid
loaded_on = datetime.datetime.now()
base_path = 'dbfs:/mnt/datalake/data/dw/'

# Read interim cleansed data for this load only.
parkingbay_sdf = spark.read.table("interim.parking_bay").filter(col('load_id') == lit(load_id))
sensordata_sdf = spark.read.table("interim.sensor").filter(col('load_id') == lit(load_id))
# COMMAND ----------

# MAGIC %md
# MAGIC ### Transform and load Dimension tables

# COMMAND ----------

# Read existing Dimensions
dim_parkingbay_sdf = spark.read.table("dw.dim_parking_bay")
dim_location_sdf = spark.read.table("dw.dim_location")
dim_st_marker = spark.read.table("dw.dim_st_marker")

# Transform: merge the new load into each dimension (logic in ddo_transform);
# cached because each result is both written below and re-read for facts.
new_dim_parkingbay_sdf = t.process_dim_parking_bay(parkingbay_sdf, dim_parkingbay_sdf, load_id, loaded_on).cache()
new_dim_location_sdf = t.process_dim_location(sensordata_sdf, dim_location_sdf, load_id, loaded_on).cache()
new_dim_st_marker_sdf = t.process_dim_st_marker(sensordata_sdf, dim_st_marker, load_id, loaded_on).cache()

# Load: overwrite each unmanaged dimension table at its data-lake path.
util.save_overwrite_unmanaged_table(spark, new_dim_parkingbay_sdf, table_name="dw.dim_parking_bay", path=os.path.join(base_path, "dim_parking_bay"))
util.save_overwrite_unmanaged_table(spark, new_dim_location_sdf, table_name="dw.dim_location", path=os.path.join(base_path, "dim_location"))
util.save_overwrite_unmanaged_table(spark, new_dim_st_marker_sdf, table_name="dw.dim_st_marker", path=os.path.join(base_path, "dim_st_marker"))
# COMMAND ----------

# MAGIC %md
# MAGIC ### Transform and load Fact tables

# COMMAND ----------

# Re-read the freshly overwritten Dimensions so fact keys resolve against them.
dim_parkingbay_sdf = spark.read.table("dw.dim_parking_bay")
dim_location_sdf = spark.read.table("dw.dim_location")
dim_st_marker = spark.read.table("dw.dim_st_marker")

# Process sensor data into new fact rows keyed to the dimensions.
nr_fact_parking = t.process_fact_parking(sensordata_sdf, dim_parkingbay_sdf, dim_location_sdf, dim_st_marker, load_id, loaded_on)

# Insert new rows (append only; facts are never overwritten).
nr_fact_parking.write.mode("append").insertInto("dw.fact_parking")
# COMMAND ----------

# MAGIC %md
# MAGIC ### Data Quality

# COMMAND ----------

import pandas as pd
from ruamel import yaml
from great_expectations.core.batch import RuntimeBatchRequest
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
    DataContextConfig,
    FilesystemStoreBackendDefaults,
)
from pyspark.sql import SparkSession, Row

# Great Expectations context backed by a DBFS directory (no ge.yml project).
root_directory = "/dbfs/great_expectations/"
data_context_config = DataContextConfig(
    store_backend_defaults=FilesystemStoreBackendDefaults(
        root_directory=root_directory
    ),
)
context = BaseDataContext(project_config=data_context_config)

# Datasource configuration: a Spark runtime connector so in-memory DataFrames
# can be validated directly.
my_spark_datasource_config = {
    "name": "transformed_data_source",
    "class_name": "Datasource",
    "execution_engine": {"class_name": "SparkDFExecutionEngine"},
    "data_connectors": {
        "transformed_data_connector": {
            "module_name": "great_expectations.datasource.data_connector",
            "class_name": "RuntimeDataConnector",
            "batch_identifiers": [
                "environment",
                "pipeline_run_id",
            ],
        }
    },
}

# Sanity-check the datasource YAML, then register it with the context.
context.test_yaml_config(yaml.dump(my_spark_datasource_config))
context.add_datasource(**my_spark_datasource_config)

# Batch request wrapping the freshly built fact DataFrame.
batch_request = RuntimeBatchRequest(
    datasource_name="transformed_data_source",
    data_connector_name="transformed_data_connector",
    data_asset_name="paringbaydataaset",  # This can be anything that identifies this data_asset for you
    batch_identifiers={
        "environment": "stage",
        "pipeline_run_id": "pipeline_run_id",
    },
    runtime_parameters={"batch_data": nr_fact_parking},  # Your dataframe goes here
)

# Create the expectation suite and a Validator bound to the batch.
expectation_suite_name = "Transfomed_data_exception_suite_basic"
context.create_expectation_suite(expectation_suite_name=expectation_suite_name, overwrite_existing=True)
validator = context.get_validator(
    batch_request=batch_request,
    expectation_suite_name=expectation_suite_name,
)
# Expectation catalog:
# https://legacy.docs.greatexpectations.io/en/latest/autoapi/great_expectations/expectations/index.html
# Standard arguments:
# https://legacy.docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/standard_arguments.html#meta

# Basic null/type checks on the fact table's key columns.
validator.expect_column_values_to_not_be_null(column="status")
validator.expect_column_values_to_be_of_type(column="status", type_="StringType")
validator.expect_column_values_to_not_be_null(column="dim_time_id")
validator.expect_column_values_to_be_of_type(column="dim_time_id", type_="IntegerType")
validator.expect_column_values_to_not_be_null(column="dim_parking_bay_id")
validator.expect_column_values_to_be_of_type(column="dim_parking_bay_id", type_="StringType")
# Persist the suite even if some expectations failed during definition.
validator.save_expectation_suite(discard_failed_expectations=False)

# Configure a SimpleCheckpoint to run the suite against the batch.
my_checkpoint_name = "Transformed Data"
checkpoint_config = {
    "name": my_checkpoint_name,
    "config_version": 1.0,
    "class_name": "SimpleCheckpoint",
    "run_name_template": "%Y%m%d-%H%M%S-my-run-name-template",
}
my_checkpoint = context.test_yaml_config(yaml.dump(checkpoint_config,default_flow_style=False))
context.add_checkpoint(**checkpoint_config)

# Run the checkpoint; the result feeds the monitoring cell below.
checkpoint_result = context.run_checkpoint(
    checkpoint_name=my_checkpoint_name,
    validations=[
        {
            "batch_request": batch_request,
            "expectation_suite_name": expectation_suite_name,
        }
    ],
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Data Quality Monitoring
# COMMAND ----------
## Report Data Quality Metrics to Azure Monitor using python Azure Monitor open-census exporter
import logging
import time
from opencensus.ext.azure.log_exporter import AzureLogHandler
logger = logging.getLogger(__name__)
logger.addHandler(AzureLogHandler(connection_string=dbutils.secrets.get(scope = "storage_scope", key = "applicationInsightsKey")))
result_dic = checkpoint_result.to_json_dict()
key_name=[key for key in result_dic['_run_results'].keys()][0]
results = result_dic['_run_results'][key_name]['validation_result']['results']
checks = {'check_name':checkpoint_result['checkpoint_config']['name'],'pipelinerunid':loadid}
for i in range(len(results)):
validation_name= results[i]['expectation_config']['expectation_type'] + "_on_" + results[i]['expectation_config']['kwargs']['column']
checks[validation_name]=results[i]['success']
properties = {'custom_dimensions': checks}
if checkpoint_result.success is True:
logger.setLevel(logging.INFO)
logger.info('verifychecks', extra=properties)
else:
logger.setLevel(logging.ERROR)
logger.error('verifychecks', extra=properties)
time.sleep(16)
# COMMAND ----------
dbutils.notebook.exit("success")
| StarcoderdataPython |
6459038 | from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import TextReadOnly, Link
class PopularItem(BasePageWidget):
    """Page widget for one entry in a hub's "popular groups" listing.

    Exposes the entry's title link and description text; the shared locator
    templates are parameterised with this entry's position in the list.
    """

    def __init__(self, owner, locatordict={}, item_number=0):
        # NOTE(review): mutable default for ``locatordict`` is shared across
        # calls; safe only if callers never mutate it -- confirm.
        # initialize variables
        self.__item_number = item_number
        super(PopularItem,self).__init__(owner,locatordict)
        # load hub's classes
        object_locators = self.load_class('PopularItem_Locators')
        # update this object's locator
        self.locators.update(object_locators.locators)
        # update the locators with those from the owner
        self.update_locators_from_owner()
        # setup page object's components
        self.title = Link(self,{'base':'title'})
        self.description = TextReadOnly(self,{'base':'description'})
        # update the component's locators with this objects overrides
        self._updateLocators()

    def _updateLocators(self):
        """Substitute this item's position number into every locator template."""
        super(PopularItem,self)._updateLocators()
        for k,v in self.locators.items():
            # each locator template contains one '%s' placeholder for the index
            self.locators[k] = v % self.__item_number
        self.update_locators_in_widgets()

    def value(self):
        """return a dictionary with the properties of the group"""
        # NOTE(review): ``self.description.value`` is not called, unlike
        # ``self.title.text()``; confirm ``value`` is a property on
        # TextReadOnly, otherwise this stores a bound method, not the text.
        return({'title':self.title.text(),'description':self.description.value})

    def goto_group(self):
        """click the group title"""
        self.title.click()
class PopularItem_Locators_Base(object):
    """locators for PopularItem object

    Each XPath is a template with one '%s' placeholder that
    PopularItem._updateLocators() fills with the item's list position.
    """
    locators = {
        'base' : "xpath=//*[contains(@class,'group-list')]/../../div[%s]",
        'title' : "xpath=//*[contains(@class,'group-list')]/../../div[%s]//*[contains(@class,'details-w-logo')]//h3//a",
        'description': "xpath=//*[contains(@class,'group-list')]/../../div[%s]//*[contains(@class,'details-w-logo')]//p",
        'logo' : "xpath=//*[contains(@class,'group-list')]/../../div[%s]//*[contains(@class,'logo')]//img",
    }
| StarcoderdataPython |
11309877 | from app.common.model.page import Page
class OrderPage(Page):
    """One page of an order listing.

    Extends the generic :class:`Page` pagination container with the list of
    orders that belong to this page.
    """

    def __init__(self, orders, page, size, total_count):
        # Let the base class record the pagination fields first.
        super().__init__(page, size, total_count)
        self.orders = orders
| StarcoderdataPython |
3318540 | ######################################################################
#
# File: b2sdk/unfinished_large_file.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
class UnfinishedLargeFile(object):
    """A version of a file (in B2 cloud) whose large-file upload was started
    but not yet finished.

    :ivar str ~.file_id: ``fileId``
    :ivar str ~.file_name: full file name (with path)
    :ivar str ~.account_id: account ID
    :ivar str ~.bucket_id: bucket ID
    :ivar str ~.content_type: :rfc:`822` content type, for example ``"application/octet-stream"``
    :ivar dict ~.file_info: file info dict
    """

    def __init__(self, file_dict):
        """Initialize from one file dict returned by ``b2_start_large_file``
        or ``b2_list_unfinished_large_files``.
        """
        # Map each instance attribute to its key in the API response dict.
        for attr_name, dict_key in (
            ('file_id', 'fileId'),
            ('file_name', 'fileName'),
            ('account_id', 'accountId'),
            ('bucket_id', 'bucketId'),
            ('content_type', 'contentType'),
            ('file_info', 'fileInfo'),
        ):
            setattr(self, attr_name, file_dict[dict_key])

    def __repr__(self):
        return '<{} {} {}>'.format(self.__class__.__name__, self.bucket_id, self.file_name)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| StarcoderdataPython |
9740328 | <filename>plugin.py
# BTH address presence Python Plugin
#
# Author: Herman
#
# BTH address in de format xx:xx:xx:xx:xx:xx
# changed to bluetooth library. much more responsive
"""
<plugin key="bthpresence" name="Bluetooth address presence" author="heggink" version="0.0.2" externallink="https://github.com/heggink/domoticz-bluetooth-ping">
<params>
<param field="Address" label="BTH address" width="1000px" required="true"/>
<param field="Mode1" label="Minutes between check" width="100px" required="true" default="1"/>
<param field="Mode2" label="Minutes for timeout" width="100px" required="true" default="10"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import platform
import os
import bluetooth
class BasePlugin:
    """Domoticz plugin that toggles a virtual switch based on whether a
    Bluetooth device (identified by its MAC address) answers a name lookup."""

    # presumably 6 heartbeats per minute (10 s Domoticz heartbeat interval)
    # -- TODO confirm against the framework's configured heartbeat
    __MINUTE = 6
    # Domoticz unit number of the presence switch created by this plugin
    __UNIT = 1

    def __init__(self):
        self.__platform = platform.system()  # host OS, e.g. "Linux"
        self.__address = ""        # target Bluetooth MAC address
        self.__heartbeat = 1       # minutes between presence checks
        self.__timeout = 1         # remaining absence budget (minutes)
        self.__deftimeout = 1      # configured absence timeout (minutes)
        self.__runAgain = 0        # heartbeats left until the next check
        self.__config_ok = False   # True once the platform checks pass
        self.__COMMAND = ""        # legacy l2ping command (unused by lookup path)
        self.__OPTIONS = ""        # legacy l2ping options
        return

    def onStart(self):
        """Validate configuration, read parameters and create the switch device."""
        Domoticz.Debug("onStart called")
        # Debug
        if Parameters["Mode6"] == "Debug":
            Domoticz.Debugging(1)
        else:
            Domoticz.Debugging(0)
        # Validate parameters
        Domoticz.Debug("Platform: "+self.__platform)
        if self.__platform == "Linux":
            self.__COMMAND = "l2ping"
            self.__OPTIONS = "-c1 -s32 -t1"
            # require the bluez package to be installed
            ret = os.popen("dpkg -l | grep " + "bluez").read()
            pos = ret.find("bluez")
            if pos >= 0:
                self.__config_ok = True
                self.__COMMAND = "sudo " + self.__COMMAND
            else:
                Domoticz.Error("bluez not found")
                return
        elif self.__platform == "Windows":
            # Not implemented yet
            pass
        Domoticz.Debug("Command: " + self.__COMMAND + " " + self.__OPTIONS)
        # Check parameter for heartbeat. Default is 1. Check every 1 minute for the presence of the defined mac addresses
        self.__heartbeat = int(Parameters["Mode1"])
        if self.__heartbeat < 1:
            self.__heartbeat = 1
        # Check parameter for timeout. Default is 10, minimum is 5. After absence of the mac address for 10 minutes, then switch off
        self.__deftimeout = int(Parameters["Mode2"])
        if self.__deftimeout < 1:
            self.__deftimeout = 5
        self.__timeout = self.__deftimeout
        # Initialize all defined devices
        self.__address = Parameters["Address"].lower().strip().replace("-", ":")
        if self.__UNIT not in Devices:
            Domoticz.Device(Unit=self.__UNIT, Name="BTH Presence", TypeName="Switch", Image=18, Used=1).Create()
        DumpConfigToLog()

    def onStop(self):
        Domoticz.Debug("onStop called")

    def onConnect(self, Connection, Status, Description):
        Domoticz.Debug("onConnect called")

    def onMessage(self, Connection, Data):
        Domoticz.Debug("onMessage called")

    def onCommand(self, Unit, Command, Level, Hue):
        Domoticz.Debug("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))

    def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
        Domoticz.Debug("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)

    def onDisconnect(self, Connection):
        Domoticz.Debug("onDisconnect called")

    def onHeartbeat(self):
        """Every __heartbeat minutes: look up the device and update the switch,
        switching off only after __deftimeout minutes of continuous absence."""
        Domoticz.Debug("onHeartbeat called")
        if not self.__config_ok:
            return
        self.__runAgain -= 1
        if self.__runAgain <= 0:
            found = False
            # Scan for mac addresses in the network
            ret = bluetooth.lookup_name(self.__address, timeout=2)
            if (ret != None):
                # legacy l2ping implementation kept for reference:
                # ret = os.popen(self.__COMMAND + " " + self.__OPTIONS + " " + self.__address).read().lower()
                # Domoticz.Debug("address: '" + self.__address + "'")
                # pos = ret.find(self.__address)
                # Domoticz.Debug("pos: "+str(pos))
                # if pos >= 0:
                Domoticz.Debug("address: " + self.__address + " found. Timeout: " + str(self.__timeout))
                found = True
                self.__timeout = self.__deftimeout
            else:
                # Device not found
                self.__timeout -= 1
                if self.__timeout > 0:
                    # Device not timed out yet
                    Domoticz.Debug("address: " + self.__address + " not timed out yet: "+str(self.__timeout))
                    found = True
            if found:
                Domoticz.Debug("An address found or not timed out yet")
                UpdateDevice(self.__UNIT, 1, "On")
            else:
                Domoticz.Debug("No addresses found")
                UpdateDevice(self.__UNIT, 0, "Off")
            self.__runAgain = self.__MINUTE*self.__heartbeat
# Singleton plugin instance; Domoticz invokes the module-level callbacks
# below, each of which simply delegates to it.
global _plugin
_plugin = BasePlugin()

def onStart():
    global _plugin
    _plugin.onStart()

def onStop():
    global _plugin
    _plugin.onStop()

def onConnect(Connection, Status, Description):
    global _plugin
    _plugin.onConnect(Connection, Status, Description)

def onMessage(Connection, Data):
    global _plugin
    _plugin.onMessage(Connection, Data)

def onCommand(Unit, Command, Level, Hue):
    global _plugin
    _plugin.onCommand(Unit, Command, Level, Hue)

def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    global _plugin
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)

def onDisconnect(Connection):
    global _plugin
    _plugin.onDisconnect(Connection)

def onHeartbeat():
    global _plugin
    _plugin.onHeartbeat()
################################################################################
# Generic helper functions
################################################################################
def DumpConfigToLog():
    """Log all non-empty plugin parameters, every device and every setting
    at debug level (no-op unless debugging is enabled)."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Debug(f"'{key}':'{Parameters[key]}'")
    Domoticz.Debug(f"Device count: {len(Devices)}")
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug(f"Device: {unit} - {device}")
        Domoticz.Debug(f"Device ID: '{device.ID}'")
        Domoticz.Debug(f"Device Name: '{device.Name}'")
        Domoticz.Debug(f"Device nValue: {device.nValue}")
        Domoticz.Debug(f"Device sValue: '{device.sValue}'")
        Domoticz.Debug(f"Device LastLevel: {device.LastLevel}")
    for key in Settings:
        Domoticz.Debug(f"Setting: {key} - {Settings[key]}")
def UpdateDevice(Unit, nValue, sValue, TimedOut=0, AlwaysUpdate=False):
    """Push nValue/sValue/TimedOut to a device, skipping no-op updates
    unless AlwaysUpdate is set."""
    if Unit not in Devices:
        # The device may have been deleted by the user; never update blindly.
        return
    device = Devices[Unit]
    changed = (
        device.nValue != nValue
        or device.sValue != sValue
        or device.TimedOut != TimedOut
    )
    if changed or AlwaysUpdate:
        device.Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
        Domoticz.Debug(f"Update {device.Name}: {nValue} - '{sValue}'")
| StarcoderdataPython |
5002330 | <filename>instructors/projects-2015/workshop_100515/bubble_sort.py
""" Bubble Sort
"""
import random
import time
def compare_two_numbers(a, b):
    """Return the pair (a, b) ordered as (smaller, larger)."""
    # Ties keep the original (a, b) order, matching the a <= b case.
    return (a, b) if a <= b else (b, a)
# NOTE(review): this teaching script is Python 2 (print statements, and
# range() returning a mutable list passed to random.shuffle).
# Bubble sort: after pass j the largest remaining value has bubbled to the end.
mylist = range(1,21)
random.shuffle(mylist)
print mylist
#mylist = [4,2,3,1]
for j in range(len(mylist)-1):
    # lets iterate over the list length minus one times.
    for i in range(len(mylist)-j-1):
        print mylist
        mylist[i], mylist[i+1] = compare_two_numbers(mylist[i], mylist[i+1])
        time.sleep(.4)
print mylist
| StarcoderdataPython |
4814575 | <reponame>slp-ntua/slp-labs<filename>lab2/dnn/torch_dataset.py
import os
import kaldi_io
import numpy as np
from torch.utils.data import Dataset
class TorchSpeechDataset(Dataset):
    """PyTorch dataset over Kaldi features and frame-level alignment labels.

    Features and alignments are read from a Kaldi recipe directory via
    command-line Kaldi tools (through ``kaldi_io``), then all utterances are
    flattened into one long array so items are individual frames.
    """

    def __init__(self, recipe_dir, ali_dir, dset, feature_context=2):
        # recipe_dir: Kaldi recipe root containing data/<dset>/...
        # ali_dir: directory with alignment archives (ali*.gz) and final.mdl
        # dset: data subset name, e.g. "train"
        # feature_context: frames of left/right splicing context (0 disables)
        self.recipe_dir = recipe_dir
        self.ali_dir = ali_dir
        self.feature_context = feature_context
        self.dset = dset
        self.feats, self.labels = self.read_data()
        self.feats, self.labels, self.uttids, self.end_indices = self.unify_data(self.feats, self.labels)

    def read_data(self):
        """Load per-utterance features and pdf-id labels from the Kaldi recipe."""
        feat_path = os.path.join(self.recipe_dir, 'data', self.dset, 'feats.scp')
        label_path = os.path.join(self.recipe_dir, self.ali_dir)
        # Pipeline: per-speaker CMVN, then delta + delta-delta features.
        feat_opts = "apply-cmvn --utt2spk=ark:{0} ark:{1} ark:- ark:- |". \
            format(
                os.path.join(self.recipe_dir, 'data', self.dset, 'utt2spk'),
                os.path.join(self.recipe_dir, 'data', self.dset, self.dset + '_cmvn_speaker.ark')
            )
        feat_opts += " add-deltas --delta-order=2 ark:- ark:- |"
        if self.feature_context:
            # Optionally splice neighbouring frames for context.
            feat_opts += " splice-feats --left-context={0} --right-context={0} ark:- ark:- |". \
                format(str(self.feature_context))
        label_opts = 'ali-to-pdf'
        feats = {k: m for k, m in kaldi_io.read_mat_ark(
            'ark:copy-feats scp:{} ark:- | {}'.format(feat_path, feat_opts))}
        # Keep only utterances present in both the feature and label streams.
        lab = {k: v for k, v in kaldi_io.read_vec_int_ark(
            'gunzip -c {0}/ali*.gz | {1} {0}/final.mdl ark:- ark:-|'.format(label_path, label_opts))
            if k in feats}
        feats = {k: v for k, v in feats.items() if k in lab}
        return feats, lab

    def unify_data(self, feats, lab, optional_array=None):
        """Concatenate per-utterance arrays (sorted by utterance id) into flat
        frame-level arrays.

        Returns (features, labels[, optional], uttids, end_indices); the
        5-tuple variant is produced only when ``optional_array`` is truthy.
        end_indices[i] is the cumulative frame count after the i-th utterance.
        """
        fea_conc = np.concatenate([v for k, v in sorted(feats.items())])
        lab_conc = np.concatenate([v for k, v in sorted(lab.items())])
        if optional_array:
            # NOTE(review): truthiness test -- an empty dict counts as absent.
            opt_conc = np.concatenate([v for k, v in sorted(optional_array.items())])
        names = [k for k, v in sorted(lab.items())]
        end_snt = 0
        end_indices = []
        for k, v in sorted(lab.items()):
            end_snt += v.shape[0]
            end_indices.append(end_snt)
        lab = lab_conc.astype('int64')
        if optional_array:
            opt = opt_conc.astype('int64')
            return fea_conc, lab, opt, names, end_indices
        return fea_conc, lab, names, end_indices

    def __getitem__(self, idx):
        # One (feature_vector, label) pair per frame.
        return self.feats[idx], self.labels[idx]

    def __len__(self):
        return len(self.labels)
if __name__ == "__main__":
    # Smoke-test: load the training set and report its size.
    # Bug fix: removed the leftover `import ipdb; ipdb.set_trace()` debugger
    # breakpoint, which required a third-party package and halted execution.
    data = TorchSpeechDataset("./", "./exp_tri1_ali_train", "train")
    print("Loaded {} frames from {} utterances".format(len(data), len(data.uttids)))
| StarcoderdataPython |
295193 | <reponame>jasonjoh/kiota<filename>abstractions/python/kiota/abstractions/api_client_builder.py
from .serialization import (
ParseNodeFactory,
ParseNodeFactoryRegistry,
SerializationWriterFactory,
SerializationWriterFactoryRegistry,
SerializationWriterProxyFactory,
)
from .store import BackingStoreParseNodeFactory, BackingStoreSerializationWriterProxyFactory
def register_default_serializer(factory_class: SerializationWriterFactory) -> None:
    """Registers the default serializer to the registry.

    Args:
        factory_class (SerializationWriterFactory): the class of the factory to be registered.
    """
    # NOTE(review): if callers really pass a *class* (as documented),
    # ``type(factory_class)`` yields its metaclass and the instantiation
    # below would misbehave; this only works as written if an *instance*
    # is passed -- confirm the intended calling convention.
    base_class = type(factory_class)
    serializer = base_class()
    # Register under the content type the serializer declares for itself.
    SerializationWriterFactoryRegistry().CONTENT_TYPE_ASSOCIATED_FACTORIES[
        serializer.get_valid_content_type()] = serializer
def register_default_deserializer(factory_class: ParseNodeFactory) -> None:
    """Registers the default deserializer to the registry.

    Args:
        factory_class (ParseNodeFactory): the class of the factory to be registered.
    """
    # Bug fix: the original signature took a stray `self` first parameter even
    # though this is a module-level function (its sibling
    # register_default_serializer takes only `factory_class`), so every
    # single-argument call failed with a missing-argument TypeError.
    # NOTE(review): as in register_default_serializer, `type(factory_class)`
    # assumes an *instance* is passed despite the docstring -- confirm.
    base_class = type(factory_class)
    deserializer = base_class()
    ParseNodeFactoryRegistry().CONTENT_TYPE_ASSOCIATED_FACTORIES[
        deserializer.get_valid_content_type()] = deserializer
def enable_backing_store_for_serialization_writer_factory(
    original: SerializationWriterFactory
) -> SerializationWriterFactory:
    """Enables the backing store on default serialization writers and the
    given serialization writer.

    Args:
        original (SerializationWriterFactory): The serialization writer factory
        to enable the backing store on.

    Returns:
        SerializationWriterFactory: A serialization writer factory with the
        backing store enabled.
    """
    if isinstance(original, SerializationWriterFactoryRegistry):
        # Registries are patched in place rather than wrapped.
        enable_backing_store_for_serialization_registry(original)
        result = original
    else:
        result = BackingStoreSerializationWriterProxyFactory(original)
    # Always make sure both global singleton registries are store-enabled too.
    enable_backing_store_for_serialization_registry(SerializationWriterFactoryRegistry())
    enable_backing_store_for_parse_node_registry(ParseNodeFactoryRegistry())
    return result
def enable_backing_store_for_parse_node_factory(original: ParseNodeFactory) -> ParseNodeFactory:
    """Enables the backing store on default parse node factories and the given
    parse node factory.

    Args:
        original (ParseNodeFactory): The parse node factory to enable the
        backing store on.

    Returns:
        ParseNodeFactory: A parse node factory with the backing store enabled.
    """
    if isinstance(original, ParseNodeFactoryRegistry):
        # Registries are patched in place rather than wrapped.
        enable_backing_store_for_parse_node_registry(original)
        result = original
    else:
        result = BackingStoreParseNodeFactory(original)
    # The global singleton registry must be store-enabled in either case.
    enable_backing_store_for_parse_node_registry(ParseNodeFactoryRegistry())
    return result
def enable_backing_store_for_parse_node_registry(registry: ParseNodeFactoryRegistry) -> None:
    """Wrap every plain factory registered in *registry* with a backing-store
    parse node proxy, leaving already store-aware entries untouched."""
    factories = registry.CONTENT_TYPE_ASSOCIATED_FACTORIES
    for content_type, factory in list(factories.items()):
        if isinstance(factory, (BackingStoreParseNodeFactory, ParseNodeFactoryRegistry)):
            continue  # already store-aware
        factories[content_type] = BackingStoreParseNodeFactory(factory)
def enable_backing_store_for_serialization_registry(
    registry: SerializationWriterFactoryRegistry
) -> None:
    """Wrap every plain factory registered in *registry* with a backing-store
    serialization writer proxy, leaving already store-aware entries untouched."""
    factories = registry.CONTENT_TYPE_ASSOCIATED_FACTORIES
    for content_type, factory in list(factories.items()):
        if isinstance(
            factory, (SerializationWriterProxyFactory, SerializationWriterFactoryRegistry)
        ):
            continue  # already proxied or a nested registry
        factories[content_type] = BackingStoreSerializationWriterProxyFactory(factory)
| StarcoderdataPython |
79934 | """
Project Euler Problem 173: https://projecteuler.net/problem=173
We shall define a square lamina to be a square outline with a square "hole" so that
the shape possesses vertical and horizontal symmetry. For example, using exactly
thirty-two square tiles we can form two different square laminae:
With one-hundred tiles, and not necessarily using all of the tiles at one time, it is
possible to form forty-one different square laminae.
Using up to one million tiles how many different square laminae can be formed?
"""
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the distinct square laminae constructible from at most *limit*
    tiles (Project Euler 173).

    >>> solution(100)
    41
    """
    total = 0
    for outer in range(3, limit // 4 + 2):
        tiles_full = outer * outer
        # Smallest legal hole width: wide enough that the lamina fits the tile
        # budget, at least 1, and with the same parity as the outer edge.
        min_hole = max(ceil(sqrt(tiles_full - limit)), 1) if tiles_full > limit else 1
        if (outer - min_hole) % 2:
            min_hole += 1
        # Every hole from min_hole up to outer-2 (same parity) is a lamina.
        total += (outer - min_hole - 2) // 2 + 1
    return total
if __name__ == "__main__":
    # Solve for the default limit (one million tiles) and print the answer.
    print(f"{solution() = }")
| StarcoderdataPython |
8117142 | #!/usr/bin/env python
''' getbibref.py
Author: <NAME>
Version: 0.2
Small attempt at a program to take a DOI input (unique address for research papers) and
return the Bibtex formatted result from the above website in a copy/pastable form. '''
import urllib.request
from tkinter import *
##This is the logic
# x = urllib.request.urlopen('http://api.crossref.org/works/10.1098/rsta.2010.0348/transform/application/x-bibtex')
# data = x.read()
# print(data)
class MyFirstGUI(Tk):
    """Main (and only) window: a DOI entry box, Submit/Close buttons and a
    read-only text area showing the BibTeX record fetched from CrossRef."""

    def __init__(self):
        # create main window by calling the __init__ method of parent class Tk
        Tk.__init__(self)
        self.geometry("600x400")
        self.title("DOI to Bibtex Tool")
        label1 = Label(self, text="Enter DOI")
        label1.pack()
        ##Give a default, customisable DOI value
        self.entry1 = Entry(self, bd=5)
        self.entry1.insert(0, '10.1098/rsta.2010.0348')
        self.entry1.pack()
        # NOTE(review): ``self.update`` shadows tkinter's ``Tk.update()``
        # method; confirm nothing relies on the original method.
        submit = Button(self, text ="Submit", command = self.update)
        submit.pack()
        close_button = Button(self, text="Close", command=self.quit)
        close_button.pack()
        ##Here I want to produce the result of my http request call
        self.w = Text(self, relief='flat',
                      bg = self.cget('bg'),
                      highlightthickness=0, height=100)
        # trick to make disabled text copy/pastable
        self.w.bind("<1>", lambda event: self.w.focus_set())
        self.w.insert('1.0', "Bibtex Reference Here")
        self.w.configure(state="disabled", inactiveselectbackground=self.w.cget("selectbackground"))
        self.w.pack()
        # NOTE(review): entering the event loop inside __init__ means the
        # constructor blocks until the window is closed.
        self.mainloop()

    def update_text(self, new_text):
        """Replace the text widget's content with *new_text*.

        Accepts bytes (the CrossRef response body) or str (error messages).
        Bug fix: the original unconditionally called ``.decode()`` and thus
        raised AttributeError when the error path passed a plain str.
        """
        if isinstance(new_text, bytes):
            # Un-escape backslash sequences and restore the '/' that the API
            # echoes back URL-encoded as %2F.
            new_text = new_text.decode('unicode-escape').replace("%2F", "/", 1).encode() ##Removes the %2F that replaces the / in the URL
        print(type(new_text))
        self.w.configure(state='normal')
        self.w.delete('1.0', 'end') # clear text
        self.w.insert('1.0', new_text) # display new text
        self.w.configure(state='disabled')

    def update(self):
        """Fetch the BibTeX record for the DOI in the entry box and show it."""
        doi = str(self.entry1.get()) ##Get the user inputted DOI
        print(str(self.entry1.get()))
        url = 'http://api.crossref.org/works/'+ doi + '/transform/application/x-bibtex'
        print(url)
        try:
            x = urllib.request.urlopen(url)
        except urllib.error.URLError as e:
            ##Show user an error if they put in the wrong DOI
            self.update_text(str(e))
        else:
            ##Update the output area to the returned form of the text entry, ideally highlightable for copying
            data = x.read()
            self.update_text(data)
if __name__ == '__main__':
    # Constructing the GUI enters mainloop() and blocks until the window closes.
    my_gui = MyFirstGUI()
__author__ = "<NAME>"
__version__ = "0.2"
1704639 | <gh_stars>1-10
import numpy as np
import pandas as pd
from sklearn.inspection import permutation_importance
from sklearn.model_selection import KFold
from art.utils import to_categorical
from sklearn.preprocessing import StandardScaler
import math
import sys
import argparse
import os
from elm_model import ExtremeLearningMachine
from sklearn.metrics import recall_score, precision_score, accuracy_score, f1_score
# Command-line interface: feature-count threshold, ELM hidden-layer width,
# and the path to the input CSV dataset.
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--threshold", default=10, type=int)
parser.add_argument("-n", "--n_unit", default=600, type=int)
parser.add_argument("-f", "--file_name_path", default="file_name_path", type=str)
args = parser.parse_args()
threshold = args.threshold  # number of top-ranked features to keep
n_unit = args.n_unit  # number of ELM hidden units
file_name = args.file_name_path  # path to the dataset CSV
def standard_trans(x_train, x_test):
    """Fit a StandardScaler on the training split and apply it to both splits.

    Returns (scaled_train, scaled_test, (mean, variance)); the scaled arrays
    are cast to float64.
    """
    scaler = StandardScaler()
    scaled_train = scaler.fit_transform(x_train).astype(np.float64)
    # The test split is transformed with the *training* statistics only.
    scaled_test = scaler.transform(x_test).astype(np.float64)
    return (
        scaled_train,
        scaled_test,
        (scaler.mean_, scaler.var_),
    )
def shuffle_data(x_data, y_data):
    """Randomly permute x_data and y_data in unison (same row order for both).

    Uses the global numpy RNG, exactly like the original implementation.
    """
    order = np.arange(np.shape(y_data)[0])
    np.random.shuffle(order)
    return x_data[order], y_data[order]
def extract_data_balanced(df):
    """Return a class-balanced frame: at most the first 24127 benign rows
    (label 0) followed by every malicious row (label 1)."""
    benign_mask = df["label"] == 0
    # Cap benign rows at the first 24127 positions to balance the classes.
    benign_mask[24127:] = False
    malicious_rows = df.loc[df["label"] == 1, :]
    return pd.concat([df.loc[benign_mask, :], malicious_rows], axis=0)
def extract_data_unbalanced_more_malicious(df):
    """Return an unbalanced frame: ~6050 benign rows and up to 19500
    malicious rows (benign first, then malicious)."""
    benign_mask = df["label"] == 0
    benign_mask[6051:] = False
    # Positional indices of the first 19500 malicious rows.
    malicious_positions = np.where(df["label"] == 1)[0][:19500]
    malicious_mask = np.zeros(len(df), dtype=bool)
    malicious_mask[malicious_positions] = True
    return pd.concat([df.loc[benign_mask, :], df.loc[malicious_mask, :]], axis=0)
def extract_data_unbalanced_more_benign(df):
    """Return an unbalanced frame: up to 19500 benign rows and up to 6050
    malicious rows (benign first, then malicious); prints the selected
    benign shape and malicious count as the original did."""
    benign_mask = df["label"] == 0
    benign_mask[19500:] = False
    print(df.loc[benign_mask, :].shape)
    # Positional indices of the first 6050 malicious rows.
    malicious_positions = np.where(df["label"] == 1)[0][:6050]
    print(len(malicious_positions))
    malicious_mask = np.zeros(len(df), dtype=bool)
    malicious_mask[malicious_positions] = True
    return pd.concat([df.loc[benign_mask, :], df.loc[malicious_mask, :]], axis=0)
def get_average_result(perm_list):
    """Element-wise mean of the per-fold permutation-importance frames.

    NOTE(review): relies on the module-level ``columns_dic`` global for the
    row index, so it must run after ``columns_dic`` is built below -- confirm
    call ordering if this module is refactored.
    """
    result = pd.DataFrame(
        data=0,
        columns=["importances_mean", "importances_std"],
        index=list(columns_dic.keys()),
    )
    # Sum the per-fold frames, then divide by the number of folds.
    for pi in perm_list:
        result += pi
    result = result / len(perm_list)
    print(result)  # side effect: log the averaged table
    return result
def compute_permutaion_importance(perm_list, x_train_std, y_train, model, columns_dic):
    """Run sklearn permutation importance for one fold and append the result
    frame to ``perm_list`` (which is also returned).

    (The function name keeps the original spelling, since callers use it.)
    """
    # Convert one-hot labels back to class ids for the accuracy scorer.
    y_train_one_hot = np.argmax(y_train, axis=1)
    result = permutation_importance(
        model,
        x_train_std,
        y_train_one_hot,
        scoring="accuracy",
        n_repeats=10,
        n_jobs=-1,
        random_state=71,
    )
    print(result)
    # One row per feature, indexed by feature name.
    perm_imp_df = pd.DataFrame(
        {
            "importances_mean": result["importances_mean"],
            "importances_std": result["importances_std"],
        },
        index=list(columns_dic.keys()),
    )
    perm_list.append(perm_imp_df)
    return perm_list
def feature_selection(file_name, threshold, columns):
    """Return the top-``threshold`` feature names.

    If ``file_name`` (a ranked permutation-importance CSV) exists, take the
    first ``threshold`` index entries from it. Otherwise only the full
    feature set (threshold == 25) is acceptable; any other threshold raises
    ValueError because no ranking is available.
    """
    if os.path.isfile(file_name):
        ranking = pd.read_csv(file_name, index_col=0)
        return list(ranking.index)[:threshold]
    # No ranking file on disk: fall back to the complete column list.
    if threshold != 25:
        raise ValueError("file not exists")
    return columns
def output_perm_imp(perm_list, columns_dic, n_unit):
    """Average the per-fold importances and write the ranked table
    (descending by mean importance) to ./data/perm_imp_nunit<n_unit>.csv."""
    result = get_average_result(perm_list)
    perm_imp_df = pd.DataFrame(
        {
            "importances_mean": result["importances_mean"],
            "importances_std": result["importances_std"],
        },
        index=list(columns_dic.keys()),
    )
    print(perm_imp_df)
    # Rank features from most to least important before saving.
    perm_imp_df = perm_imp_df.sort_values("importances_mean", ascending=False)
    perm_imp_df.to_csv("./data/perm_imp_nunit{}.csv".format(n_unit))
def Training(x_data, y_data, n_unit, threshold, save_mode, shi_work):
    """
    Training a model by the entire dataset and save the trained model and the parameters.

    Fits a StandardScaler and an ELM on all of (x_data, y_data). When
    ``save_mode`` is set, the model weights and the scaler statistics
    (mean/var) are written under ./data/, with file names that depend on
    whether ``shi_work`` (the baseline feature set) is in use.
    """
    stdsc = StandardScaler()
    x_data = stdsc.fit_transform(x_data)
    y_data = to_categorical(y_data, 2).astype(np.float64)
    model = ExtremeLearningMachine(n_unit=n_unit)
    model.fit(X=x_data, y=y_data)
    if not shi_work and save_mode:
        model.save_weights("./data/elm_threshold{}_nunit{}".format(threshold, n_unit))
        mean, var = stdsc.mean_, stdsc.var_
        np.savez(
            "./data/param_threshold{}_nunit{}".format(threshold, n_unit),
            mean=mean,
            var=var,
        )
    elif shi_work and save_mode:
        model.save_weights("./data/elm_shi{}".format(n_unit))
        mean, var = stdsc.mean_, stdsc.var_
        np.savez(
            "./data/param_shi",
            mean=mean,
            var=var,
        )
# Define parameters
extraction_dataset_mode = (
    "normal" # benign-then-malicious ("btm"), malicious-then-benign ("mtb"), or "normal"
)
shi_work = False   # True: use the fixed baseline feature set from Shi et al.
save_mode = True   # True: write evaluation CSVs to disk
pi_mode = False    # True: also compute permutation importance per fold
####################################
# load data
df = pd.read_csv(file_name)
# Replace nan
df = df.replace(np.nan, 0)
# Select extraction dataset mode
if extraction_dataset_mode == "btm":
    df = extract_data_unbalanced_more_benign(df)
elif extraction_dataset_mode == "mtb":
    df = extract_data_unbalanced_more_malicious(df)
elif extraction_dataset_mode == "normal":
    df = extract_data_balanced(df)
# Map each feature column name to its position (used by importance reports).
columns_dic = {
    column: index for index, column in enumerate(df.drop("label", axis=1).columns)
}
if shi_work:
    # Fixed baseline feature set.
    features = [
        "length",
        "max_consecutive_chars",
        "entropy",
        "n_ip",
        "n_countries",
        "mean_TTL",
        "stdev_TTL",
        "life_time",
        "active_time",
    ]
else:
    # Top-`threshold` features from a previously saved importance ranking.
    features = feature_selection(
        "./data/perm_imp_nunit{}.csv".format(n_unit),
        threshold=threshold,
        columns=list(columns_dic.keys()),
    )
df = df.loc[:, features + ["label"]]
x_data = df.drop("label", axis=1).values
y_data = df["label"].values
# 5-fold cross-validation with a fixed seed for reproducibility.
FOLD_NUM = 5
fold_seed = 71
folds = KFold(n_splits=FOLD_NUM, shuffle=True, random_state=fold_seed)
fold_iter = folds.split(x_data)
perm_list = []
# acc,precision,recall,f1 train
acc_train_total = []
precision_train_total = []
recall_train_total = []
f1_train_total = []
# acc,precision,recall,f1 test
acc_test_total = []
precision_test_total = []
recall_test_total = []
f1_test_total = []
eval_result = {}
# Cross-validation: train one ELM per fold and collect train/test metrics.
for n_fold, (trn_idx, val_idx) in enumerate(fold_iter):
    print(f"Fold times:{n_fold}")
    x_train, x_test = x_data[trn_idx], x_data[val_idx]
    y_train, y_test = y_data[trn_idx], y_data[val_idx]
    # Scale with training statistics; one-hot encode the binary labels.
    x_train_std, x_test_std, _ = standard_trans(x_train, x_test)
    y_train, y_test = (
        to_categorical(y_train, 2).astype(np.float64),
        to_categorical(y_test, 2).astype(np.float64),
    )
    # Training
    model = ExtremeLearningMachine(n_unit=n_unit, activation=None)
    model.fit(x_train_std, y_train)
    # Results
    y_train_pred = np.argmax(model.transform(x_train_std), axis=1)
    y_train_true = np.argmax(y_train, axis=1)
    y_test_pred = np.argmax(model.transform(x_test_std), axis=1)
    y_test_true = np.argmax(y_test, axis=1)
    # evaluate train
    acc_train_total.append(accuracy_score(y_train_true, y_train_pred))
    precision_train_total.append(precision_score(y_train_true, y_train_pred))
    recall_train_total.append(recall_score(y_train_true, y_train_pred))
    f1_train_total.append(f1_score(y_train_true, y_train_pred))
    # evaluate test
    acc_test_total.append(accuracy_score(y_test_true, y_test_pred))
    precision_test_total.append(precision_score(y_test_true, y_test_pred))
    recall_test_total.append(recall_score(y_test_true, y_test_pred))
    f1_test_total.append(f1_score(y_test_true, y_test_pred))
    # permutation importance
    if threshold == 25 and pi_mode:
        perm_list = compute_permutaion_importance(
            perm_list, x_train_std, y_train, model, columns_dic
        )
# Average the per-fold metrics into a single evaluation table.
eval_result["train_accuracy"] = np.average(acc_train_total)
eval_result["train_precision"] = np.average(precision_train_total)
eval_result["train_recall"] = np.average(recall_train_total)
eval_result["train_f1"] = np.average(f1_train_total)
eval_result["test_accuracy"] = np.average(acc_test_total)
eval_result["test_precision"] = np.average(precision_test_total)
eval_result["test_recall"] = np.average(recall_test_total)
eval_result["test_f1"] = np.average(f1_test_total)
eval_df = pd.DataFrame.from_dict(eval_result, orient="index")
eval_df = eval_df.rename(columns={0: threshold})
# Output results
if shi_work and save_mode:
    eval_df.to_csv("./eval_result/eval_shi_nunit{}.csv".format(n_unit))
elif save_mode:
    eval_df.to_csv(
        "./eval_unbalanced_more_benign/eval_threshold{}_nunit{}.csv".format(
            threshold, n_unit
        )
    )
if threshold == 25 and pi_mode:
    output_perm_imp(perm_list, columns_dic, n_unit)
# Training(x_data, y_data, n_unit, threshold, save_mode=True, shi_work=shi_work)
1813457 | <reponame>omnihenry/python
#!/usr/bin/python
# title :ssh_handler.py
# description :This script contains the definition of the class
# that encapsulate the functionalities of remote access
# author :<NAME>
# date :20170820
# version :0.1
# usage :to be imported
# notes :
# python_version :3.6.2
#==============================================================================
from ssh_globals import *
import paramiko
class SSHHandler:
'''Handle SSH connections and remote command executions.'''
def __init__(self):
'''Do nothing here.'''
pass
def connect(self, remote):
'''
Connect to remote host
:param remote: remote host ip
:type remote: str
:returns: successful or not & message
:rtype: tuple of 2 (bool, str)
'''
result_successful, result_message = True, ''
for host, username, password in [conn for conn in CONNECTION_LIST]:
if host == remote:
break;
# try to connect for pre-definded number of times
for idx in range(CONNECTION_ATTEMPT_MAX):
try:
logger.info('Connecting to {} - attempt #{}'.format(host, idx+1))
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(host, username=username, password=password)
except paramiko.AuthenticationException:
result_message = 'Error: Authentication failed when connecting to {}'.format(host)
logger.error(result_message)
result_successful = False
except Exception as e: # Other exceptions
result_message = 'Error: Could not connect to {} - {}'.format(host, e)
logger.error(result_message)
result_successful = False
else: # If it's all good
result_message = 'Successfully connected to {}'.format(host)
logger.info(result_message)
result_successful = True
break
return (result_successful, result_message)
def execute_cmd(self, cmd):
'''
Execute command on connected host.
:param cmd: the command to run
:type cmd: str
:returns: successful or not & message
:rtype: tuple of 2 (bool, str)
'''
logger.info('Executing command: {}'.format(cmd))
result_successful, result_message = True, ''
try:
stdin, stdout, stderr = self.ssh.exec_command(cmd)
except Exception as e: # All exceptions
result_successful = False
result_message = str(e)
else:
# If command was run, catch the result.
err = stderr.read()
if (err):
result_successful = False
result_message = err
logger.info('Failed.')
else:
result_successful = True
result_message = stdout.read()
logger.info('Successful.')
finally:
# Release resources before leaving
stdin.close()
stderr.close()
stdout.close()
return (result_successful, result_message)
def disconnect(self):
    """Tear down the SSH session opened by connect(), if one exists."""
    if hasattr(self, 'ssh'):
        self.ssh.close()
        logger.info('Disconnected')
| StarcoderdataPython |
243351 | #funzioni di utilita' (gestione cartelle, etc.
import os
# %% utilities
def creaCartella(currentDir, newFolderName):
    """Create sub-folder `newFolderName` inside `currentDir`.

    A pre-existing folder is tolerated (a notice is printed); any other
    OS error is reported with its actual cause instead of being
    misreported as "already exists" (the original caught all OSError
    with the same message).

    :param currentDir: parent directory path
    :param newFolderName: name of the folder to create
    :returns: (currentDir, full path of the new folder)
    """
    newDirPath = os.path.join(currentDir, newFolderName)
    try:
        os.mkdir(newDirPath)
    except FileExistsError:
        print(f"Cartella {newFolderName} già esistente")
    except OSError as e:
        # Best-effort like the original: report the failure, don't raise.
        print(f"Impossibile creare la cartella {newFolderName}: {e}")
    return currentDir, newDirPath
9665796 |
from math import sqrt
def bracket_block(text):
    """Find the start and end of the first bracketed block in `text`.

    :param text: string possibly containing parentheses
    :returns: (istart, iend) — indices of the first '(' and its matching
        ')'. Returns (-1, -1) when there is no '('. For unbalanced
        input, iend is the last index scanned (previously this case
        raised an IndexError by reading one past the end of the string).
    """
    istart = text.find('(')
    cursor = istart
    if cursor != -1:
        depth = 1
        # Stop before running off the end of the string.
        while depth > 0 and cursor < len(text) - 1:
            cursor += 1
            if text[cursor] == '(':
                depth += 1
            elif text[cursor] == ')':
                depth -= 1
    return istart, cursor
def nw_split(text):
    """Split a comma-separated expression at the top level only.

    A bracketed sub-expression — together with the single opcode
    character that follows its closing bracket — is kept intact as one
    item; text before/after it is split on commas recursively.
    """
    istart, iend = bracket_block(text)
    if istart == -1:
        # No brackets at all: a plain comma-separated list.
        return text.split(',')
    else:
        # include the opcode at the end
        result = [text[istart:iend + 2], ]
        if iend < len(text) - 2:
            # following text may contain further brackets
            result = result + nw_split(text[iend + 3:])
        if istart > 0:
            # preceding text does not contain brackets by construction
            # so can be readily included without further special splitting
            result = text[:istart - 1].split(',') + result
        return result
def newick2Latex(text, index_start=0):
    """Render a Newick-encoded expression as a LaTeX snippet.

    :param text: Newick string understood by newick2human()
    :param index_start: offset for generated subscripts
    :returns: LaTeX block listing the expression and its named
        coefficients, followed by the coefficients again as plain
        Python assignments for copy/paste.
    """
    pieces = {}
    output = '**** LATEX *******\n\n'
    output += '$ \\dot{S} = %s $ \\newline \\newline \n' % newick2human(text, pieces, index_start=index_start)
    # Fix: dict.items() returns a view in Python 3 and has no .sort();
    # the original `coef_list = pieces.items(); coef_list.sort()` was
    # Python-2-only code.
    coef_list = sorted(pieces.items())
    # list coeficients (constants) in Latex
    for coef_tuple in coef_list:
        output += '$ %s = %s $ \\newline \n' % (coef_tuple[0], coef_tuple[1])
    output += '\n**** END LATEX *******\n\n'
    # list them again for copy paste into Python
    for coef_tuple in coef_list:
        output += '%s = %s\n' % (coef_tuple[0], coef_tuple[1])
    return output
def newick2human(text, pieces=None, SPACEDIM=2, index_start=0):
    """Recursively render a Newick-encoded expression tree as LaTeX.

    :param text: Newick string, e.g. '(p0,p1)A' (trailing char = opcode)
    :param pieces: dict collecting named sub-expressions (constants
        'c_*' and conditionals 'a_*'); mutated in place.
    :param SPACEDIM: 'p' indices below this render as state variables
        S_i; the rest as parameters f_i.
    :param index_start: offset added to generated subscripts.
    :returns: LaTeX fragment for `text`
    """
    strings = {'V': '[%s , %s]', 'A': '(%s + %s)', 'S': '(%s - %s)', 'M': '%s %s', 'D': '%s / %s', 'Q': '\sqrt{%s}', 'I': '%s $ for $ %s>0 $ and $ %s $ otherwise $ '}
    leaf_ops = {'Q': '\sqrt', 'E': 'e', 'O': '1/', 'T': 'tanh', 'L': 'log'}
    if pieces is None:  # fix: was `pieces == None` (non-idiomatic identity test)
        pieces = {}
    if '(' in text:
        opcode = text[-1]
        childrentext = text[1:-2]
        childtexts = nw_split(childrentext)
        if (opcode == 'Q') and ('(' not in childtexts[0]) and ('p' not in childtexts[0]):
            # Square root of a plain constant: fold it into a named constant.
            newvarname = 'c_%d' % len([el for el in pieces if 'c' in el])
            pieces[newvarname] = '%.2e' % sqrt(float(childtexts[0]))
            return newvarname
        if (opcode == 'I'):
            # Conditional expression: store it as named auxiliary a_k.
            newvarname = 'a_%d' % len([el for el in pieces if 'a' in el])
            # Reserve the name before recursing so nested conditionals
            # get distinct indices.
            pieces[newvarname] = 'placeholder'
            pieces[newvarname] = strings[opcode] % tuple([newick2human(ct, pieces, index_start=index_start) for ct in [childtexts[1], childtexts[0], childtexts[2]]])
            return newvarname
        return strings[opcode] % tuple([newick2human(ct, pieces, index_start=index_start) for ct in childtexts])
    else:
        if text in leaf_ops:
            return leaf_ops[text]
        if 'p' in text:
            if text[1:] in [str(e) for e in range(SPACEDIM)]:
                return 'S_%d' % (int(text[1:]) + index_start)
            else:
                return 'f_%d' % (int(text[1:]) - SPACEDIM + index_start)
        else:
            # NOTE(review): unlike the 'Q' branch above, this name index
            # includes index_start — looks inconsistent; confirm intent.
            newvarname = 'c_%d' % (len([el for el in pieces if 'c' in el]) + index_start)
            pieces[newvarname] = '%.5e' % float(text)
            return newvarname
| StarcoderdataPython |
4871237 | import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
class ModelTrainer:
    """Static helpers for training and evaluating torch models.

    All methods move data/modules to `device` (CUDA when available).
    """

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    @staticmethod
    def train(model, criterion, optimizer, trainloader, epochs=5):
        """Standard supervised classification training loop.

        :param model: torch.nn.Module to optimise
        :param criterion: loss function, e.g. CrossEntropyLoss
        :param optimizer: optimiser over model.parameters()
        :param trainloader: iterable of (inputs, labels) batches
        :param epochs: number of passes over trainloader
        :returns: the trained model
        """
        criterion = criterion.to(ModelTrainer.device)
        model = model.to(ModelTrainer.device)
        model.train()
        for epoch in range(epochs):
            running_loss = 0.0
            for i, data in enumerate(trainloader, 0):
                inputs, labels = data
                inputs = inputs.to(ModelTrainer.device)
                labels = labels.to(ModelTrainer.device)
                inputs.requires_grad = True
                optimizer.zero_grad()
                outputs = model(inputs).to(ModelTrainer.device)
                loss = criterion(outputs, labels).to(ModelTrainer.device)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                # Report every 100 batches and on the last batch of the epoch.
                if i % 100 == 99 or i == len(trainloader) - 1:
                    _, predicted = torch.max(outputs.data, 1)
                    total = labels.size(0)
                    accuracy = (predicted == labels).sum().item() / total
                    avg_loss = running_loss / 100
                    print('Epoch: %d | Batch: %d | Loss: %.3f | Accuracy: %.3f' %
                          (epoch + 1, i + 1, avg_loss, accuracy))
                    running_loss = 0.0
        print('Finished Training')
        return model

    @staticmethod
    def train_mnist_gan(generator, discriminator, dataloader, lr, latent_dim, epochs=100):
        """Adversarial training loop for an MNIST-style GAN.

        :param generator: maps latent vectors (batch, latent_dim) to images
        :param discriminator: maps images to a (batch, 1) realness score
        :param dataloader: yields (imgs, labels); labels are ignored
        :param lr: learning rate for both Adam optimisers
        :param latent_dim: dimensionality of the latent noise
        :returns: (gen_losses, disc_losses) as per-batch python floats
        """
        generator = generator.to(ModelTrainer.device)
        discriminator = discriminator.to(ModelTrainer.device)
        gen_optimizer = torch.optim.Adam(generator.parameters(), lr=lr)
        disc_optimizer = torch.optim.Adam(discriminator.parameters(), lr=lr)
        adversarial_loss = torch.nn.BCELoss().to(ModelTrainer.device)
        Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
        batches_done = 0
        gen_losses = []
        disc_losses = []
        for epoch in range(epochs):
            for i, (imgs, _) in enumerate(dataloader):
                # Label smoothing: real targets 0.9, fake targets 0.1.
                valid = Variable(Tensor(imgs.size(0), 1).fill_(0.9), requires_grad=False).to(ModelTrainer.device)
                fake = Variable(Tensor(imgs.size(0), 1).fill_(0.1), requires_grad=False).to(ModelTrainer.device)
                real_imgs = Variable(imgs.type(Tensor)).to(ModelTrainer.device)
                # --- generator step ---
                gen_optimizer.zero_grad()
                z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], latent_dim)))).to(ModelTrainer.device)
                gen_imgs = generator(z).to(ModelTrainer.device)
                predicted_true = discriminator(gen_imgs).to(ModelTrainer.device)
                g_loss = adversarial_loss(predicted_true, valid).to(ModelTrainer.device)
                g_loss.backward()
                gen_optimizer.step()
                # --- discriminator step (generated images detached) ---
                disc_optimizer.zero_grad()
                predicted_true = discriminator(real_imgs).to(ModelTrainer.device)
                real_loss = adversarial_loss(predicted_true, valid).to(ModelTrainer.device)
                predicted_false = discriminator(gen_imgs.detach()).to(ModelTrainer.device)
                fake_loss = adversarial_loss(predicted_false, fake).to(ModelTrainer.device)
                disc_loss = (real_loss + fake_loss) / 2
                disc_loss.to(ModelTrainer.device)
                disc_loss.backward()
                disc_optimizer.step()
                if i % 100 == 0:
                    print(f'Epoch: {epoch+1}/{epochs} | Batch: {batches_done % len(dataloader)}/{len(dataloader)} | Discriminator Loss: {disc_loss.item():.3f} | Generator Loss: {g_loss.item():.3f}')
                disc_losses.append(disc_loss.item())
                # Fix: store the scalar, not the tensor — appending g_loss
                # itself kept every batch's autograd graph alive and was
                # inconsistent with disc_losses above.
                gen_losses.append(g_loss.item())
                batches_done += 1
        return gen_losses, disc_losses

    @staticmethod
    def train_evidence(model, criterion, optimizer, trainloader, epochs=30):
        """Training loop for an evidential deep learning classifier.

        `criterion` must accept (outputs, labels, epoch, annealing_step).
        Tracks mean Dirichlet evidence and uncertainty alongside the loss.
        :returns: the trained model
        """
        criterion = criterion.to(ModelTrainer.device)
        model.train()
        for epoch in range(epochs):
            running_loss = 0.0
            running_uncertainty = 0.0
            running_evidence = 0.0
            for i, data in enumerate(trainloader, 0):
                inputs, labels = data
                inputs = inputs.to(ModelTrainer.device)
                labels = labels.to(ModelTrainer.device)
                inputs.requires_grad = True
                optimizer.zero_grad()
                outputs = model(inputs).to(ModelTrainer.device)
                loss = criterion(outputs, labels, epoch, annealing_step=10).to(ModelTrainer.device)
                # Dirichlet parameters: evidence = relu(logits),
                # alpha = evidence + 1, uncertainty u = K / sum(alpha).
                evidence = F.relu(outputs)
                alpha = evidence + 1
                u = outputs.size(1) / torch.sum(alpha, dim=1, keepdim=True)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                running_uncertainty += u.mean().item()
                running_evidence += torch.sum(evidence, dim=1).mean().item()
                if i % 100 == 99 or i == len(trainloader) - 1:
                    _, predicted = torch.max(outputs.detach(), 1)
                    total = labels.size(0)
                    accuracy = (predicted == labels).sum().item() / total
                    avg_loss = running_loss / 100
                    avg_uncertainty = running_uncertainty / 100
                    avg_evidence = running_evidence / 100
                    print('Epoch: {:d} | Batch: {:d} | Loss: {:.3f} | Accuracy: {:.1f}% | '
                          'Evidence: {:.1f} | Uncertainty: {:.3f}'.format
                          (epoch + 1, i + 1, avg_loss, 100 * accuracy, avg_evidence, avg_uncertainty))
                    running_loss = 0.0
                    running_uncertainty = 0.0
                    running_evidence = 0.0
        print('Finished Training')
        return model

    @staticmethod
    def validate(model, criterion, testloader):
        """Evaluate `model` on `testloader`; prints average loss/accuracy."""
        running_loss = 0.
        accuracy = 0.
        total = 0.
        model.eval()
        criterion = criterion.to(ModelTrainer.device)
        with torch.no_grad():
            for i, data in enumerate(testloader):
                inputs, labels = data
                inputs = inputs.to(ModelTrainer.device)
                labels = labels.to(ModelTrainer.device)
                outputs = model(inputs).to(ModelTrainer.device)
                _, predicted = torch.max(outputs, 1)
                accuracy += (labels == predicted).sum().item()
                total += labels.size()[0]
                loss = criterion(outputs, labels).to(ModelTrainer.device)
                running_loss += loss.item()
        print('Average Test loss: {:.3f}. Accuracy: {:.2f}%'.format(
            (running_loss / len(testloader)), (100 * accuracy / total)))
| StarcoderdataPython |
3491724 | """Tasks deferred outside the request context."""
from .article import clean_articles, crawl_articles, pair_article
| StarcoderdataPython |
3319077 | <filename>ros/geometry/tf/src/tf/broadcaster.py<gh_stars>100-1000
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the <NAME> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import tf
import tf.msg
import geometry_msgs.msg
import math
class TransformBroadcaster:
    """
    :class:`TransformBroadcaster` is a convenient way to send transformation updates on the ``"/tf"`` message topic.
    """

    def __init__(self, queue_size=100):
        self.pub_tf = rospy.Publisher("/tf", tf.msg.tfMessage, queue_size=queue_size)

    def sendTransform(self, translation, rotation, time, child, parent):
        """
        :param translation: the translation of the transformtion as a tuple (x, y, z)
        :param rotation: the rotation of the transformation as a tuple (x, y, z, w)
        :param time: the time of the transformation, as a rospy.Time()
        :param child: child frame in tf, string
        :param parent: parent frame in tf, string

        Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
        """
        stamped = geometry_msgs.msg.TransformStamped()
        stamped.header.stamp = time
        stamped.header.frame_id = parent
        stamped.child_frame_id = child
        (stamped.transform.translation.x,
         stamped.transform.translation.y,
         stamped.transform.translation.z) = translation
        (stamped.transform.rotation.x,
         stamped.transform.rotation.y,
         stamped.transform.rotation.z,
         stamped.transform.rotation.w) = rotation
        self.sendTransformMessage(stamped)

    def sendTransformMessage(self, transform):
        """
        :param transform: geometry_msgs.msg.TransformStamped

        Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
        """
        self.pub_tf.publish(tf.msg.tfMessage([transform]))
if __name__ == '__main__':
    rospy.init_node('tf_turtle')
    # Fix: the original instantiated `TurtleTFBroadcaster`, which is not
    # defined anywhere in this module (NameError). The only broadcaster
    # defined here is TransformBroadcaster, which takes no frame argument.
    tfb = TransformBroadcaster()
    rospy.spin()
| StarcoderdataPython |
1143 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
'''
gluoncv backbone + multi_gpu
'''
# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'  # gluoncv backbone name; must start with 'resnet' or 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"  # comma-separated CUDA device ids
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20  # iterations between console log lines
SMRY_ITER = 200  # iterations between tensorboard summaries
SAVE_WEIGHTS_INTE = 80000  # iterations between checkpoints; also the LR-schedule unit below
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise NotImplementedError
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True  # VOC2007 11-point AP metric
CUDA9 = True
EVAL_THRESHOLD = 0.5
RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0  # smooth-L1 sigma for the RPN regression loss
FASTRCNN_SIGMA = 1.0
MUTILPY_BIAS_GRADIENT = None   # 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None   # 10.0 if None, will not clip
EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1  # images per GPU
# NOTE(review): 'WARM_SETP' looks like a typo for WARM_STEP but is kept,
# as other modules may reference this name.
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE  # base LR scaled by the effective batch
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE]  # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80
# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True
# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']  # FPN pyramid levels
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]  # one base size per level
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
# Per-cascade-stage box regression scale factors (stages 1..3).
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]
# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000
# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6  # only show in tensorboard
FAST_RCNN_NMS_IOU_THRESHOLD = 0.5  # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0   # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512  # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25
ADD_GTBOXES_TO_TRAIN = False
| StarcoderdataPython |
232095 | import os
from os import path
from exechelper import func_exec_run
localdir = path.join(path.dirname(path.dirname(path.dirname(__file__))), 'storage')
usearch = path.join(path.abspath(path.dirname(__file__)), path.join('bin', 'usearch10.0.240_i86linux32'))
def run_usearch(*args):
    """Run the bundled usearch binary on a file in local storage.

    :param args: (command, input_file[, output_option, output_file, *flags])
        command        usearch sub-command (passed as `-command`)
        input_file     path relative to the local storage dir
        output_option  usearch output flag name (e.g. 'fastaout')
        output_file    output path relative to local storage
        flags          extra bare flags, each passed as `-flag`
    :returns: absolute output path, or '' when no output file was given
    """
    cmdargs = ["-" + args[0], path.join(localdir, args[1])]
    output_file = ''
    # Fix: the original guard was `len(args) > 2` but then read args[3],
    # raising IndexError when exactly three arguments were supplied.
    if len(args) > 3:
        cmdargs.append('-' + args[2])
        output_file = path.join(localdir, args[3])
        cmdargs.append(output_file)
    for arg in args[4:]:
        cmdargs.append('-' + arg)
    func_exec_run(usearch, *cmdargs)
    return output_file
| StarcoderdataPython |
8084456 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
def set_parameter_requires_grad(model, feature_extracting, trainable_layers):
    """Freeze all model parameters except the named trainable ones.

    :param model: torch.nn.Module whose parameters are toggled
    :param feature_extracting: when False, the model is left untouched
    :param trainable_layers: parameter names (as yielded by
        model.named_parameters()) that keep requires_grad=True
    """
    if feature_extracting:
        for name, param in model.named_parameters():
            # Removed leftover debug print of every parameter name.
            if name not in trainable_layers:
                param.requires_grad = False
def init_layer(layer):
    """Initialize a Linear or Convolutional layer (He-uniform).

    Ref: He, Kaiming, et al. "Delving deep into rectifiers: Surpassing
    human-level performance on imagenet classification." Proceedings of
    the IEEE international conference on computer vision. 2015.
    """
    weight = layer.weight
    if weight.ndimension() == 4:
        # Conv2d: fan-in = in_channels * kernel_height * kernel_width.
        (n_out, n_in, height, width) = weight.size()
        fan_in = n_in * height * width
    elif weight.ndimension() == 2:
        # Linear: fan-in is the input feature count.
        (n_out, fan_in) = weight.size()
    std = math.sqrt(2. / fan_in)
    bound = std * math.sqrt(3.)  # uniform bound matching the target std
    weight.data.uniform_(-bound, bound)
    if layer.bias is not None:
        layer.bias.data.fill_(0.)
def init_bn(bn):
    """Initialize a Batchnorm layer to the identity transform (weight=1, bias=0)."""
    bn.weight.data.fill_(1.)
    bn.bias.data.fill_(0.)
#################################################################
# Baseline
class BaselineCnn(nn.Module):
    """Simple 3-layer CNN baseline for clip classification.

    Input: (batch, seq_len, mel_bins) spectrograms; output:
    (batch, class_num) logits.
    NOTE(review): bn0 is BatchNorm2d(64) applied over the frequency axis
    (via the transpose in forward), so mel_bins must be 64 — confirm
    against the feature extractor.
    """

    def __init__(self, class_num):
        super(BaselineCnn, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=64,
                               kernel_size=5, stride=1,
                               padding=2, bias=False)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
                               kernel_size=5, stride=1,
                               padding=2, bias=False)
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
                               kernel_size=5, stride=1,
                               padding=2, bias=False)
        self.fc1 = nn.Linear(256, class_num, bias=True)
        self.bn0 = nn.BatchNorm2d(64)  # input normalisation over mel bins
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm2d(256)
        self.init_weights()

    def init_weights(self):
        # He-uniform for convs/linear, identity for batch norms.
        init_bn(self.bn0)
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_layer(self.conv3)
        init_layer(self.fc1)
        init_bn(self.bn1)
        init_bn(self.bn2)
        init_bn(self.bn3)

    def forward(self, input):
        (_, seq_len, mel_bins) = input.shape
        x = input.view(-1, 1, seq_len, mel_bins)
        # Swap freq axis into the channel slot so bn0 normalises per mel bin.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.max_pool2d(x, kernel_size=2)
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, kernel_size=2)
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.max_pool2d(x, kernel_size=2)
        # Global max-pool over the remaining time/freq grid.
        x = F.max_pool2d(x, kernel_size=x.shape[2:])
        x = F.dropout(x, p=0.3, training=self.training)
        x = x.view(x.shape[0:2])
        x = self.fc1(x)
        return x
###############################################################
# VGG
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class VggishConvBlock(nn.Module):
    """VGG-style block: two 3x3 conv+BN+ReLU layers, then 2x2 max-pooling."""

    def __init__(self, in_channels, out_channels):
        super(VggishConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weights()

    def init_weights(self):
        # He-uniform for convs, identity for batch norms.
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, input):
        x = input
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, kernel_size=2)  # halve spatial resolution
        return x
class Vggish(nn.Module):
    """VGG-like classifier: three VggishConvBlocks, global max-pool, linear head.

    NOTE(review): bn0 is BatchNorm2d(64) applied over the frequency axis
    (via the transpose in forward), so mel_bins must be 64 — confirm.
    """

    def __init__(self, classes_num):
        super(Vggish, self).__init__()
        self.conv_block1 = VggishConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = VggishConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = VggishConvBlock(in_channels=128, out_channels=256)
        self.bn0 = nn.BatchNorm2d(64)  # input normalisation over mel bins
        self.fc_final = nn.Linear(256, classes_num, bias=True)
        self.init_weights()

    def init_weights(self):
        init_layer(self.fc_final)
        init_bn(self.bn0)

    def forward(self, input):
        (_, seq_len, mel_bins) = input.shape
        x = input.view(-1, 1, seq_len, mel_bins)
        '''(samples_num, feature_maps, time_steps, freq_num)'''
        # Swap freq axis into the channel slot so bn0 normalises per mel bin.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = self.conv_block3(x)
        # Global max-pool over the remaining time/freq grid.
        x = F.max_pool2d(x, kernel_size=x.shape[2:])
        x = F.dropout(x, p=0.3, training=self.training)
        x = x.view(x.shape[0:2])
        x = self.fc_final(x)
        return x
#################################################################
# ResNet
class ResNetBlock(nn.Module):
    """Residual block (two 3x3 convs) with a 1x1 projection shortcut,
    followed by 2x2 max-pooling.

    The projection (conv3) is applied unconditionally, even when the
    input/output shapes already match.
    """

    def __init__(self, in_ch, out_ch, stride):
        super(ResNetBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv3 = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, padding=0, bias=False)  # shortcut projection
        self.bn1 = nn.BatchNorm2d(out_ch)
        self.bn2 = nn.BatchNorm2d(out_ch)
        self.bn3 = nn.BatchNorm2d(out_ch)
        self.init_weights()

    def init_weights(self):
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_layer(self.conv3)
        init_bn(self.bn1)
        init_bn(self.bn2)
        init_bn(self.bn3)

    def forward(self, x):
        identity = x
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.bn2(self.conv2(x))
        identity = self.bn3(self.conv3(identity))
        x += identity  # residual connection
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)
        return x
class ResNet(nn.Module):
    """Small ResNet-style classifier: 7x7 stem + two residual blocks +
    global max-pool + linear head.

    NOTE(review): bn0 is BatchNorm2d(64) applied over the frequency axis
    (via the transpose in forward), so mel_bins must be 64 — confirm.
    """

    def __init__(self, class_num):
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)  # stem
        self.bn1 = nn.BatchNorm2d(64)
        self.resblock1 = ResNetBlock(64, 128, 1)
        self.resblock2 = ResNetBlock(128, 256, 2)
        self.fc1 = nn.Linear(256, class_num, bias=True)
        self.bn0 = nn.BatchNorm2d(64)  # input normalisation over mel bins
        self.init_weights()

    def init_weights(self):
        init_bn(self.bn0)
        init_layer(self.conv1)
        init_layer(self.fc1)
        init_bn(self.bn1)

    def forward(self, input):
        (_, seq_len, mel_bins) = input.shape
        x = input.view(-1, 1, seq_len, mel_bins)
        # Swap freq axis into the channel slot so bn0 normalises per mel bin.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        x = self.resblock1(x)
        x = self.resblock2(x)
        # Global max-pool over the remaining time/freq grid.
        x = F.max_pool2d(x, kernel_size=x.shape[2:])
        x = F.dropout(x, p=0.3, training=self.training)
        x = x.view(x.shape[0:2])
        x = self.fc1(x)
        return x
#################################################################
# MobileNet
class MobileNetBlock(nn.Module):
    """Depthwise-separable convolution block: depthwise 3x3 conv + BN +
    ReLU, then pointwise 1x1 conv + BN + ReLU."""

    def __init__(self, in_ch, out_ch, stride=1):
        super(MobileNetBlock, self).__init__()
        # Depthwise conv (groups=in_ch → one filter per input channel).
        self.conv1 = nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=stride, padding=1, groups=in_ch, bias=False)
        self.bn1 = nn.BatchNorm2d(in_ch)
        # Pointwise conv mixes channels.
        self.conv2 = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_ch)
        # Fix: the original constructed bn1 and bn2 twice, discarding the
        # first pair of BatchNorm modules.
        self.init_weights()

    def init_weights(self):
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return x
class MobileNet(nn.Module):
    """MobileNet-style classifier: conv stem + two depthwise-separable
    blocks + global max-pool + linear head.

    NOTE(review): bn0 is BatchNorm2d(64) applied over the frequency axis
    (via the transpose in forward), so mel_bins must be 64 — confirm.
    """

    def __init__(self, class_num):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size =3, stride=2, padding=1, bias=False)  # stem
        self.bn1 = nn.BatchNorm2d(64)
        self.mobileblock1 = MobileNetBlock(64, 128, 1)
        self.mobileblock2 = MobileNetBlock(128, 256, 2)
        self.fc1 = nn.Linear(256, class_num, bias=True)
        self.bn0 = nn.BatchNorm2d(64)  # input normalisation over mel bins
        self.init_weights()

    def init_weights(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.conv1)
        init_bn(self.bn1)

    def forward(self, input):
        (_, seq_len, mel_bins) = input.shape
        x = input.view(-1, 1, seq_len, mel_bins)
        # Swap freq axis into the channel slot so bn0 normalises per mel bin.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.mobileblock1(x)
        x = self.mobileblock2(x)
        # Global max-pool over the remaining time/freq grid.
        x = F.max_pool2d(x, kernel_size=x.shape[2:])
        x = F.dropout(x, p=0.15, training=self.training)
        x = x.view(x.shape[0:2])
        x = self.fc1(x)
        return x
| StarcoderdataPython |
5041438 | import numpy as np
################################################################################
# #
# Base class for robots #
# - robot has only 3 actions #
# - forward, turn left, turn right #
# - robot must provide an observation of continous distances to objects #
# - if readings are 90 degrees apart, observation space = 360/90 = 4 #
# - if readings are 15 degrees apart, observation space = 360/15 = 24 #
# #
################################################################################
class QBot:
    """Base class for simple Q-learning robots.

    A robot has exactly three actions (turn left, turn right, move
    forward) and observes continuous distances quantised into
    `sensor_sectors` sectors of `degrees_per_sensor_sector` degrees
    each (e.g. 360/90 = 4 sectors, or 360/15 = 24 sectors).
    """

    def __init__(self, sensor_sectors, degrees_per_sensor_sector, turn_sectors):
        self.sensor_sectors = sensor_sectors
        self.degrees_per_sensor_sector = degrees_per_sensor_sector
        self.turn_sectors = turn_sectors

    def action_space(self):
        """Number of available actions: turn left, turn right, move forward."""
        return 3

    def observation_space(self):
        """Sensor readings per sweep, plus one for 'no object detected'."""
        return 1 + self.sensor_sectors

    def sample(self):
        """Draw a uniformly random action index."""
        return np.random.randint(self.action_space())
| StarcoderdataPython |
6427383 | <filename>src/asm/schedule/tests/test_filter.py<gh_stars>0
import unittest
import asm.schedule.schedule
class ScheduleFilterTests(unittest.TestCase):
    """Smoke tests for the schedule filter."""

    def test_filter_all_days_all_categories(self):
        # Currently only verifies that a Schedule can be constructed
        # without raising; no filtering assertions yet.
        asm.schedule.schedule.Schedule()
| StarcoderdataPython |
3474542 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
import scipy.linalg as spl
from ...externals.transforms3d.quaternions import mat2quat, quat2axangle
from .transform import Transform
from ..utils.affines import apply_affine
# Globals
RADIUS = 100  # default 'typical size' of the registered object; see preconditioner()
MAX_ANGLE = 1e10 * 2 * np.pi  # rotation angles above this collapse to the identity
SMALL_ANGLE = 1e-30  # below this, use the Taylor expansion in rotation_vec2mat()
MAX_DIST = 1e10  # translations are thresholded to +/- MAX_DIST
LOG_MAX_DIST = np.log(MAX_DIST)  # matching bound for log-scale parameters
TINY = float(np.finfo(np.double).tiny)  # smallest positive double; guards log(0)
def threshold(x, th):
    """Clamp `x` (scalar or array, elementwise) to the interval [-th, th]."""
    clipped_above = np.minimum(th, x)
    return np.maximum(-th, clipped_above)
def rotation_mat2vec(R):
    """ Rotation vector from rotation matrix `R`

    Parameters
    ----------
    R : (3,3) array-like
        Rotation matrix

    Returns
    -------
    vec : (3,) array
        Rotation vector: its norm is the rotation angle ``theta`` and
        its direction ``vec / theta`` the rotation axis.
    """
    # Go through the quaternion representation to get axis and angle.
    axis, angle = quat2axangle(mat2quat(R))
    return angle * axis
def rotation_vec2mat(r):
    """Convert a rotation vector `r` into a 3x3 rotation matrix.

    Uses Rodrigues' formula R = Id + sin(theta)*Sn + (1-cos(theta))*Sn^2
    with n = r / ||r|| and Sn the skew-symmetric cross-product matrix of
    n. For very small angles a second-order Taylor expansion around
    theta = 0 is used instead, avoiding division by a tiny norm:
    R = I + (1 - theta2/6)*Sr + (1/2 - theta2/24)*Sr^2.
    Angles above MAX_ANGLE (a multiple of 2*pi) map to the identity so
    that the output is a continuous function of the input.
    """
    angle = np.sqrt(np.sum(r ** 2))
    if angle > MAX_ANGLE:
        return np.eye(3)
    if angle > SMALL_ANGLE:
        axis = r / angle
        skew = np.array([[0, -axis[2], axis[1]],
                         [axis[2], 0, -axis[0]],
                         [-axis[1], axis[0], 0]])
        return np.eye(3) + np.sin(angle) * skew \
            + (1 - np.cos(angle)) * np.dot(skew, skew)
    # Small-angle branch: expand directly in terms of r.
    skew = np.array([[0, -r[2], r[1]],
                     [r[2], 0, -r[0]],
                     [-r[1], r[0], 0]])
    angle2 = angle * angle
    return np.eye(3) + (1 - angle2 / 6.) * skew \
        + (.5 - angle2 / 24.) * np.dot(skew, skew)
def matrix44(t, dtype=np.double):
    """Build a 4x4 homogeneous matrix from affine parameter vector `t`.

    `t` is interpreted by size:
      size == 6  -> translation + rotation
      size == 7  -> translation + rotation + isotropic scaling
      size >= 12 -> translation + rotation + scaling + pre-rotation
    (sizes < 6 or strictly between 7 and 12 are unsupported)
    """
    n_params = t.size
    T = np.eye(4, dtype=dtype)
    R = rotation_vec2mat(t[3:6])
    if n_params == 6:
        upper = R
    elif n_params == 7:
        upper = t[6] * R
    else:
        S = np.diag(np.exp(threshold(t[6:9], LOG_MAX_DIST)))
        Q = rotation_vec2mat(t[9:12])
        # Beware: the linear part is the product R * S * Q.
        upper = np.dot(R, np.dot(S, Q))
    T[0:3, 0:3] = upper
    T[0:3, 3] = threshold(t[0:3], MAX_DIST)
    return T
def preconditioner(radius):
    """Scaling vector for the 12 affine parameters.

    If p = (u, r, s, q) stacks translation u, rotation vector r,
    log-scales s and pre-rotation q, then every component of (p / pc)
    is roughly comparable to a displacement in mm, where `radius` is
    the 'typical size' of the object being registered.
    """
    per_radius = 1. / radius
    pc = np.ones(12)
    pc[3:] = per_radius  # rotations, log-scales and pre-rotations alike
    return pc
def inverse_affine(affine):
    """Return the matrix inverse of a 4x4 affine transform."""
    return spl.inv(affine)
def subgrid_affine(affine, slices):
    """Compose `affine` with the scaling/offset of a subsampling grid.

    Parameters
    ----------
    affine : (4, 4) array-like
        Voxel-to-world transform of the full grid.
    slices : sequence of 3 slice objects
        Subgrid specification; each slice must have explicit integer
        `start` and `step`.

    Returns
    -------
    (4, 4) ndarray
        Voxel-to-world transform of the subsampled grid.
    """
    # Fix: the original used Python-2-only idioms — map() iterators fed
    # straight to numpy, and np.concatenate(..., axis=1) on 1-D arrays,
    # both of which fail on Python 3 / modern numpy.
    steps = [max(s.step, 1) for s in slices]
    starts = [max(s.start, 0) for s in slices]
    t = np.diag(np.concatenate((steps, [1])))
    t[0:3, 3] = starts
    return np.dot(affine, t)
class Affine(Transform):
    """Affine transform parameterised by 12 natural parameters:
    translation (3), rotation vector (3), log-scales (3), and a
    pre-rotation vector (3) that allows shearing when combined with
    non-unitary scales. Parameters are rescaled by a preconditioner
    (see preconditioner()) so each component is roughly a displacement
    in mm.
    """
    param_inds = range(12)

    def __init__(self, array=None, radius=RADIUS):
        self._direct = True
        self._precond = preconditioner(radius)
        # Fix: was `if array == None`, which performs an elementwise
        # comparison (and raises ValueError on truth testing) when
        # `array` is an ndarray — the documented input type.
        if array is None:
            self._vec12 = np.zeros(12)
        elif array.size == 12:
            self._vec12 = array.ravel().copy()
        elif array.shape == (4, 4):
            self.from_matrix44(array)
        else:
            raise ValueError('Invalid array')

    def copy(self):
        """Return an independent copy of this transform."""
        new = self.__class__()
        new._direct = self._direct
        new._precond[:] = self._precond[:]
        new._vec12 = self._vec12.copy()
        return new

    def from_matrix44(self, aff):
        """
        Convert a 4x4 matrix describing an affine transform into a
        12-sized vector of natural affine parameters: translation,
        rotation, log-scale, pre-rotation (to allow for shearing when
        combined with non-unitary scales). In case the transform has a
        negative determinant, set the `_direct` attribute to False.
        """
        vec12 = np.zeros((12,))
        vec12[0:3] = aff[:3, 3]
        # Use SVD to find orthogonal and diagonal matrices such that
        # aff[0:3,0:3] == R*S*Q
        R, s, Q = spl.svd(aff[0:3, 0:3])
        if spl.det(R) < 0:
            R = -R
            Q = -Q
        r = rotation_mat2vec(R)
        if spl.det(Q) < 0:
            Q = -Q
            self._direct = False
        q = rotation_mat2vec(Q)
        vec12[3:6] = r
        vec12[6:9] = np.log(np.maximum(s, TINY))  # TINY guards log(0)
        vec12[9:12] = q
        self._vec12 = vec12

    def apply(self, xyz):
        """Apply the transform to an array of coordinates."""
        return apply_affine(self.as_affine(), xyz)

    def _get_param(self):
        # Preconditioned parameters, restricted to this class's subset.
        param = self._vec12 / self._precond
        return param[self.param_inds]

    def _set_param(self, p):
        p = np.asarray(p)
        inds = self.param_inds
        self._vec12[inds] = p * self._precond[inds]

    def _get_translation(self):
        return self._vec12[0:3]

    def _set_translation(self, x):
        self._vec12[0:3] = x

    def _get_rotation(self):
        return self._vec12[3:6]

    def _set_rotation(self, x):
        self._vec12[3:6] = x

    def _get_scaling(self):
        return np.exp(self._vec12[6:9])

    def _set_scaling(self, x):
        self._vec12[6:9] = np.log(x)

    def _get_pre_rotation(self):
        return self._vec12[9:12]

    def _set_pre_rotation(self, x):
        self._vec12[9:12] = x

    def _get_direct(self):
        return self._direct

    def _get_precond(self):
        return self._precond

    translation = property(_get_translation, _set_translation)
    rotation = property(_get_rotation, _set_rotation)
    scaling = property(_get_scaling, _set_scaling)
    pre_rotation = property(_get_pre_rotation, _set_pre_rotation)
    is_direct = property(_get_direct)
    precond = property(_get_precond)
    param = property(_get_param, _set_param)

    def as_affine(self, dtype='double'):
        """Return the 4x4 homogeneous matrix for this transform."""
        T = matrix44(self._vec12, dtype=dtype)
        if not self._direct:
            # Negative-determinant transforms flip the linear part.
            T[:3, :3] *= -1
        return T

    def compose(self, other):
        """ Compose this transform onto another

        Parameters
        ----------
        other : Transform
            transform that we compose onto

        Returns
        -------
        composed_transform : Transform
            a transform implementing the composition of self on `other`
        """
        # If other is not an Affine, use either its left compose
        # method, if available, or the generic compose method
        if not hasattr(other, 'as_affine'):
            if hasattr(other, 'left_compose'):
                return other.left_compose(self)
            else:
                return Transform(self.apply).compose(other)
        # Affine case: choose more capable of input types as output
        # type
        other_aff = other.as_affine()
        self_inds = set(self.param_inds)
        other_inds = set(other.param_inds)
        if self_inds.issubset(other_inds):
            klass = other.__class__
        # Fix: was `isssubset` (typo), an AttributeError whenever this
        # branch was reached.
        elif other_inds.issubset(self_inds):
            klass = self.__class__
        else:  # neither one contains capabilities of the other
            klass = Affine
        a = klass()
        a._precond[:] = self._precond[:]
        a.from_matrix44(np.dot(self.as_affine(), other_aff))
        return a

    def __str__(self):
        string = 'translation : %s\n' % str(self.translation)
        string += 'rotation    : %s\n' % str(self.rotation)
        string += 'scaling     : %s\n' % str(self.scaling)
        string += 'pre-rotation: %s' % str(self.pre_rotation)
        return string

    def inv(self):
        """
        Return the inverse affine transform.
        """
        a = self.__class__()
        a._precond[:] = self._precond[:]
        a.from_matrix44(spl.inv(self.as_affine()))
        return a
class Affine2D(Affine):
    # In-plane affine: x/y translation (0, 1), in-plane rotation (5),
    # x/y log-scale (6, 7) and in-plane pre-rotation (11).
    param_inds = [0, 1, 5, 6, 7, 11]
class Rigid(Affine):
    # Rigid motion: translation (0:3) and rotation (3:6) only.
    param_inds = range(6)
    def from_matrix44(self, aff):
        """
        Convert a 4x4 matrix describing a rigid transform into a
        12-sized vector of natural affine parameters: translation,
        rotation, log-scale, pre-rotation (to allow for pre-rotation
        when combined with non-unitary scales). In case the transform
        has a negative determinant, set the `_direct` attribute to
        False.
        """
        vec12 = np.zeros((12,))
        vec12[:3] = aff[:3, 3]
        R = aff[:3, :3]
        if spl.det(R) < 0:
            # Make R a proper rotation; record the flip in `_direct`.
            R = -R
            self._direct = False
        vec12[3:6] = rotation_mat2vec(R)
        # Zero log-scale means unit scaling; pre-rotation stays zero from
        # the np.zeros initialization above.
        vec12[6:9] = 0.0
        self._vec12 = vec12
    def __str__(self):
        string = 'translation : %s\n' % str(self.translation)
        string += 'rotation    : %s\n' % str(self.rotation)
        return string
class Rigid2D(Rigid):
    # In-plane rigid motion: x/y translation (0, 1) and rotation (5).
    param_inds = [0, 1, 5]
class Similarity(Affine):
    # Similarity: translation, rotation, and a single isotropic scale.
    param_inds = range(7)
    def from_matrix44(self, aff):
        """
        Convert a 4x4 matrix describing a similarity transform into a
        12-sized vector of natural affine parameters: translation,
        rotation, log-scale, pre-rotation (to allow for pre-rotation
        when combined with non-unitary scales). In case the transform
        has a negative determinant, set the `_direct` attribute to
        False.
        """
        vec12 = np.zeros((12,))
        vec12[:3] = aff[:3, 3]
        ## A = s R ==> det A = (s)**3 ==> s = (det A)**(1/3)
        A = aff[:3, :3]
        detA = spl.det(A)
        # Clamp the recovered scale away from zero before dividing/logging.
        s = np.maximum(np.abs(detA) ** (1 / 3.), TINY)
        if detA < 0:
            # Make A/s a proper rotation and record the orientation flip.
            A = -A
            self._direct = False
        vec12[3:6] = rotation_mat2vec(A / s)
        vec12[6:9] = np.log(s)
        self._vec12 = vec12
    def _set_param(self, p):
        # The single scale parameter p[6] is broadcast to all three
        # log-scale slots (indices 6:9).
        p = np.asarray(p)
        self._vec12[range(9)] =\
            (p[[0, 1, 2, 3, 4, 5, 6, 6, 6]] * self._precond[range(9)])
    param = property(Affine._get_param, _set_param)
    def __str__(self):
        string = 'translation : %s\n' % str(self.translation)
        string += 'rotation : %s\n' % str(self.rotation)
        string += 'scaling : %s\n' % str(self.scaling[0])
        return string
class Similarity2D(Similarity):
    # In-plane similarity: x/y translation, in-plane rotation, one scale.
    param_inds = [0, 1, 5, 6]
    def _set_param(self, p):
        # The single scale parameter p[3] is broadcast to all three
        # log-scale slots (indices 6:9).
        p = np.asarray(p)
        self._vec12[[0, 1, 5, 6, 7, 8]] =\
            (p[[0, 1, 2, 3, 3, 3]] * self._precond[[0, 1, 5, 6, 7, 8]])
    param = property(Similarity._get_param, _set_param)
# Registry mapping user-facing transform names to their implementing classes.
affine_transforms = {'affine': Affine,
                     'affine2d': Affine2D,
                     'similarity': Similarity,
                     'similarity2d': Similarity2D,
                     'rigid': Rigid,
                     'rigid2d': Rigid2D}
| StarcoderdataPython |
4898004 | <filename>uimnet/algorithms/due.py<gh_stars>1-10
#
# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved
#
#
import gpytorch
import torch
import numpy as np
from uimnet import utils
from uimnet.algorithms.base import Algorithm
import torchvision
import torch.cuda.amp as amp
from sklearn import cluster
class GP(gpytorch.models.ApproximateGP):
    """Multitask variational GP head with k-means-initialized inducing points.

    The inducing points are initialized by clustering `some_features`, and
    the kernel lengthscale is initialized from the mean pairwise distance
    of those features.
    """
    def __init__(self,
                 num_outputs,
                 some_features,
                 kernel="RBF",
                 num_inducing_points=20):
        batch_shape = torch.Size([num_outputs])
        # Initialize inducing points at k-means centroids of the features.
        initial_inducing_points = self.cluster_(
            some_features, num_inducing_points)
        initial_lengthscale = torch.pdist(some_features).mean() / 2
        variational_distribution = \
            gpytorch.variational.CholeskyVariationalDistribution(
                num_inducing_points, batch_shape=batch_shape)
        variational_strategy = \
            gpytorch.variational.IndependentMultitaskVariationalStrategy(
                gpytorch.variational.VariationalStrategy(
                    self, initial_inducing_points, variational_distribution),
                num_tasks=num_outputs)
        super(GP, self).__init__(variational_strategy)
        kwargs = {
            # These two options gave worse results
            # "ard_num_dims": int(some_features.size(1)),
            # "batch_shape": batch_shape
        }
        if kernel == "RBF":
            kernel = gpytorch.kernels.RBFKernel(**kwargs)
        elif kernel == "Matern12":
            kernel = gpytorch.kernels.MaternKernel(nu=1 / 2, **kwargs)
        elif kernel == "Matern32":
            kernel = gpytorch.kernels.MaternKernel(nu=3 / 2, **kwargs)
        elif kernel == "Matern52":
            kernel = gpytorch.kernels.MaternKernel(nu=5 / 2, **kwargs)
        elif kernel == "RQ":
            kernel = gpytorch.kernels.RQKernel(**kwargs)
        else:
            raise ValueError("Specified kernel not known.")
        # Broadcast the data-driven initial lengthscale to the kernel.
        kernel.lengthscale = initial_lengthscale * torch.ones_like(
            kernel.lengthscale)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(kernel)
    def cluster_(self, some_features, k):
        """Return k k-means centroids of `some_features` as a float tensor."""
        kmeans = cluster.MiniBatchKMeans(n_clusters=k, batch_size=k)
        kmeans.fit(some_features.detach().cpu())
        return torch.from_numpy(kmeans.cluster_centers_).float()
    def forward(self, x):
        """Return the GP prior distribution evaluated at `x`."""
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x))
class DKL_GP(gpytorch.Module):
    """Deep-kernel-learning module: feature extractor followed by a GP.

    This wrapper class is necessary because ApproximateGP (above) does some
    magic on the forward method which is not compatible with a
    feature_extractor.
    """
    def __init__(self, feature_extractor, gp):
        super().__init__()
        self.feature_extractor = feature_extractor
        self.gp = gp
    def forward(self, x):
        # Map inputs through the deep feature extractor, then evaluate
        # the GP on the resulting features.
        return self.gp(self.feature_extractor(x))
class DUE(Algorithm):
    """Deterministic Uncertainty Estimation (DUE) algorithm.

    Combines a CNN featurizer with a variational GP classifier and a
    softmax likelihood, trained with the variational ELBO.
    """
    # Hyper-parameter search space: (default, sampler) pairs extending the
    # base Algorithm space.
    HPARAMS = dict(Algorithm.HPARAMS)
    HPARAMS.update({
        "lr": (0.1, lambda: float(10**np.random.uniform(-2, -0.3))),
        "momentum": (0.9, lambda: float(np.random.choice([0.5, 0.9, 0.99]))),
        "weight_decay": (1e-4, lambda: float(10**np.random.uniform(-5, -3))),
        "num_inducing_points": (20, lambda: np.random.choice([20, 100, 300])),
        "kernel": ("RBF", lambda: np.random.choice(
            ["RBF", "RQ", "Matern12", "Matern32", "Matern52"]))
    })
    def __init__(self,
                 num_classes,
                 arch,
                 device="cuda",
                 seed=0,
                 use_mixed_precision=False, sn=False, sn_coef=1, sn_bn=False):
        super(DUE, self).__init__(
            num_classes,
            arch,
            device,
            seed,
            use_mixed_precision=use_mixed_precision,
            sn=sn,
            sn_coef=sn_coef,
            sn_bn=sn_bn)
        # DUE provides its own (entropy-based) uncertainty measure.
        self.has_native_measure = True
    def construct_networks(self, dataset=None):
        """Build featurizer, GP classifier, likelihood and ELBO loss.

        When `dataset` is given, a few minibatches of features are
        collected to initialize the GP inducing points; otherwise random
        placeholders are used (to be overwritten by load_state_dict).
        """
        self.likelihood = gpytorch.likelihoods.SoftmaxLikelihood(
            num_classes=self.num_classes, mixing_weights=False)
        featurizer = torchvision.models.__dict__[self.arch](
            num_classes=self.num_classes,
            pretrained=False,
            zero_init_residual=True)
        num_features = featurizer.fc.in_features
        # Drop the classification head; the GP replaces it.
        featurizer.fc = utils.Identity()
        if dataset is not None:
            loader = torch.utils.data.DataLoader(
                dataset, batch_size=32, shuffle=True)
            some_features = []
            with torch.no_grad():
                # Collect at most 31 minibatches of features for k-means.
                for i, datum in enumerate(loader):
                    some_features.append(featurizer(datum['x']).cpu())
                    if i == 30:
                        break
            some_features = torch.cat(some_features)
            self.dataset_length = len(dataset)
        else:
            # else not executed for training, following are placeholders
            # that should be replaced when doing classifier.load_state_dict()
            some_features = torch.randn(
                self.hparams["num_inducing_points"],
                num_features)
            self.dataset_length = 240000
        self.classifier = GP(
            self.num_classes,
            some_features,
            self.hparams["kernel"],
            self.hparams["num_inducing_points"])
        self.loss = gpytorch.mlls.VariationalELBO(
            self.likelihood, self.classifier, num_data=self.dataset_length)
        return dict(featurizer=featurizer)
    def setup_optimizers(self):
        """Create SGD optimizers for the featurizer and the GP classifier."""
        self.lr = self.hparams["lr"]
        self.optimizers['featurizer'] = torch.optim.SGD(
            self.networks['featurizer'].parameters(),
            lr=self.lr,
            momentum=self.hparams['momentum'],
            weight_decay=0)
        self.optimizers['classifier'] = torch.optim.SGD(
            self.classifier.parameters(),
            lr=self.lr,
            momentum=self.hparams['momentum'],
            weight_decay=0)
    def process_minibatch(self, x, y):
        """Move a minibatch to the configured device."""
        x = x.to(self.device, non_blocking=True)
        y = y.to(self.device, non_blocking=True)
        return x, y
    def predictions(self, x):
        """Return the GP posterior over featurized inputs."""
        return self.classifier(self.networks['featurizer'](x.to(self.device)))
    def update(self, x, y, epoch=None):
        """One optimization step on minibatch (x, y); returns loss/cost."""
        if epoch is not None:
            self.adjust_learning_rate_(epoch)
        # Zero gradients (grad=None avoids the memset of zero_grad()).
        for param in self.parameters():
            param.grad = None
        x, y = self.process_minibatch(x, y)
        with amp.autocast(enabled=self.use_mixed_precision):
            # Negative ELBO plus explicit L2 regularization.
            loss = -self.loss(self.predictions(x), y)
            cost = loss + self.hparams['weight_decay'] * self.get_l2_reg()
        self.grad_scaler.scale(cost).backward()
        for optimizer in self.optimizers.values():
            self.grad_scaler.step(optimizer)
        self.grad_scaler.update()
        return {
            'loss': loss.item(),
            'cost': cost.item()
        }
    def _forward(self, x):
        # Monte-Carlo estimate (32 samples) of class logits.
        with gpytorch.settings.num_likelihood_samples(32):
            output = self.predictions(x)
            output = output.to_data_independent_dist()
            output = self.likelihood(output).logits.mean(0)
        return output
    def uncertainty(self, x):
        """Predictive-entropy uncertainty from 32 likelihood samples."""
        with gpytorch.settings.num_likelihood_samples(32):
            output = self.predictions(x)
            output = output.to_data_independent_dist()
            output = self.likelihood(output).probs.mean(0)
        return -(output * output.log()).sum(1)
| StarcoderdataPython |
345115 | from property_test import User
| StarcoderdataPython |
5086463 | import logging
import urllib.parse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseBadRequest, HttpResponseServerError
from django.shortcuts import redirect, render
from django.views.decorators.http import require_http_methods
from config.settings import NOTION_CLIENT_ID, NOTION_OAUTH_CALLBACK
from .service import create_access_workspace_from_user_code
logger = logging.getLogger(__name__)
# Create your views here.
@login_required
@require_http_methods(["GET"])
def add_notion_workspace_from_access_code(request):
    """Complete the Notion OAuth2 flow for the logged-in user.

    Expects the OAuth authorization code in the `code` query parameter,
    exchanges it for a workspace access record, then redirects to the
    recurring-tasks view.  Returns 400 when the code is missing and 500
    when the exchange fails.
    """
    logger.info("Fetching Notion workspace from access_code!")
    oauth_request_code_string = request.GET.get("code", None)
    if oauth_request_code_string is None or oauth_request_code_string == "":
        logger.warning("Did not have the request code!")
        return HttpResponseBadRequest(
            "You need to provide an OAuth2 Code to get Access"
        )
    try:
        create_access_workspace_from_user_code(
            user_model=request.user, oauth_code=oauth_request_code_string
        )
    except Exception:
        # BUG FIX: the caught exception was silently discarded (the bound
        # `e` was never used); log the full traceback so OAuth failures
        # can be diagnosed.
        logger.exception("Error occurred trying to authorize with Notion")
        return HttpResponseServerError("Error occurred trying to authorize with Notion")
    return redirect("recurring-tasks-view")
@login_required
def show_notion_access_prompt(request):
    """Render the page asking the user to authorize Notion access."""
    context = {
        "client_id": NOTION_CLIENT_ID,
        "callback_url": urllib.parse.quote(NOTION_OAUTH_CALLBACK),
    }
    return render(request, "workspaces/notion-auth.html", context)
| StarcoderdataPython |
4907033 | """
Module to generate instance for Stochastic Capacitated Facility Location Problem
An instance comprises of random first stage cost and fixed (across all the instances)
second stage cost.
"""
from collections import defaultdict
import numpy as np
from ...utils import load_pickle
from ...utils import save_pickle
def generate_first_stage_cost(from_pid, to_pid, n_facility, n_client, n_scenario, cost):
    """Generate first-stage data for seeds `from_pid`..`to_pid - 1`.

    For each seed, draws fixed facility-opening costs, variable capacity
    costs, and Poisson demand scenarios whose means derive from the costs.
    Seeds already present in `cost` are left untouched.

    Parameters
    ----------
    from_pid : int
        First seed (inclusive).
    to_pid : int
        Last seed (exclusive).
    n_facility : int
        Number of facilities.
    n_client : int
        Number of clients.
    n_scenario : int
        Number of demand scenarios per instance.
    cost : defaultdict(dict) or None
        Previously generated instances; a fresh mapping is created if None.

    Returns
    -------
    defaultdict(dict)
        Mapping seed -> {'c_f', 'c_v', 'scenario'}.
    """
    if cost is None:
        cost = defaultdict(dict)
    for pid in range(from_pid, to_pid):
        if pid in cost:
            print(f"Instance {pid} already generated")
            continue
        np.random.seed(pid)
        fixed_cost = np.random.randint(15, 20, n_facility)
        variable_cost = np.random.randint(5, 10, n_facility)
        # Demand means scale with the cost of opening/operating facilities.
        mean_demand = np.floor((fixed_cost + 10 * variable_cost) / np.sqrt(n_facility))
        demand_scenarios = np.array(
            [[np.random.poisson(mean_demand[client])
              for client in range(n_client)]
             for _ in range(n_scenario)])
        cost[pid] = {'c_f': fixed_cost, 'c_v': variable_cost,
                     'scenario': demand_scenarios}
    return cost
def generate_second_stage_cost(n_client, n_facility, cost, test=False):
    """Generate fixed/variable costs for serving clients from facilities.

    Costs are deterministic (distance-like, |i - j|) and stored once under
    key -1.  A virtual "hub" row is prepended: row 0 of `c_tf`/`c_tv` is
    the cost of serving each client from an uncapacitated hub -- 5x the
    most expensive facility during training, and zero during test so that
    all unmet demand routes through the hub.

    NOTE: the trailing sanity assertions assume a square instance with
    n_client == n_facility >= 4, as used by this project's configs.

    Parameters
    ----------
    n_client : int
        Number of clients.
    n_facility : int
        Number of facilities.
    cost : dict
        Generated instances (first-stage cost); mutated in place.
    test : bool
        Whether we are generating a test instance (free hub).

    Returns
    -------
    dict
        `cost` with key -1 mapping to {'c_tf', 'c_tv'}.
    """
    if -1 not in cost:
        # Generate static c_tv, c_tf
        c_tf = 10 * np.array([[abs(i - j) for j in range(n_client)]
                              for i in range(n_facility)])
        c_tv = np.array([[abs(i - j) for j in range(n_client)]
                         for i in range(n_facility)])
        # BUG FIX: the hub fixed-cost vector must have one entry per client
        # (it is filled over range(n_client) and stacked as a (1, n_client)
        # row below); it was previously sized n_facility.
        c_tf_hub = np.zeros(n_client)
        c_tv_hub = np.zeros(n_client)
        if not test:
            for j in range(n_client):
                c_tf_hub[j] = 5 * np.max(c_tf[:, j])
                c_tv_hub[j] = 5 * np.max(c_tv[:, j])
        else:
            # During test, we set the fixed and variable cost from hub
            # equal to zero. Since there is no limit on hub capacity,
            # the demand for each client should be meet from hub.
            for j in range(n_client):
                c_tf_hub[j] = 0
                c_tv_hub[j] = 0
        c_tf = np.vstack((c_tf_hub.reshape(1, -1), c_tf))
        c_tv = np.vstack((c_tv_hub.reshape(1, -1), c_tv))
        assert c_tf.shape == (n_facility + 1, n_client)
        assert c_tv.shape == (n_facility + 1, n_client)
        # Spot checks (require a square instance with n >= 4, see NOTE).
        assert c_tf[1, 0] == 0 and c_tf[n_facility, n_client - 1] == 0 and c_tf[3, 3] != 0
        assert c_tv[1, 0] == 0 and c_tv[n_facility, n_client - 1] == 0 and c_tv[3, 3] != 0
        cost[-1] = {'c_tf': c_tf, 'c_tv': c_tv}
    return cost
def generate_instance(meta_config, problem_config, path, test=False):
    """Generate problem instances for Stochastic Capacitated Facility Location
    Problem.

    Reads run and problem sizes from the configs, reuses any previously
    pickled instances found at path["instance"], generates the missing
    first- and second-stage data, and pickles the result back.

    Parameters
    ----------
    meta_config : configparser.ConfigParser
        Project configuration ([Run] from_pid/to_pid).
    problem_config : configparser.ConfigParser
        Problem configuration ([Problem] n_client/n_facility/n_scenario).
    path : dict
        Dictionary of importlib.Path objects; path["instance"] is the
        pickle location.
    test : bool (default False)
        Boolean indicating whether we are writing test or not.
    """
    from_pid = meta_config.getint('Run', 'from_pid')
    to_pid = meta_config.getint('Run', 'to_pid')
    n_client = problem_config.getint('Problem', 'n_client')
    n_facility = problem_config.getint('Problem', 'n_facility')
    n_scenario = problem_config.getint('Problem', 'n_scenario')
    # Resume from previously generated instances when available.
    cost = load_pickle(path["instance"], check=False) if path["instance"].exists() else None
    cost = generate_first_stage_cost(from_pid, to_pid, n_facility, n_client, n_scenario, cost)
    cost = generate_second_stage_cost(n_client, n_facility, cost, test)
    save_pickle(path["instance"], cost)
| StarcoderdataPython |
12816589 | <filename>fft_ydiv_spin.py
from PyQt5.Qt import QDoubleSpinBox, QKeyEvent
from PyQt5.uic.Compiler.qtproxies import QtCore
from PyQt5.QtCore import pyqtSignal,Qt
class fft_ydiv_spin(QDoubleSpinBox):
    """Double spin box that emits ``val_entered`` when Return is released."""
    # Qt signal fired when the user confirms a value with the Return key.
    val_entered=pyqtSignal()
    def __init__(self,parent):
        # NOTE(review): `parent` is accepted but not forwarded to the base
        # constructor, so the widget is created unparented -- confirm intended.
        QDoubleSpinBox.__init__(self)
    def keyReleaseEvent(self,e):
        # Emit only for Return; other keys get no extra handling here (the
        # base-class implementation is not invoked).
        if e.key()==Qt.Key_Return:
            self.val_entered.emit()
| StarcoderdataPython |
3258598 | from flask import Flask,render_template,Response
import cv2
app=Flask(__name__)
# OpenCV capture on device index 0 (the default local webcam).
camera=cv2.VideoCapture(0)
# for cctv camera use 'rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp'
def genFrames():
    """Yield multipart-MJPEG chunks read from the module-level `camera`.

    Stops when a frame cannot be read from the capture device.
    """
    while True:
        grabbed, raw = camera.read()
        if not grabbed:
            break
        ok, jpeg = cv2.imencode('.jpg', raw)
        payload = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n')
@app.route('/')
def index():
    """Serve the landing page containing the video element."""
    template_name = 'index.html'
    return render_template(template_name)
@app.route('/live')
def liveVideo():
    """Stream webcam frames as a multipart MJPEG response.

    BUG FIX: removed a leftover debug ``print`` that constructed (and
    immediately discarded) an extra frame generator on every request.
    """
    return Response(genFrames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
# Run the Flask development server (debug mode; not for production use).
if __name__=="__main__":
    app.run(debug=True)
| StarcoderdataPython |
1771760 | <filename>setup.py<gh_stars>1-10
from setuptools import find_packages, setup
# Package metadata, collected once and splatted into setup().
_METADATA = dict(
    name='whatsapp_web_driver',
    packages=find_packages(include=['whatsapp_web_driver']),
    version='0.0.0',
    description='Enables the user to easily create their own whatsapp bot.',
    author='<NAME>, <NAME>',
    license='Apache 2.0',
    install_requires=[],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    test_suite='tests',
    url="https://github.com/AadamLok/whatsapp_web_driver",
    keywords=["Web Whatsapp Automation", "Whatsapp Bot", "Whatsapp"],
)
setup(**_METADATA)
11371872 | <gh_stars>0
class Proprietario():
    """Owner record holding a name, registration number and course."""

    def __init__(self, nome, matricula, curso):
        self.nome = nome
        self.matricula = matricula
        self.curso = curso

    # Accessors kept for backward compatibility with existing callers;
    # the attributes themselves are also public.
    def get_nome(self):
        return self.nome

    def set_nome(self, nome):
        self.nome = nome

    def get_matricula(self):
        return self.matricula

    def set_matricula(self, matricula):
        self.matricula = matricula

    def get_curso(self):
        return self.curso

    def set_curso(self, curso):
        self.curso = curso

    def __str__(self):
        # "Name (registration)" display form.
        return self.nome + " (" + str(self.matricula) + ")"
| StarcoderdataPython |
4881266 | <reponame>Jhonattan-rocha/Meus-primeiros-programas
from contas import Conta_poupnaca, Conta_corrente
from itertools import count
class _MetaClassCliente(type):
    """Metaclass enforcing the client interface.

    Any class created through it (except the base ``Cliente`` itself, which
    is exempt) must define callable ``cadastrar_pounpanca`` and
    ``cadastrar_corrente`` methods.
    """
    def __new__(mcs, name, bases, namespace):
        # The base class is exempt from the interface checks.
        if name == "Cliente":
            return type.__new__(mcs, name, bases, namespace)
        if "cadastrar_pounpanca" not in namespace:
            raise SyntaxError("Falta o método cadastrar pounpanca na classe")
        else:
            if not callable(namespace["cadastrar_pounpanca"]):
                raise SyntaxError("Falta o método cadastrar pounpanca na classe")
        if "cadastrar_corrente" not in namespace:
            # BUG FIX: both messages below previously referred to the
            # "pounpanca"/"corente" method instead of "corrente".
            raise SyntaxError("Falta o método cadastrar corrente na classe")
        else:
            if not callable(namespace["cadastrar_corrente"]):
                raise SyntaxError("Falta o método cadastrar corrente na classe")
        # BUG FIX: the original fell off the end here and returned None,
        # so every *valid* class other than "Cliente" silently became None.
        return type.__new__(mcs, name, bases, namespace)
class Pessoa:
    """Abstract person base class; direct instantiation is forbidden."""

    def __init__(self, nome="", idade=0, RG=0, CPF=0):
        # Guard clause: the abstract base itself must not be instantiated.
        cls = type(self)
        if cls is Pessoa:
            raise TypeError(f"{self.__class__} não pode ser instanciada")
        self._nome = nome
        self._idade = idade
        self.__RG = RG
        self.__CPF = CPF
class Cliente(Pessoa, metaclass=_MetaClassCliente):
    """Bank client; can register a savings and/or checking account."""

    # Shared counter so each client receives a distinct sequential id.
    # BUG FIX: the original did `next(count(1))` with a *fresh* iterator on
    # every instantiation, so every client got id 1.
    _id_iter = count(1)

    def __init__(self, nome, idade, RG, CPF):
        super(Cliente, self).__init__(nome, idade, RG, CPF)
        self._id_cliente = next(Cliente._id_iter)

    def cadastrar_pounpanca(self, agencia, conta):
        """Attach a savings account (spelling is part of the public API
        checked by the metaclass)."""
        self.conta_poupanca = Conta_poupnaca(agencia, conta)

    def cadastrar_corrente(self, agencia, conta):
        """Attach a checking account."""
        self.conta_corrente = Conta_corrente(agencia, conta)
if __name__ == '__main__':
pass
| StarcoderdataPython |
4922684 | #!/usr/bin/env python
from __future__ import print_function
from uuid import uuid4
import requests
import hashlib
import json
class CallOfDutyAPIClient(object):
    """Minimal client for the (unofficial) Call of Duty companion web API.

    Handles device registration, email/password login, and authenticated
    JSON requests against the papi-client endpoints.
    """
    # Class-level defaults.  Mutable dicts below are copied per instance in
    # __init__ (see BUG FIX there).
    deviceId = None
    ssoCookie = None
    cookies = {
        'new_SiteId': 'cod',
        'ACT_SSO_LOCALE': 'en_US',
        'country': 'US',
        'XSRF-TOKEN': '<PASSWORD>',
        'API_CSRF_TOKEN': '<PASSWORD>'
    }
    userAgent = ""
    loggedIn = False
    debug = False
    # Maps user-facing platform names to the API's internal identifiers.
    platforms = {
        "battle": "battle",
        "steam": "steam",
        "psn": "psn",
        "xbl": "xbl",
        "acti": "uno",
        "uno": "uno"
    }
    # http
    headers = {
        "Content-Type": "application/json",
        "Cookie": ";".join(["=".join([x,y]) for x,y in cookies.items()]),
        "User-Agent": userAgent,
        "x-requested-with": userAgent,
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Connection": "keep-alive"
    }
    # urls
    defaultBaseURL = "https://my.callofduty.com/api/papi-client/"
    loginURL = "https://profile.callofduty.com/cod/mapp/"
    defaultProfileURL = "https://profile.callofduty.com/"
    modernwarfare = "mw"

    def __init__(self, platform="battle", debug=False, ratelimit=None):
        """Create a client for `platform` (see `platforms` for valid keys)."""
        self.platform = platform
        self.debug = debug
        self.ratelimit = ratelimit
        # BUG FIX: login() mutates these dicts; without per-instance copies
        # every instance (and the class itself) shared one mutable dict, so
        # one client's session state leaked into all others.
        self.cookies = dict(CallOfDutyAPIClient.cookies)
        self.headers = dict(CallOfDutyAPIClient.headers)

    def login(self, email, password):
        """Authenticate with Activision and store the session tokens.

        Registers a random device id, logs in with `email`/`password`, and
        updates the instance headers/cookies.  Raises on HTTP errors or an
        unsuccessful API response.  Returns True on success.
        """
        # The API requires a device id; derive one from a random UUID.
        randomId = str(uuid4()).encode()
        md5sum = hashlib.md5(randomId)
        deviceId = md5sum.hexdigest()
        data = {"deviceId": deviceId}
        response = requests.post(self.loginURL + "registerDevice", headers=self.headers, json=data)
        response.raise_for_status()
        data = response.json()
        if data['status'] != 'success':
            raise Exception("could not register new device id")
        authHeader = data['data']['authHeader']
        self.headers['Authorization'] = 'Bearer %s' % authHeader
        self.headers['x_cod_device_id'] = deviceId
        data = {"email": email, "password": password}
        response = requests.post(self.loginURL + 'login', headers=self.headers, json=data)
        response.raise_for_status()
        data = response.json()
        if data['success'] is False:
            raise Exception("could not log in")
        # Persist the session tokens and rebuild the Cookie header.
        self.ssoCookie = data['s_ACT_SSO_COOKIE']
        self.cookies['rtkn'] = data['rtkn']
        self.cookies['ACT_SSO_COOKIE'] = data['s_ACT_SSO_COOKIE']
        self.cookies['atkn'] = data['atkn']
        self.headers['Cookie'] = ";".join(["=".join([x,y]) for x,y in self.cookies.items()])
        self.loggedIn = True
        return True

    def sendRequest(self, baseURL, path, method="GET", data=None):
        """Send an authenticated request and return the parsed JSON.

        `data` is sent as query params for GET and as a JSON body for
        POST/PUT.  Raises on HTTP errors, unknown methods, or an API
        response whose status is not "success".
        """
        if not self.loggedIn:
            raise Exception("Not logged in")
        endpoint = baseURL + path
        headers = self.headers
        if method == 'GET':
            response = requests.get(endpoint, headers=headers, cookies=self.cookies, params=data)
        elif method == 'POST':
            response = requests.post(endpoint, headers=headers, cookies=self.cookies, json=data)
        elif method == 'PUT':
            response = requests.put(endpoint, headers=headers, cookies=self.cookies, json=data)
        else:
            # BUG FIX: an unknown method previously fell through and raised
            # an opaque NameError on `response`.
            raise ValueError("Unsupported HTTP method: %s" % method)
        response.raise_for_status()
        data = response.json()
        if not data['status'] == "success":
            raise Exception("API call failed - %s" % data)
        return data
| StarcoderdataPython |
3461227 | from RPi import GPIO
class LED(object):
    """Drive a single LED attached to a BCM-numbered GPIO pin."""

    def __init__(self, pin):
        self.pin = pin
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.OUT)

    def _write(self, level):
        # Single choke point for all pin writes.
        GPIO.output(self.pin, level)

    def turn_on(self):
        """Drive the pin high."""
        self._write(1)

    def turn_off(self):
        """Drive the pin low."""
        self._write(0)
| StarcoderdataPython |
214025 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: phone_number_area_codes_sample_async.py
DESCRIPTION:
This sample demonstrates how to get all area codes via a connection string, country code and phone plan id.
USAGE:
python phone_number_area_codes_sample_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING - The endpoint of your Azure Communication Service
2) AZURE_COMMUNICATION_SERVICE_PHONENUMBERS_COUNTRY_CODE - The country code you want to get area codes from
3) AZURE_COMMUNICATION_SERVICE_PHONENUMBERS_PHONE_PLAN_ID_AREA_CODES - The phone plan id you want to get area codes from
"""
import os
import asyncio
from azure.communication.phonenumbers.aio import PhoneNumbersAdministrationClient
# Connection/configuration values come from the environment (with demo
# defaults for the country code and phone plan id).
connection_str = os.getenv('AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING')
country_code = os.getenv('AZURE_COMMUNICATION_SERVICE_PHONENUMBERS_COUNTRY_CODE', "US")
phone_plan_id_area_codes = os.getenv('AZURE_COMMUNICATION_SERVICE_PHONENUMBERS_PHONE_PLAN_ID_AREA_CODES', "phone-plan-id")
async def get_all_area_codes():
    """Fetch and print all area codes for the configured country and plan.

    The [START]/[END] markers below delimit the snippet embedded in the SDK
    documentation -- do not remove them.
    """
    # [START get_all_area_codes]
    phone_number_administration_client = PhoneNumbersAdministrationClient.from_connection_string(
        connection_str)
    async with phone_number_administration_client:
        all_area_codes = await phone_number_administration_client.get_all_area_codes(
            location_type="NotRequired",
            country_code=country_code,
            phone_plan_id=phone_plan_id_area_codes
        )
    # [END get_all_area_codes]
    print('all_area_codes:')
    print(all_area_codes)
async def main():
    """Sample entry point: run the area-codes coroutine."""
    await get_all_area_codes()
if __name__ == '__main__':
    # asyncio.run creates, runs, and closes the event loop itself; the
    # former get_event_loop()/run_until_complete pattern is deprecated on
    # modern Python.
    asyncio.run(main())
| StarcoderdataPython |
1949806 | <reponame>datacentricorg/datacentric-py
# Copyright (C) 2013-present The DataCentric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import IntEnum
class ValueParamType(IntEnum):
    """Primitive type of a handler parameter or type element.

    BUG FIX: each member was previously declared with a trailing comma
    (``Empty = 0,``), making its raw value a 1-tuple that only resolved to
    an int because Enum unpacks tuple values as constructor arguments.
    The commas are removed; final member values are unchanged.
    """
    # Indicates that the enum value is not set.  In languages where an enum
    # defaults to its first item when not set, making Empty the first item
    # prevents unintended assignment of a meaningful value.
    Empty = 0
    Bool = 1                    # Bool value.
    NullableBool = 2            # Nullable bool value.
    Int = 3                     # Int value.
    NullableInt = 4             # Nullable int value.
    Long = 5                    # Long value.
    NullableLong = 6            # Nullable long value.
    Double = 7                  # Double value.
    NullableDouble = 8          # Nullable double value.
    Date = 9                    # Date value.
    NullableDate = 10           # Nullable date value.
    DateTime = 11               # DateTime value.
    NullableDateTime = 12       # Nullable DateTime value.
    String = 13                 # String value.
    Binary = 14                 # Binary value.
    Key = 15                    # Key value.
    Data = 16                   # Generic data value.
    Variant = 17                # Variant value.
    Decimal = 18                # Decimal value.
    NullableDecimal = 19        # Nullable decimal value.
    Time = 20                   # Time value.
    NullableTime = 21           # Nullable time value.
    TemporalId = 22             # TemporalId.
    NullableTemporalId = 23     # Nullable TemporalId.
    Minute = 24                 # Minute.
    NullableMinute = 25         # Nullable minute.
    Instant = 26                # Instant.
    NullableInstant = 27        # Nullable instant.
| StarcoderdataPython |
1729205 | # -*- coding: utf-8 -*-
from .derivation import (
normalised_primary_matrix,
chromatically_adapted_primaries,
primaries_whitepoint,
RGB_luminance_equation,
RGB_luminance,
)
from .rgb_colourspace import RGB_Colourspace
from .rgb_colourspace import XYZ_to_RGB, RGB_to_XYZ
from .rgb_colourspace import matrix_RGB_to_RGB, RGB_to_RGB
from .transfer_functions import * # noqa
from . import transfer_functions
from .datasets import * # noqa
from . import datasets
from .common import XYZ_to_sRGB, sRGB_to_XYZ
from .cylindrical import (
RGB_to_HSV,
HSV_to_RGB,
RGB_to_HSL,
HSL_to_RGB,
RGB_to_HCL,
HCL_to_RGB,
)
from .cmyk import RGB_to_CMY, CMY_to_RGB, CMY_to_CMYK, CMYK_to_CMY
from .hanbury2003 import RGB_to_IHLS, IHLS_to_RGB
from .prismatic import RGB_to_Prismatic, Prismatic_to_RGB
from .ycbcr import (
WEIGHTS_YCBCR,
matrix_YCbCr,
offset_YCbCr,
RGB_to_YCbCr,
YCbCr_to_RGB,
RGB_to_YcCbcCrc,
YcCbcCrc_to_RGB,
)
from .ycocg import RGB_to_YCoCg, YCoCg_to_RGB
from .ictcp import RGB_to_ICtCp, ICtCp_to_RGB, XYZ_to_ICtCp, ICtCp_to_XYZ
__all__ = [
'normalised_primary_matrix',
'chromatically_adapted_primaries',
'primaries_whitepoint',
'RGB_luminance_equation',
'RGB_luminance',
]
__all__ += [
'RGB_Colourspace',
]
__all__ += [
'XYZ_to_RGB',
'RGB_to_XYZ',
]
__all__ += [
'matrix_RGB_to_RGB',
'RGB_to_RGB',
]
__all__ += transfer_functions.__all__
__all__ += datasets.__all__
__all__ += [
'XYZ_to_sRGB',
'sRGB_to_XYZ',
]
__all__ += [
'RGB_to_HSV',
'HSV_to_RGB',
'RGB_to_HSL',
'HSL_to_RGB',
'RGB_to_HCL',
'HCL_to_RGB',
]
__all__ += [
'RGB_to_CMY',
'CMY_to_RGB',
'CMY_to_CMYK',
'CMYK_to_CMY',
]
__all__ += [
'RGB_to_IHLS',
'IHLS_to_RGB',
]
__all__ += [
'RGB_to_Prismatic',
'Prismatic_to_RGB',
]
__all__ += [
'WEIGHTS_YCBCR',
'matrix_YCbCr',
'offset_YCbCr',
'RGB_to_YCbCr',
'YCbCr_to_RGB',
'RGB_to_YcCbcCrc',
'YcCbcCrc_to_RGB',
]
__all__ += [
'RGB_to_YCoCg',
'YCoCg_to_RGB',
]
__all__ += [
'RGB_to_ICtCp',
'ICtCp_to_RGB',
'XYZ_to_ICtCp',
'ICtCp_to_XYZ',
]
| StarcoderdataPython |
4967716 | <reponame>justusc/Elemental
#
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
# Problem size for the example matrix.
n=30
# Build a distributed sparse matrix with the dynamic regularization
# counter pattern and display it.
A = El.DistSparseMatrix()
El.DynamicRegCounter( A, n )
El.Display( A, "A" )
# Require the user to press a button before the figures are closed
worldSize = El.mpi.WorldSize()
El.Finalize()
if worldSize == 1:
    raw_input('Press Enter to exit')  # NOTE: raw_input => this script targets Python 2
| StarcoderdataPython |
5028086 | <filename>spectrochempy/core/dataset/coordrange.py
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
"""
This module implements the class |CoordRange|.
"""
__all__ = __slots__ = ['trim_ranges']
from traitlets import HasTraits, List, Bool
from spectrochempy.utils.traitlets import Range
# ======================================================================================================================
# _CoordRange
# ======================================================================================================================
class _CoordRange(HasTraits):
    """Ordered set of non-intersecting intervals (see `trim_ranges`)."""
    # TODO: May use also units ???
    # NOTE(review): assigning to `ranges` triggers validation by the custom
    # Range trait (from spectrochempy.utils.traitlets); presumably that is
    # what orders each individual pair -- confirm, since _clean_ranges
    # itself never sorts the two endpoints of a pair.
    ranges = List(Range())
    reversed = Bool()
    # ..................................................................................................................
    def __init__(self, *ranges, reversed=False):
        self.reversed = reversed
        if len(ranges) == 0:
            # first case: no argument passed, returns an empty range
            self.ranges = []
        elif len(ranges) == 2 and all(isinstance(elt, (int, float)) for elt in ranges):
            # second case: a pair of scalars has been passed
            # using the Interval class, we have autochecking of the interval
            # validity
            self.ranges = [list(map(float, ranges))]
        else:
            # third case: a set of pairs of scalars has been passed
            self._clean_ranges(ranges)
        # Re-clean after trait validation has normalized the pairs.
        if self.ranges:
            self._clean_ranges(self.ranges)
    # ------------------------------------------------------------------------------------------------------------------
    # private methods
    # ------------------------------------------------------------------------------------------------------------------
    # ..................................................................................................................
    def _clean_ranges(self, ranges):
        """Sort and merge overlapping ranges.

        It works as follows::

        1. orders each interval
        2. sorts intervals
        3. merge overlapping intervals
        4. reverse the orders if required
        """
        # transforms each pairs into valid interval
        # should generate an error if a pair is not valid
        ranges = [list(range) for range in ranges]
        # order the ranges
        ranges = sorted(ranges, key=lambda r: min(r[0], r[1]))
        cleaned_ranges = [ranges[0]]
        for range in ranges[1:]:
            # Merge any interval that overlaps the last kept one; extend it
            # only when the new upper bound is larger.
            if range[0] <= cleaned_ranges[-1][1]:
                if range[1] >= cleaned_ranges[-1][1]:
                    cleaned_ranges[-1][1] = range[1]
            else:
                cleaned_ranges.append(range)
        self.ranges = cleaned_ranges
        if self.reversed:
            for range in self.ranges:
                range.reverse()
            self.ranges.reverse()
def trim_ranges(*ranges, reversed=False):
    """
    Merge and order a collection of intervals.

    An ordered set of non-intersecting intervals is constructed from the
    inputs and returned, *e.g.,* [[a, b], [c, d]] with a < b < c < d
    (or a > b > c > d when `reversed` is True).

    Parameters
    ----------
    *ranges : iterable
        An interval or a set of intervals. If none is given, the range will
        be a set of an empty interval [[]]. The interval limits do not need
        to be ordered, and the intervals do not need to be distinct.
    reversed : bool, optional
        The intervals are ranked by decreasing order if True or increasing
        order if False.

    Returns
    -------
    ordered
        list of ranges.

    Examples
    --------
    >>> import spectrochempy as scp
    >>> scp.trim_ranges([1, 4], [7, 5], [6, 10])
    [[1, 4], [5, 10]]
    """
    merged = _CoordRange(*ranges, reversed=reversed)
    return merged.ranges
# ======================================================================================================================
if __name__ == '__main__':
pass
| StarcoderdataPython |
3290460 | #!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import optparse
import os
import subprocess
import sys
import time
import glob
import re
import shutil
# Used to time-stamp output files and directories
CURRENT_TIME = time.strftime("%d_%m_%Y-%H:%M:%S")
class Error(Exception):
  """Base class for all errors raised by this script."""
  pass
class FfmpegError(Error):
  """Raised when an ffmpeg recording or cropping command fails."""
  pass
class MagewellError(Error):
  """Raised when no Magewell capture devices can be found."""
  pass
class CompareVideosError(Error):
  """Raised when the compare_videos.py comparison step fails."""
  pass
def _ParseArgs():
"""Registers the command-line options."""
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--frame_width', type='string', default='1280',
help='Width of the recording. Default: %default')
parser.add_option('--frame_height', type='string', default='720',
help='Height of the recording. Default: %default')
parser.add_option('--framerate', type='string', default='60',
help='Recording framerate. Default: %default')
parser.add_option('--ref_duration', type='string', default='20',
help='Reference recording duration. Default: %default')
parser.add_option('--test_duration', type='string', default='10',
help='Test recording duration. Default: %default')
parser.add_option('--time_between_recordings', type=float, default=5,
help='Time between starting test recording after ref.'
'Default: %default')
parser.add_option('--ref_video_device', type='string', default='/dev/video0',
help='Reference recording device. Default: %default')
parser.add_option('--test_video_device', type='string', default='/dev/video1',
help='Test recording device. Default: %default')
parser.add_option('--app_name', type='string',
help='Name of the app under test.')
parser.add_option('--recording_api', type='string', default='Video4Linux2',
help='Recording API to use. Default: %default')
parser.add_option('--pixel_format', type='string', default='yuv420p',
help='Recording pixel format Default: %default')
parser.add_option('--ffmpeg', type='string',
help='Path to the ffmpeg executable for the reference '
'device.')
parser.add_option('--video_container', type='string', default='yuv',
help='Video container for the recordings.'
'Default: %default')
parser.add_option('--compare_videos_script', type='string',
default='compare_videos.py',
help='Path to script used to compare and generate metrics.'
'Default: %default')
parser.add_option('--frame_analyzer', type='string',
default='../../out/Default/frame_analyzer',
help='Path to the frame analyzer executable.'
'Default: %default')
parser.add_option('--zxing_path', type='string',
help='Path to the zebra xing barcode analyzer.')
parser.add_option('--ref_rec_dir', type='string', default='ref',
help='Path to where reference recordings will be created.'
'Ideally keep the ref and test directories on separate'
'drives. Default: %default')
parser.add_option('--test_rec_dir', type='string', default='test',
help='Path to where test recordings will be created.'
'Ideally keep the ref and test directories on separate '
'drives. Default: %default')
parser.add_option('--test_crop_parameters', type='string',
help='ffmpeg processing parameters for the test video.')
parser.add_option('--ref_crop_parameters', type='string',
help='ffmpeg processing parameters for the ref video.')
options, _ = parser.parse_args()
if not options.app_name:
parser.error('You must provide an application name!')
if not options.test_crop_parameters or not options.ref_crop_parameters:
parser.error('You must provide ref and test crop parameters!')
# Ensure the crop filter is included in the crop parameters used for ffmpeg.
if 'crop' not in options.ref_crop_parameters:
parser.error('You must provide a reference crop filter for ffmpeg.')
if 'crop' not in options.test_crop_parameters:
parser.error('You must provide a test crop filter for ffmpeg.')
if not options.ffmpeg:
parser.error('You most provide location for the ffmpeg executable.')
if not os.path.isfile(options.ffmpeg):
parser.error('Cannot find the ffmpeg executable.')
# compare_videos.py dependencies.
if not os.path.isfile(options.compare_videos_script):
parser.warning('Cannot find compare_videos.py script, no metrics will be '
'generated!')
if not os.path.isfile(options.frame_analyzer):
parser.warning('Cannot find frame_analyzer, no metrics will be generated!')
if not os.path.isfile(options.zxing_path):
parser.warning('Cannot find Zebra Xing, no metrics will be generated!')
return options
def CreateRecordingDirs(options):
  """Create root + sub directories for reference and test recordings.

  Args:
    options(object): Contains all the provided command line options.

  Returns:
    record_paths(dict): key: value pair with reference and test file
        absolute paths.
  """
  stamped_name = options.app_name + '_' + CURRENT_TIME
  record_paths = {}
  for key, root_dir in (('ref_rec_location', options.ref_rec_dir),
                        ('test_rec_location', options.test_rec_dir)):
    # Create the root directory for the video recordings on first use.
    if not os.path.isdir(root_dir):
      os.makedirs(root_dir)
    # Create and time-stamp a directory for this run's output files.
    output_dir = os.path.join(root_dir, stamped_name)
    os.makedirs(output_dir)
    record_paths[key] = os.path.abspath(output_dir)
  return record_paths
def RestartMagewellDevices(ref_video_device, test_video_device):
  """Reset the USB ports where Magewell capture devices are connected to.

  Tries to find the provided ref_video_device and test_video_device devices
  which use video4linux and then do a soft reset by using USB unbind and bind.
  This is due to Magewell capture devices have proven to be unstable after the
  first recording attempt.

  Args:
    ref_video_device(string): reference recording device path.
    test_video_device(string): test recording device path.

  Raises:
    MagewellError: If no magewell devices are found.
  """
  # Find the device location including USB and USB Bus ID's. The previous
  # implementation duplicated this scan for the ref and test devices; one
  # loop handles both.
  device_string = '/sys/bus/usb/devices/usb*/**/**/video4linux/'
  magewell_usb_ports = []
  for video_device in (ref_video_device, test_video_device):
    # Get the dev/videoN device name from the device path.
    device_name = video_device.split('/')[2]
    magewell_device = glob.glob('%s%s' % (device_string, device_name))
    # Figure out the USB bus and port ID for the device by looking for a
    # path component with pattern "N-N", e.g. "4-3" or
    # "[USB bus ID]-[USB port]".
    for directory in str(magewell_device).split('/'):
      if re.match(r'^\d-\d$', directory):
        magewell_usb_ports.append(directory)

  # Abort early if no devices are found.
  if len(magewell_usb_ports) == 0:
    raise MagewellError('No magewell devices found.')
  print('\nResetting USB ports where magewell devices are connected...')

  # Use the USB bus and port ID (e.g. 4-3) to unbind and bind the USB devices
  # (i.e. soft eject and insert).
  for usb_port in magewell_usb_ports:
    echo_cmd = ['echo', usb_port]
    # TODO(jansson) Figure out a way to call on echo once for bind & unbind
    # if possible.
    for sysfs_file in ('/sys/bus/usb/drivers/usb/unbind',
                       '/sys/bus/usb/drivers/usb/bind'):
      tee_cmd = ['sudo', 'tee', sysfs_file]
      echo = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
      tee = subprocess.Popen(tee_cmd, stdin=echo.stdout)
      echo.stdout.close()
      tee.wait()
  # Report success based on the final bind, as before.
  if tee.returncode == 0:
    print('Reset done!\n')
def StartRecording(options, ref_file_location, test_file_location):
  """Starts recording from the two specified video devices.

  Args:
    options(object): Contains all the provided command line options.
    ref_file_location(string): Directory where the reference recording is
        written.
    test_file_location(string): Directory where the test recording is
        written.

  Returns:
    recording_files_and_time(dict): key: value pair with the path to cropped
        test and reference video files.

  Raises:
    FfmpegError: If the ffmpeg command fails.
  """
  ref_file_name = '%s_%s_ref.%s' % (options.app_name, CURRENT_TIME,
                                    options.video_container)
  ref_file = os.path.join(ref_file_location, ref_file_name)

  test_file_name = '%s_%s_test.%s' % (options.app_name, CURRENT_TIME,
                                      options.video_container)
  test_file = os.path.join(test_file_location, test_file_name)

  # Reference video recorder command line.
  ref_cmd = [
    options.ffmpeg,
    '-v', 'error',
    '-s', options.frame_width + 'x' + options.frame_height,
    '-framerate', options.framerate,
    '-f', options.recording_api,
    '-i', options.ref_video_device,
    '-pix_fmt', options.pixel_format,
    '-s', options.frame_width + 'x' + options.frame_height,
    '-t', options.ref_duration,
    '-framerate', options.framerate,
    ref_file
  ]

  # Test video recorder command line.
  test_cmd = [
    options.ffmpeg,
    '-v', 'error',
    '-s', options.frame_width + 'x' + options.frame_height,
    '-framerate', options.framerate,
    '-f', options.recording_api,
    '-i', options.test_video_device,
    '-pix_fmt', options.pixel_format,
    '-s', options.frame_width + 'x' + options.frame_height,
    '-t', options.test_duration,
    '-framerate', options.framerate,
    test_file
  ]
  print 'Trying to record from reference recorder...'
  ref_recorder = subprocess.Popen(ref_cmd, stderr=sys.stderr)

  # Start the 2nd recording a little later to ensure the 1st one has started.
  # TODO(jansson) Check that the ref_recorder output file exists rather than
  # using sleep.
  time.sleep(options.time_between_recordings)
  print 'Trying to record from test recorder...'
  test_recorder = subprocess.Popen(test_cmd, stderr=sys.stderr)
  test_recorder.wait()
  ref_recorder.wait()

  # ffmpeg does not abort when it fails, need to check return code.
  if ref_recorder.returncode != 0 or test_recorder.returncode != 0:
    # Cleanup recording directories.
    shutil.rmtree(ref_file_location)
    shutil.rmtree(test_file_location)
    raise FfmpegError('Recording failed, check ffmpeg output.')
  else:
    print 'Ref file recorded to: ' + os.path.abspath(ref_file)
    print 'Test file recorded to: ' + os.path.abspath(test_file)
    print 'Recording done!\n'
    # Crop (and flip, via the ref crop parameters) before returning.
    return FlipAndCropRecordings(options, test_file_name, test_file_location,
                                 ref_file_name, ref_file_location)
def FlipAndCropRecordings(options, test_file_name, test_file_location,
                          ref_file_name, ref_file_location):
  """Performs a horizontal flip of the reference video to match the test video.

  This is done to the match orientation and then crops the ref and test videos
  using the options.test_crop_parameters and options.ref_crop_parameters.

  Args:
    options(object): Contains all the provided command line options.
    test_file_name(string): Name of the test video file recording.
    test_file_location(string): Path to the test video file recording.
    ref_file_name(string): Name of the reference video file recording.
    ref_file_location(string): Path to the reference video file recording.

  Returns:
    recording_files_and_time(dict): key: value pair with the path to cropped
        test and reference video files.

  Raises:
    FfmpegError: If the ffmpeg command fails.
  """
  print('Trying to crop videos...')

  def _CropCmd(location, file_name, crop_parameters, output_file):
    # Build an ffmpeg command applying the given crop filter string.
    return [
        options.ffmpeg,
        '-v', 'error',
        '-s', options.frame_width + 'x' + options.frame_height,
        '-i', os.path.join(location, file_name),
        '-vf', crop_parameters,
        '-c:a', 'copy',
        output_file
    ]

  cropped_ref_file = os.path.abspath(
      os.path.join(ref_file_location, 'cropped_' + ref_file_name))
  cropped_test_file = os.path.abspath(
      os.path.join(test_file_location, 'cropped_' + test_file_name))

  # Crop the reference recording first, then the test recording.
  ref_crop = subprocess.Popen(_CropCmd(
      ref_file_location, ref_file_name, options.ref_crop_parameters,
      cropped_ref_file))
  ref_crop.wait()
  test_crop = subprocess.Popen(_CropCmd(
      test_file_location, test_file_name, options.test_crop_parameters,
      cropped_test_file))
  test_crop.wait()

  # ffmpeg does not abort when it fails, need to check return code.
  if ref_crop.returncode != 0 or test_crop.returncode != 0:
    # Cleanup recording directories.
    shutil.rmtree(ref_file_location)
    shutil.rmtree(test_file_location)
    raise FfmpegError('Cropping failed, check ffmpeg output.')
  print('Ref file cropped to: ' + cropped_ref_file)
  print('Test file cropped to: ' + cropped_test_file)
  print('Cropping done!\n')

  # Need to return these so they can be used by other parts.
  return {
      'cropped_test_file': cropped_test_file,
      'cropped_ref_file': cropped_ref_file
  }
def CompareVideos(options, cropped_ref_file, cropped_test_file):
  """Runs the compare_video.py script from src/webrtc/tools using the file path.

  Uses the path from recording_result and writes the output to a file named
  <options.app_name + '_' + CURRENT_TIME + '_result.txt> in the reference video
  recording folder taken from recording_result.

  Args:
    options(object): Contains all the provided command line options.
    cropped_ref_file(string): Path to cropped reference video file.
    cropped_test_file(string): Path to cropped test video file.

  Raises:
    CompareVideosError: If compare_videos.py fails or the crop dimensions
        cannot be found in the reference crop parameters.
  """
  print('Starting comparison...')
  print('Grab a coffee, this might take a few minutes...')
  compare_videos_script = os.path.abspath(options.compare_videos_script)
  rec_path = os.path.abspath(os.path.join(
      os.path.dirname(cropped_test_file)))
  result_file_name = os.path.join(rec_path, '%s_%s_result.txt') % (
      options.app_name, CURRENT_TIME)

  # Find the crop dimensions (e.g. 950 and 420) in the ref crop parameter
  # string: 'hflip, crop=950:420:130:56'. The previous string-splitting code
  # raised IndexError when the string started with 'crop' and left the
  # dimensions undefined when no 'crop=' filter was present.
  crop_match = re.search(r'crop=(\d+):(\d+)', options.ref_crop_parameters)
  if not crop_match:
    raise CompareVideosError('Could not find crop dimensions in the '
                             'reference crop parameters.')
  crop_width = crop_match.group(1)
  crop_height = crop_match.group(2)

  compare_cmd = [
    sys.executable,
    compare_videos_script,
    '--ref_video', cropped_ref_file,
    '--test_video', cropped_test_file,
    '--frame_analyzer', os.path.abspath(options.frame_analyzer),
    '--zxing_path', options.zxing_path,
    '--ffmpeg_path', options.ffmpeg,
    '--stats_file_ref', os.path.join(os.path.dirname(cropped_ref_file),
                                     cropped_ref_file + '_stats.txt'),
    '--stats_file_test', os.path.join(os.path.dirname(cropped_test_file),
                                      cropped_test_file + '_stats.txt'),
    '--yuv_frame_height', crop_height,
    '--yuv_frame_width', crop_width
  ]

  with open(result_file_name, 'w') as f:
    compare_video_recordings = subprocess.Popen(compare_cmd, stdout=f)
    compare_video_recordings.wait()
    if compare_video_recordings.returncode != 0:
      raise CompareVideosError('Failed to perform comparison.')
    print('Result recorded to: ' + os.path.abspath(result_file_name))
    print('Comparison done!')
def main():
  """The main function.

  A simple invocation is:
  ./run_video_analysis.py \
  --app_name AppRTCMobile \
  --ffmpeg ./ffmpeg --ref_video_device=/dev/video0 \
  --test_video_device=/dev/video1 \
  --zxing_path ./zxing \
  --test_crop_parameters 'crop=950:420:130:56' \
  --ref_crop_parameters 'hflip, crop=950:420:130:56' \
  --ref_rec_dir /tmp/ref \
  --test_rec_dir /tmp/test

  This will produce the following files if successful:
  # Original video recordings.
  /tmp/ref/AppRTCMobile_<recording date and time>_ref.yuv
  /tmp/test/AppRTCMobile_<recording date and time>_test.yuv

  # Cropped video recordings according to the crop parameters.
  /tmp/ref/cropped_AppRTCMobile_<recording date and time>_ref.yuv
  /tmp/test/cropped_AppRTCMobile_<recording date and time>_ref.yuv

  # Comparison metrics from cropped test and ref videos.
  /tmp/test/AppRTCMobile_<recording date and time>_result.text
  """
  options = _ParseArgs()
  RestartMagewellDevices(options.ref_video_device, options.test_video_device)
  record_paths = CreateRecordingDirs(options)
  # Records from both devices; returns the paths to the cropped recordings.
  recording_result = StartRecording(options, record_paths['ref_rec_location'],
                                    record_paths['test_rec_location'])

  # Do not require compare_video.py script to run, no metrics will be generated.
  if options.compare_videos_script:
    CompareVideos(options, recording_result['cropped_ref_file'],
                  recording_result['cropped_test_file'])
  else:
    print ('Skipping compare videos step due to compare_videos flag were not '
           'passed.')
if __name__ == '__main__':
sys.exit(main())
| StarcoderdataPython |
9680436 | <reponame>sapcc/neutron-lib<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from neutron_lib import context
from neutron_lib.policy import _engine as policy_engine
from neutron_lib.tests import _base as base
class TestPolicyEnforcer(base.BaseTestCase):
    """Unit tests for the neutron_lib policy engine helpers."""

    def setUp(self):
        super(TestPolicyEnforcer, self).setUp()
        # Isolate one _ROLE_ENFORCER per test case
        # NOTE(review): the patch is started without an explicit
        # addCleanup(...stop); this relies on the base test class stopping
        # all mocks between tests -- confirm.
        mock.patch.object(policy_engine, '_ROLE_ENFORCER', None).start()

    def test_init_reset(self):
        """init() populates the module-level enforcer."""
        self.assertIsNone(policy_engine._ROLE_ENFORCER)
        policy_engine.init()
        self.assertIsNotNone(policy_engine._ROLE_ENFORCER)

    def test_check_user_is_not_admin(self):
        """A plain user context is not admin."""
        ctx = context.Context('me', 'my_project')
        self.assertFalse(policy_engine.check_is_admin(ctx))

    def test_check_user_elevated_is_admin(self):
        """elevated() grants admin under the default policy."""
        ctx = context.Context('me', 'my_project', roles=['user']).elevated()
        self.assertTrue(policy_engine.check_is_admin(ctx))

    def test_check_is_admin_no_roles_no_admin(self):
        """A policy file without an admin rule defeats elevated()."""
        policy_engine.init(policy_file='dummy_policy.json')
        ctx = context.Context('me', 'my_project', roles=['user']).elevated()
        # With no admin role, elevated() should not work.
        self.assertFalse(policy_engine.check_is_admin(ctx))

    def test_check_user_elevated_is_admin_with_default_policy(self):
        """A missing policy file falls back to defaults, so elevated() works."""
        policy_engine.init(policy_file='no_policy.json')
        ctx = context.Context('me', 'my_project', roles=['user']).elevated()
        self.assertTrue(policy_engine.check_is_admin(ctx))

    def test_check_is_advsvc_role(self):
        """The advsvc role is recognized."""
        ctx = context.Context('me', 'my_project', roles=['advsvc'])
        self.assertTrue(policy_engine.check_is_advsvc(ctx))

    def test_check_is_not_advsvc_user(self):
        """A plain user is not advsvc."""
        ctx = context.Context('me', 'my_project', roles=['user'])
        self.assertFalse(policy_engine.check_is_advsvc(ctx))

    def test_check_is_not_advsvc_admin(self):
        """Admin does not imply advsvc."""
        ctx = context.Context('me', 'my_project').elevated()
        self.assertTrue(policy_engine.check_is_admin(ctx))
        self.assertFalse(policy_engine.check_is_advsvc(ctx))

    def test_check_is_advsvc_no_roles_no_advsvc(self):
        """A policy file without an advsvc rule defeats the role."""
        policy_engine.init(policy_file='dummy_policy.json')
        ctx = context.Context('me', 'my_project', roles=['advsvc'])
        # No advsvc role in the policy file, so cannot assume the role.
        self.assertFalse(policy_engine.check_is_advsvc(ctx))

    def test_check_is_advsvc_role_with_default_policy(self):
        """A missing policy file falls back to defaults; advsvc works."""
        policy_engine.init(policy_file='no_policy.json')
        ctx = context.Context('me', 'my_project', roles=['advsvc'])
        self.assertTrue(policy_engine.check_is_advsvc(ctx))
| StarcoderdataPython |
1780100 | # Version information. This file will be overwritten by the GitLab CI for every release build
__version__ = '0.3.0'
| StarcoderdataPython |
1648773 | import json
class InventoryItemExistException(Exception):
    """
    There is an item in the Inventory with the same code -> Integrity Error
    """
    # The previous no-op __init__(self) rejected message arguments
    # (InventoryItemExistException('msg') raised TypeError). Inheriting
    # Exception's constructor keeps no-arg raises working and also allows
    # an optional message.
class InventoryItemDoesNotExistException(Exception):
    """
    Requested item not exists
    """
    # The previous no-op __init__(self) rejected message arguments.
    # Inheriting Exception's constructor keeps no-arg raises working and
    # also allows an optional message.
class InventoryItem(object):
    """
    A single product in the inventory.

    Attributes:
        code: Unique product code.
        name: Human readable product name.
        price: Product price (converted to float by create_item when the
            source value is a string).
    """

    # Class-level defaults; overwritten per instance in __init__.
    code = None
    name = None
    price = 0

    def __init__(self, code, name, price):
        """
        Constructor
        """
        self.code = code
        self.name = name
        self.price = price

    @classmethod
    def create_item(cls, code, name, price):
        """
        Item creator. Useful if we need to adapt or format any field from
        the data source.

        price may be a number or a numeric string; strings are converted
        to float. Returns an instance of cls (so subclasses get instances
        of their own type, which the previous hard-coded InventoryItem(...)
        call did not).
        """
        formatted_price = float(price) if isinstance(price, str) else price
        return cls(code, name, formatted_price)
class Inventory(object):
    """
    In-memory product store, used in place of a database.
    """

    _products = None

    def __init__(self, data_file_path):
        """
        Loads the inventory from a json file, turning every json object
        into an InventoryItem keyed by its product code.

        data_file_path: Path to json file
        """
        with open(data_file_path) as fp:
            raw_items = json.load(fp)
        self._products = {}
        for entry in raw_items:
            item = InventoryItem.create_item(
                code=entry['code'], name=entry['name'], price=entry['price'])
            self._products[item.code] = item

    def add_item(self, item):
        """
        Add new item to the inventory.

        item: InventoryItem
        return: none
        """
        if item.code not in self._products:
            self._products[item.code] = item
        else:
            # Same code already present -> integrity error.
            raise InventoryItemExistException

    def get_item_by_code(self, code):
        """
        Returns an item from the inventory based on the code.

        code: code
        return: InventoryItem
        """
        try:
            return self._products[code]
        except KeyError:
            raise InventoryItemDoesNotExistException
| StarcoderdataPython |
8010648 | from flask import render_template, current_app as app, request, redirect
from werkzeug.utils import secure_filename
import json
import os, glob
# Load the application settings from the local config file.
# NOTE(review): `file` shadows a builtin name here.
with open("config.json", "r") as file:
    params = json.load(file)['params']
# Directory where uploaded ECG csv files are stored.
app.config['Upload_folder'] = params["uploadPath"]
# clear Data
def clearDara():
    """Delete generated csv files from the File directory, keeping any
    files ending in "normal.csv"."""
    for csv_path in glob.glob('.//File//*.csv'):
        if csv_path.endswith("normal.csv"):
            continue
        os.remove(csv_path)
# Creating routes
@app.route('/')
def index():
    """Home page; clears previously generated csv data before rendering."""
    clearDara()
    return render_template('home.html')
@app.route('/test')
def test():
    """Test page; clears previously generated csv data before rendering."""
    clearDara()
    return render_template('test.html')
@app.route('/saveData', methods=['POST'])
def model():
    """Accepts an uploaded ECG csv file and stores it in the upload folder.

    Redirects to /analysis/ on success, or to /error when the upload is
    missing or is not a csv file.
    """
    # request.files['ecg'] aborts with a 400 when the form field is
    # missing; .get() lets a malformed request land on the app's own
    # error page instead.
    ecg_file = request.files.get('ecg')
    if ecg_file is None or not ecg_file.filename.endswith('.csv'):
        return redirect('/error')
    # secure_filename() sanitizes the user-supplied name before it is
    # used to build a filesystem path.
    data_path = f"{app.config['Upload_folder']}//{secure_filename(ecg_file.filename)}"
    ecg_file.save(data_path)
    print(f'[INFO] File is Saved by Name @ {data_path}')
    return redirect('/analysis/')
@app.errorhandler(500)
@app.errorhandler(404)
@app.errorhandler(409)
@app.route("/error")
def error(e=None):
    """Renders the shared error page.

    Registered both as an error handler (Flask passes the exception or
    error code as ``e``) and as the plain /error route, which calls the
    view with no arguments -- hence the default value; without it,
    visiting /error raised TypeError.
    """
    print("ERROR")
    return render_template("error.html"), 404
| StarcoderdataPython |
5123659 | <gh_stars>0
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from .models import GeneralNotification
class UserAdmin(admin.ModelAdmin):
    """Default admin options for GeneralNotification; no customisation yet."""
    pass


# Expose GeneralNotification in the Django admin site.
admin.site.register(GeneralNotification, UserAdmin)
| StarcoderdataPython |
9729634 | #! /usr/bin/env python
"""Choose assassin(s) and maybe targets for a nerf-assassin game.
Arguments:
targets: Comma-separated list of targets. If empty, targets will be
chosen at random.
-p players: list of players.
-d, --dry-run: don't actually send emails; just run the chooser.
"""
# vim: set ai sw=4 et:
# import os
# import sys
import random
import argparse
import getpass
import smtplib
# I have no idea why pylint wants to complain about these imports.
# pylint: disable=import-error,no-name-in-module
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# pylint: enable=import-error,no-name-in-module
# Need a local module which defines the following symbols
from emails import EMAILS, EMAIL_ACCOUNT, REPLY_ADDR
# There are 2 critical pieces of information for each role:
# 1: The answer to the question: "Are you the assassin?"
# 2: The article to use when telling someone he or she has a particluar
# role. Formerly, since there were only 1 target and 1 assassin, they got
# a definite article ("the") while, guards got an indefinite article ("a").
# Now, since we support multiple target/assassin pairs, they also get
# indefinite articles.
ROLES = {'target': {'answer': 'No', 'article':'a'},
'assassin': {'answer': 'Yes', 'article':'an'},
'guard': {'answer': 'No', 'article':'a'}}
PLAYERS = 'Doug, Pat, Paul, Brandon, <NAME>, Matt, Fred, Alex, Ashish, Scott, Bryan'
def make_email(from_addr, recip, role, target_name=None):
    """Compose a message to a nerf assassin player.

    from_addr: The email address the message will be from.
    recip: The email address (or list of addresses) of the recipient.
    role: The player's role in the current game.
    target_name: If a player is the assassin, this is the target.
    """
    assert role in ROLES
    role_info = ROLES[role]

    msg = MIMEMultipart()
    msg['From'] = from_addr
    msg['To'] = recip if isinstance(recip, str) else ', '.join(recip)
    msg['Subject'] = 'Are you an assassin?'
    msg.add_header('Reply-to', REPLY_ADDR)

    # Compose the real body of the message.
    body = '{}; you are {} {}.'.format(role_info['answer'],
                                       role_info['article'], role)
    if role == 'assassin':
        body += ' Your target is {}.'.format(target_name)

    def fluff_lines(num_lines):
        """Make up lines of random glop.

        Used so the real message doesn't show up in a mail client's
        preview pane where someone glancing at the screen might see it.
        """
        return '\n'.join(['=*+-#' * 10] * num_lines)

    # Surround the real answer with obscuring fluff.
    body = '\n\n'.join([fluff_lines(3), body, fluff_lines(1)])
    msg.attach(MIMEText(body, 'plain'))
    return msg
def make_slack_msg(target_list, player_list):
    """Emit a message suitable for posting to the assassing slack channel."""
    lines = [
        "This week's assassins and targets are assigned, and emails have been sent.",
        'The roster is:',
        '*Targets*: "{}"'.format(', '.join(target_list)),
        '*Players*: "{}"'.format(', '.join(player_list)),
        '*GAME ON*',
    ]
    return '\n'.join(lines)
def commastring_to_list(string, capitalize=False):
    """Turn a comma separated list in a string to a python list.

    Each item is stripped of surrounding whitespace; when capitalize is
    True each item is also capitalized.
    """
    stripped = (piece.strip() for piece in string.split(','))
    if capitalize:
        return [piece.capitalize() for piece in stripped]
    return list(stripped)
def get_cred(username):
    """Get credentials (password) for a given username.

    Prompts interactively and returns a (username, password) tuple
    suitable for smtplib login.
    """
    password = getpass.getpass('Please enter password for {}: '.format(username))
    return (username, password)
def email_login(cred):
    """Log into the gmail smtp server with the given credentials.

    cred: (username, password) tuple.
    Returns the connected smtplib.SMTP instance.
    """
    username = cred[0]
    print('Logging in to smtp.gmail.com as {}.'.format(username))
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(*cred)
    print('Login complete.')
    return server
def email_send(server, recip, msg, dry_run=True):
    """Send an email message.

    If dry_run is true, just print the message which would have been sent.
    """
    if not dry_run:
        server.sendmail(msg['From'], recip, msg.as_string())
        return
    print('Will send this email to {}:'.format(recip))
    print('=====')
    print(msg.as_string())
    print('=====')
    print('')
def email_logout(server):
    """Log out of an email session.

    server: an smtplib.SMTP instance returned by email_login.
    """
    print 'Logging out of smtp.gmail.com.'
    server.quit()
def send_results(ta_list, guard_list, dry_run=True):
    """Once we've chosen roles for all players, send out the emails.

    ta_list: list of (target_name, assassin_name) pairs.
    guard_list: names of the players who are guards.
    dry_run: when True, messages are printed instead of sent.
    """
    for target_name, assassin_name in ta_list:
        print('{} is {}\'s target.'.format(target_name, assassin_name))
    for guard_name in guard_list:
        print('{} is a guard.'.format(guard_name))

    cred = get_cred(EMAIL_ACCOUNT)

    # Okay; we've got our players and their roles.
    # Let's compose email messages, and send them.
    email_list = []
    for target_name, assassin_name in ta_list:
        target_addr = EMAILS[target_name]
        email_list.append(
            (target_addr, make_email(EMAIL_ACCOUNT, target_addr, 'target')))
        assassin_addr = EMAILS[assassin_name]
        email_list.append(
            (assassin_addr,
             make_email(EMAIL_ACCOUNT, assassin_addr, 'assassin', target_name)))
    for guard_name in guard_list:
        guard_addr = EMAILS[guard_name]
        email_list.append(
            (guard_addr, make_email(EMAIL_ACCOUNT, guard_addr, 'guard')))

    email_server = email_login(cred)
    for recip, msg in email_list:
        email_send(email_server, recip, msg, dry_run)
    email_logout(email_server)
def choose_assassins(target_list, player_list):
    """Assign an assassin to every target.

    Returns (ta_list, guard_list) where ta_list holds
    (target_name, assassin_name) pairs and guard_list holds every player
    who was not picked as an assassin. player_list is not modified.
    """
    ta_list = []
    guard_list = list(player_list)
    for target_name in target_list:
        # Choose an assassin for each target; once picked, a player is no
        # longer eligible for guard duty or further assassin picks.
        assassin_name = random.choice(guard_list)
        guard_list.remove(assassin_name)
        ta_list.append((target_name, assassin_name))
    return ta_list, guard_list
if __name__ == '__main__':
    # Stupid pylint wants a bunch of variables (including parser!) to be
    # named as constants. That would be worse, not better.
    # pylint: disable=invalid-name
    parser = argparse.ArgumentParser()
    parser.add_argument('targets', nargs='?')
    parser.add_argument('-p', '--players', default=PLAYERS)
    parser.add_argument('-d', '--dry_run', action='store_true')
    args = parser.parse_args()
    # print args
    # sys.exit(0)
    # We need to know the email address of every player.
    players = commastring_to_list(args.players, capitalize=True)
    for p in players:
        assert p in EMAILS, '{} is an invalid player.'.format(p)
    # The targets might or might not be in the list of players, but
    # either way, we must know their email addresses
    if args.targets is None:
        # No targets given: pick one player at random.
        targets = (random.choice(players),)
        print "targets are: {}".format(targets)
    else:
        targets = commastring_to_list(args.targets, capitalize=True)
        for target in targets:
            assert target in EMAILS, '{} is an invalid target.'.format(target)
            # A target who is also listed as a player is removed from the
            # player pool so they cannot be chosen as an assassin/guard.
            try:
                players.remove(target)
            except ValueError:
                pass
    # Choose an assassin for each target.
    ta_pairs, guards = choose_assassins(targets, players)
    # Send out the results.
    send_results(ta_pairs, guards, dry_run=args.dry_run)
    # Print a nice message to be posted in slack.
    print make_slack_msg(targets, players)
| StarcoderdataPython |
3348938 | import bpy
import bmesh
import math
from . import object_manager
from . import settings_manager
def set_normals_to_outside(context, objects, only_recalculate_if_flagged = True):
    '''
    Set normals of objects so that they point outside of the mesh
    (convex direction).

    Set normals is an issue with planes, since it can invert normals.
    Do not recalculate normals if:
    - object has a dimension that is close to zero (i.e. flat plane)
    Make sure normals are set BEFORE joining objects, since joining
    changes bounding box!

    context: current Blender context.
    objects: iterable of objects to process.
    only_recalculate_if_flagged: when True, only objects carrying a truthy
        'recalculate_normals' custom property are processed.
    '''
    # Remember mode and selection so they can be restored afterwards.
    orig_mode = settings_manager.get_mode(context)
    selection_data = settings_manager.get_selection_data(context)
    for object in objects:
        # Continue if this object should not be included in normal recalculation
        if only_recalculate_if_flagged:
            if not object.get('recalculate_normals'):
                continue
        # Select object
        bpy.ops.object.mode_set(mode = 'OBJECT')
        object_manager.select_objects(context, 'REPLACE', [object], True)
        # Select all elements in edit mode
        bpy.ops.object.mode_set(mode = 'EDIT')
        bpy.ops.mesh.select_all(action = 'SELECT')
        print("set normal to outside on object " + object.name)
        # Recalculate normals to face outward (inside=False).
        bpy.ops.mesh.normals_make_consistent(inside=False)
    # Restore mode, selection and active object
    bpy.ops.object.mode_set(mode = orig_mode)
    settings_manager.restore_selected_objects(context, selection_data)
def apply_custom_split_normal(context, objects):
    '''
    Apply custom split normal data to each object and enable auto smooth.

    context: current Blender context.
    objects: objects to receive custom split normals.
    '''
    active_object = context.active_object
    # Apply custom split normals per object; the operator works on the
    # active object, so each object is activated in turn.
    for object in objects:
        context.view_layer.objects.active = object
        bpy.ops.mesh.customdata_custom_splitnormals_add()
        object.data.use_auto_smooth = True
    # Re-select the processed objects and restore the active object.
    # NOTE(review): this selects `objects`, not the selection captured when
    # the function was entered (the previous code stored
    # context.selected_objects but never used it) -- confirm intent.
    object_manager.select_objects(context, 'REPLACE', objects)
    context.view_layer.objects.active = active_object
def handle_uv_naming_before_joining_objects(context, objects):
    '''
    Rename uv maps if needed for the join process.

    Makes sure all objects' render-active UV maps share the same name so
    that the active UV is not lost during the join process.
    '''
    # Check if renaming of uv maps is needed.
    uv_renaming_needed = False
    main_uv_name = ""
    for object in objects:
        # Initialised per object: an object may have no render-active UV,
        # in which case the previous code could hit an unbound local.
        active_uv_name = ""
        for uv in object.data.uv_layers:
            if uv.active_render:
                active_uv_name = uv.name
                if object == objects[0]:
                    main_uv_name = uv.name
        if active_uv_name != main_uv_name:
            uv_renaming_needed = True

    # Rename uv's if all active uv's on objects don't share the same name.
    if uv_renaming_needed:
        for object in objects:
            for uv in object.data.uv_layers:
                if uv.active_render:
                    uv.name = "active uv"
def get_meshes_list_from_objects(objects, exclude_objects=None):
    '''
    Collect the mesh data blocks of all MESH-type objects.

    objects: objects to inspect.
    exclude_objects: optional collection of objects to skip.
    return: list of mesh data blocks (object.data), in input order.
    '''
    # Avoid the mutable-default-argument pitfall; None means "exclude nothing".
    if exclude_objects is None:
        exclude_objects = []
    return [object.data for object in objects
            if object.type == "MESH" and object not in exclude_objects]
def delete_meshes(context, meshes):
    '''
    Remove mesh data blocks from the blend file, best effort.

    Meshes that cannot be removed are silently skipped.
    '''
    for mesh in meshes:
        try:
            bpy.data.meshes.remove(mesh)
        except Exception:
            # Best-effort cleanup was clearly intended, but the previous
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit;
            # Exception keeps the intent without hiding interpreter exits.
            pass
def set_sharp_edges_from_auto_smooth(context, objects, set_max_smooth_angle = False):
    '''
    Mark edges sharp (``edge.smooth = False``) on each object's mesh based on
    the mesh's normal auto-smooth settings, optionally forcing auto smooth on
    with the maximum angle afterwards.

    :param context: unused, kept for call-site compatibility
    :param objects: a single object or a list of objects
    :param set_max_smooth_angle: when True, enable auto smooth with the
                                 maximum angle after marking the edges
    Note: I found that it was much better to use split custom normal instead
    so this function is not used any longer. Keeping it just in case
    '''
    # Ensure objects is a list so a single object may also be passed
    if not type(objects) == list:
        objects = [objects]
    for obj in objects:
        if obj.data.use_auto_smooth == True:
            auto_smooth_angle = obj.data.auto_smooth_angle
        else:
            # Auto smooth disabled: use the maximum angle (~pi radians, 180deg)
            auto_smooth_angle = 3.14159
        bm = bmesh.new()
        bm.from_mesh(obj.data)
        bm.edges.ensure_lookup_table()
        for edge in bm.edges:
            if not edge.smooth:
                # Already marked sharp; leave untouched
                continue
            if not edge.is_manifold:
                # Non-manifold edges have no well-defined face angle: mark sharp
                edge.smooth = False
                continue
            angle = edge.calc_face_angle()
            if angle > auto_smooth_angle:
                edge.smooth = False
            else:
                edge.smooth = True
        # Write the edited bmesh back to the mesh datablock
        bm.to_mesh(obj.data)
        # Fix: free the bmesh explicitly as the bmesh API documentation
        # recommends, instead of leaving it to garbage collection.
        bm.free()
        # Force auto smooth with max angle setting
        if set_max_smooth_angle:
            obj.data.use_auto_smooth = True
            obj.data.auto_smooth_angle = 3.14159
def create_cage_from_objects(context, objects, fatten_amount, cage_name = "cage_object"):
    '''
    Create a cage mesh object by joining duplicates of
    multiple objects. Each vertex is moved in it's positive
    normal direction by the "fatten" value

    :param context: Blender context used for selection, mode and operators
    :param objects: objects to duplicate and join into the cage
    :param fatten_amount: distance passed to shrink_fatten (vertex push along normal)
    :param cage_name: name assigned to the resulting joined object
    :return: the newly created (joined) cage object
    '''
    orig_selection = context.selected_objects
    # Select only the source objects (last arg presumably makes one active)
    object_manager.select_objects(context, 'REPLACE', objects, True)
    bpy.ops.object.duplicate()
    # NOTE(review): cage_objects is assigned but never used below
    cage_objects = context.selected_objects
    # Fatten
    # NOTE(review): Blender reports mesh edit mode as 'EDIT_MESH', not 'EDIT',
    # so this guard may never match -- confirm the intended start mode.
    if not context.mode == 'EDIT':
        bpy.ops.object.editmode_toggle()
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.transform.shrink_fatten(value = fatten_amount)
    bpy.ops.object.editmode_toggle()
    # Join objects (join leaves the merged result as the active object)
    bpy.ops.object.join()
    context.active_object.name = cage_name
    joined_cage_object = context.active_object
    # Restore selection
    object_manager.select_objects(context, 'REPLACE', orig_selection)
    # Return cage object
    return joined_cage_object
| StarcoderdataPython |
6558226 | <filename>liao_xue_feng/_6_oop/test.py
# -*- coding: utf-8 -*-
from liao_xue_feng._6_oop.human import Human
from liao_xue_feng._6_oop.student import Student
# Demo: "private" attributes and name mangling on the Human class.
tom = Human('Tom', 18)
tom.speak()
tom.__age = 0 # does NOT touch the real age: outside the class body no name mangling happens, so this just creates a new `__age` attribute; the internal one is stored as `_Human__age`
# tom._Human__age = 0 # this will work, but not good
tom.speak()
tom.set_age(0) # good to use: go through the public setter
tom.speak()
# Demo: inheritance -- Student extends Human with an extra constructor argument.
tom = Student('Ken', 18, 'IT')
tom.set_age(100)
tom.speak()
# get_species can be called on an instance or directly on the class.
print(tom.get_species())
print(Student.get_species())
| StarcoderdataPython |
3513495 | <gh_stars>100-1000
def startup_installed(sender):
    """Register the html-tag helper callables in the default template namespace."""
    from uliweb.core import template
    from .tags import link, use, htmlmerge
    # Install each helper under its template-visible name.
    for key, helper in (('_tag_link', link),
                        ('_tag_use', use),
                        ('_tag_htmlmerge', htmlmerge)):
        template.default_namespace[key] = helper
def init_static_combine():
    """
    Process static combine: create an md5 key for each configured group of
    static filenames.

    Returns a dict mapping ``<md5-of-joined-filenames><ext-of-first-file>``
    to the list of filenames, or an empty dict when the feature is disabled.
    """
    from uliweb import settings
    from hashlib import md5
    import os

    d = {}
    if settings.get_var('STATIC_COMBINE_CONFIG/enable', False):
        for k, v in settings.get('STATIC_COMBINE', {}).items():
            # Fix: this line was corrupted in the source ("<KEY>" redaction
            # artifact); reconstructed as an md5 over the concatenated
            # filenames, keeping the extension of the group's first file.
            key = md5(''.join(v).encode('utf-8')).hexdigest() + os.path.splitext(v[0])[1]
            d[key] = v
    return d
11217372 | <filename>dict_merge.py
dict1 = {'players': 251, 'audience': 1500}
dict2 = {'audience': 1700, 'matches': 30}
# Copy the first mapping, then overlay the second: on duplicate keys the
# later mapping wins, so 'audience' ends up as 1700 (from dict2).
merged_dict = dict(dict1)
merged_dict.update(dict2)
print(merged_dict)
| StarcoderdataPython |
9718561 | import h5py
import numpy as np
import os
from contextlib import contextmanager
from . import _utils as utils
import time
class _safe_open:
    """Context manager that opens an HDF5 file, retrying while it is locked.

    A failed attempt (file locked by another writer) leaves ``self.f`` falsy;
    the constructor keeps retrying every two seconds until the open succeeds.
    Any other error propagates immediately.
    """

    def __init__(self, filename, *args, **kwargs):
        self.f = False
        self.open(filename, *args, **kwargs)
        while not self.f:
            time.sleep(2)
            self.open(filename, *args, **kwargs)

    def open(self, filename, *args, **kwargs):
        """Attempt one open; on a lock error report it and leave self.f falsy."""
        try:
            self.f = h5py.File(filename, *args, **kwargs)
        except Exception as exc:
            if "Resource temporarily unavailable" not in str(exc):
                raise
            print(str(exc))
            print("Retrying in 2 secondes...")

    def __enter__(self):
        return self.f

    def __exit__(self, *exc_info):
        self.f.close()
class h5logger:
    """Append-only HDF5 logger.

    Each call to :meth:`log` appends one entry to a resizable dataset named
    after the logged quantity, creating the dataset on first use.

    Args:
    -----

    filename: str
        the name of the h5 file to log the data into (extension handling is
        done by ``utils.produce_filename``)
    replace_if_exists: bool
        if True, then any file with same name will be deleted, if False
        then any new logged data will be appended onto anything that already exists
    concurrent_readers: bool
        if True, the file is put into SWMR (single-writer/multiple-readers)
        mode so other processes can read while this one writes
    datasets: dict or None
        required when creating a new file with ``concurrent_readers=True``:
        maps dataset name -> (ndim, dtype), so all datasets can be created
        up front before SWMR mode is enabled
    n_datasets: int or None
        when set, the logger refuses new dataset names once this many
        datasets exist, and enables SWMR mode at that point
    """

    def __init__(
        self,
        filename: str,
        replace_if_exists: bool = False,
        concurrent_readers: bool = False,
        datasets: dict or None = None,
        n_datasets: int or None = None,
    ):
        # be sure to use self.filename anywhere down this line as it
        # took care of extension formatting automatically
        self._filename = utils.produce_filename(filename)
        self._concurrent_readers = concurrent_readers
        self._n_datasets = n_datasets
        if os.path.exists(self.filename) and not replace_if_exists:
            # we have to make sure that it is a proper .h5 file
            # and we can append data to it if needed
            utils.check_file_validity(self.filename)
            tag = "a"
        elif os.path.exists(self.filename) and replace_if_exists:
            os.remove(self.filename)
            tag = "w"
        else:
            tag = "w"
        self._file = h5py.File(self.filename, tag, libver="latest")
        # Names of pre-created datasets whose first log() call must resize
        # them from length 0 to length 1 (see log()).
        self._need_full_resize = []
        if tag == "w" and self.concurrent_readers:
            # SWMR mode forbids creating datasets later, so every dataset
            # must be declared before swmr_mode is turned on below.
            assert datasets is not None
            for name, (dim, dtype) in datasets.items():
                assert type(name) == str
                assert type(dim) == int
                self._file.create_dataset(
                    name,
                    (0,) * dim,
                    maxshape=(None,) * dim,
                    dtype=dtype,
                )
                self._need_full_resize.append(name)
        self._file.swmr_mode = self.concurrent_readers

    def enable_concurrent_readers(self):
        """Switch the file into SWMR mode (idempotent)."""
        if self._file.swmr_mode == False:
            self._file.swmr_mode = True

    def close(self):
        """Close the underlying HDF5 file."""
        self._file.close()

    def log(self, name: str, value: object) -> None:
        """Append ``value`` as one new entry of the dataset called ``name``.

        The dataset is created on first use, unless SWMR mode is already
        active (then only pre-declared datasets may be logged). All entries
        of one dataset must share the same shape and dtype.
        """
        utils.validate_log(self.filename)
        # Coerce plain Python values to numpy arrays so shape/dtype exist.
        if not hasattr(value, "shape"):
            value = np.array(value)
        if not hasattr(value, "dtype"):
            value = np.array(value)
        if name not in self._file:
            if self._n_datasets and len(list(self.keys())) == self._n_datasets:
                raise ValueError("trying to add dataset to already locked logger")
            if self._file.swmr_mode:
                raise RuntimeError(
                    "trying to log a non-initialized dataset with concurrent readers on"
                )
            self._file.create_dataset(
                name,
                (0,) + value.shape,
                maxshape=(None,) + value.shape,
                dtype=value.dtype,
            )
        # note that once this condition is valid, it will never be
        # seen again (in theory)
        if self._n_datasets and len(list(self.keys())) == self._n_datasets:
            self.enable_concurrent_readers()
        dset = self._file[name]
        if name in self._need_full_resize:
            # First write into a pre-created (SWMR) dataset: grow it from
            # length 0 to hold exactly one entry of this value's shape.
            dset.resize((1,) + value.shape)
            self._need_full_resize.remove(name)
        else:
            assert dset.shape[1:] == value.shape
            dset.resize(dset.shape[0] + 1, axis=0)
        dset[-1] = value
        # Notify the reader process that new data has been written
        dset.flush()

    @property
    def filename(self):
        # Normalized filename (set once in __init__).
        return self._filename

    @property
    def concurrent_readers(self):
        # Whether SWMR mode was requested at construction time.
        return self._concurrent_readers

    def keys(self):
        """Yield the names of all datasets currently stored in the file."""
        with h5py.File(self.filename, "r") as f:
            for key in f.keys():
                yield key

    @staticmethod
    def open(filename):
        """Open ``filename`` read-only in SWMR mode, retrying while locked."""
        return _safe_open(
            utils.produce_filename(filename), "r", libver="latest", swmr=True
        )
| StarcoderdataPython |
9678877 | <filename>src/domainClient/models/domain_agency_service_v2_model_contact_details.py
# coding: utf-8
"""
Domain Group API V1
Provides public access to Domain's microservices # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DomainAgencyServiceV2ModelContactDetails(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared swagger type of each attribute; consumed by to_dict() for
    # recursive serialization.
    swagger_types = {
        'business_sale': 'DomainAgencyServiceV2ModelEmailPhone',
        'business_rent': 'DomainAgencyServiceV2ModelEmailPhone',
        'commercial_lease': 'DomainAgencyServiceV2ModelEmailPhone',
        'commercial_sale': 'DomainAgencyServiceV2ModelEmailPhone',
        'email_domains': 'list[DomainAgencyServiceV2ModelEmailDomain]',
        'general': 'DomainAgencyServiceV2ModelGeneralContactDetails',
        'residential_rent': 'DomainAgencyServiceV2ModelEmailPhone',
        'residential_sale': 'DomainAgencyServiceV2ModelEmailPhone'
    }

    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        'business_sale': 'businessSale',
        'business_rent': 'businessRent',
        'commercial_lease': 'commercialLease',
        'commercial_sale': 'commercialSale',
        'email_domains': 'emailDomains',
        'general': 'general',
        'residential_rent': 'residentialRent',
        'residential_sale': 'residentialSale'
    }

    def __init__(self, business_sale=None, business_rent=None, commercial_lease=None, commercial_sale=None, email_domains=None, general=None, residential_rent=None, residential_sale=None):  # noqa: E501
        """DomainAgencyServiceV2ModelContactDetails - a model defined in Swagger"""  # noqa: E501

        self._business_sale = None
        self._business_rent = None
        self._commercial_lease = None
        self._commercial_sale = None
        self._email_domains = None
        self._general = None
        self._residential_rent = None
        self._residential_sale = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so the
        # setters run and unset fields stay None.
        if business_sale is not None:
            self.business_sale = business_sale
        if business_rent is not None:
            self.business_rent = business_rent
        if commercial_lease is not None:
            self.commercial_lease = commercial_lease
        if commercial_sale is not None:
            self.commercial_sale = commercial_sale
        if email_domains is not None:
            self.email_domains = email_domains
        if general is not None:
            self.general = general
        if residential_rent is not None:
            self.residential_rent = residential_rent
        if residential_sale is not None:
            self.residential_sale = residential_sale

    @property
    def business_sale(self):
        """Gets the business_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The business_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._business_sale

    @business_sale.setter
    def business_sale(self, business_sale):
        """Sets the business_sale of this DomainAgencyServiceV2ModelContactDetails.


        :param business_sale: The business_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._business_sale = business_sale

    @property
    def business_rent(self):
        """Gets the business_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The business_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._business_rent

    @business_rent.setter
    def business_rent(self, business_rent):
        """Sets the business_rent of this DomainAgencyServiceV2ModelContactDetails.


        :param business_rent: The business_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._business_rent = business_rent

    @property
    def commercial_lease(self):
        """Gets the commercial_lease of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The commercial_lease of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._commercial_lease

    @commercial_lease.setter
    def commercial_lease(self, commercial_lease):
        """Sets the commercial_lease of this DomainAgencyServiceV2ModelContactDetails.


        :param commercial_lease: The commercial_lease of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._commercial_lease = commercial_lease

    @property
    def commercial_sale(self):
        """Gets the commercial_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The commercial_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._commercial_sale

    @commercial_sale.setter
    def commercial_sale(self, commercial_sale):
        """Sets the commercial_sale of this DomainAgencyServiceV2ModelContactDetails.


        :param commercial_sale: The commercial_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._commercial_sale = commercial_sale

    @property
    def email_domains(self):
        """Gets the email_domains of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The email_domains of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: list[DomainAgencyServiceV2ModelEmailDomain]
        """
        return self._email_domains

    @email_domains.setter
    def email_domains(self, email_domains):
        """Sets the email_domains of this DomainAgencyServiceV2ModelContactDetails.


        :param email_domains: The email_domains of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: list[DomainAgencyServiceV2ModelEmailDomain]
        """

        self._email_domains = email_domains

    @property
    def general(self):
        """Gets the general of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The general of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelGeneralContactDetails
        """
        return self._general

    @general.setter
    def general(self, general):
        """Sets the general of this DomainAgencyServiceV2ModelContactDetails.


        :param general: The general of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelGeneralContactDetails
        """

        self._general = general

    @property
    def residential_rent(self):
        """Gets the residential_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The residential_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._residential_rent

    @residential_rent.setter
    def residential_rent(self, residential_rent):
        """Sets the residential_rent of this DomainAgencyServiceV2ModelContactDetails.


        :param residential_rent: The residential_rent of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._residential_rent = residential_rent

    @property
    def residential_sale(self):
        """Gets the residential_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501


        :return: The residential_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :rtype: DomainAgencyServiceV2ModelEmailPhone
        """
        return self._residential_sale

    @residential_sale.setter
    def residential_sale(self, residential_sale):
        """Sets the residential_sale of this DomainAgencyServiceV2ModelContactDetails.


        :param residential_sale: The residential_sale of this DomainAgencyServiceV2ModelContactDetails.  # noqa: E501
        :type: DomainAgencyServiceV2ModelEmailPhone
        """

        self._residential_sale = residential_sale

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models, and dicts of
        # models; plain values are copied through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(DomainAgencyServiceV2ModelContactDetails, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DomainAgencyServiceV2ModelContactDetails):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
3273713 | <gh_stars>1-10
import autoarray as aa
import numpy as np
from autoarray.mock.mock import MockPixelizationGrid, MockMapper
def test__regularization_matrix__matches_util():
    """The regularization matrix built through a mapper must be identical to
    the one computed directly by the regularization util from the same
    neighbor data."""
    # Each row lists the neighbor pixel indices of one of 9 pixels;
    # -1 entries are unused slots.
    pixel_neighbors = np.array(
        [
            [1, 3, 7, 2],
            [4, 2, 0, -1],
            [1, 5, 3, -1],
            [4, 6, 0, -1],
            [7, 1, 5, 3],
            [4, 2, 8, -1],
            [7, 3, 0, -1],
            [4, 8, 6, -1],
            [7, 5, -1, -1],
        ]
    )
    # Number of valid (non -1) neighbors per pixel, matching the rows above.
    pixel_neighbors_sizes = np.array([4, 3, 3, 3, 4, 3, 3, 3, 2])
    pixelization_grid = MockPixelizationGrid(
        pixel_neighbors=pixel_neighbors, pixel_neighbors_sizes=pixel_neighbors_sizes
    )
    mapper = MockMapper(source_pixelization_grid=pixelization_grid)
    reg = aa.reg.Constant(coefficient=1.0)
    regularization_matrix = reg.regularization_matrix_from(mapper=mapper)
    regularization_matrix_util = aa.util.regularization.constant_regularization_matrix_from(
        coefficient=1.0,
        pixel_neighbors=pixel_neighbors,
        pixel_neighbors_sizes=pixel_neighbors_sizes,
    )
    # Elementwise comparison: both code paths must produce the same matrix.
    assert (regularization_matrix == regularization_matrix_util).all()
| StarcoderdataPython |
1959236 | from .seg_hrnet_ocr import HighResolutionNet, get_seg_model | StarcoderdataPython |
1759508 | <filename>txqueue/scheduler/dispatcher.py
# the server runs in ../../api/openAPI3 or ../../api/swagger2
# so at runtime, the relative path to any file in the directory with dispatcher.py
# is ../../
import sys
sys.path.append('../..')
# and the full path to this file will be txqueue.scheduler.dispatcher
import json
import redis
from rq import Queue
import os
import entrypoint
# Shared RQ queue backed by Redis; connection parameters come from the environment.
q = Queue(connection=redis.StrictRedis(host=os.environ["REDIS_QUEUE_HOST"], port=int(os.environ["REDIS_QUEUE_PORT"]), db=int(os.environ["REDIS_QUEUE_DB"])))
# Default job timeout, read (as a string) from the environment.
TASK_TIME=os.environ["TASK_TIME"]
# How long (in seconds) finished job results are kept around.
RESULT_TTL=int(os.environ["RESULT_TTL"])
def delete_job(job_id):
    """Delete my job by Id

    Cancels the job so it will not be executed. # noqa: E501

    :param job_id: Id of the job that needs to be deleted
    :type job_id: str

    :rtype: str (the id of the cancelled job)
    """
    job = q.fetch_job(job_id)
    # Fix: fetch_job returns None for unknown ids; previously this raised
    # AttributeError on .cancel(). Cancelling an unknown job is a no-op.
    if job is not None:
        job.cancel()
    return job_id
def get_job_by_id(job_id): # noqa: E501
    """Find my job by Id

    Looks the job up in the queue and returns a summary of its state. # noqa: E501

    :param job_id: Id of job to be fetched
    :type job_id: str

    :rtype: dict (job summary) or a ('Not Found', 404) tuple for unknown ids
    """
    job = q.fetch_job(job_id)
    # Fix: use an identity check for the None sentinel instead of '=='.
    if job is None:
        return 'Not Found', 404
    return {
        "status": job.get_status(),
        "name": job.func_name,
        "created_at": str(job.created_at),
        "enqueued_at": str(job.enqueued_at),
        "started_at": str(job.started_at),
        "ended_at": str(job.ended_at),
        "description": job.description,
        "result": job.result,
        "exc_info": job.exc_info
    }
def get_job_queue(): # noqa: E501
    """Lists queued jobs

    Returns the ids of all jobs currently waiting in the queue # noqa: E501

    :rtype: List[str]
    """
    return q.job_ids
def submit_job(job_timeout, result_ttl, body=None): # noqa: E501
    """Submit a job

    Enqueues ``body`` for execution by ``entrypoint.run``. The scheduler does
    not interpret the payload: it can be any well-constructed JSON value the
    application-specific worker understands. # noqa: E501

    :param job_timeout: maximum run time for the job; falls back to TASK_TIME
    :param result_ttl: seconds to keep the job result; falls back to RESULT_TTL
    :param body: opaque job payload handed to the worker

    :rtype: str (the id of the enqueued job)
    """
    # Fix: identity checks ('is None') for the sentinel defaults.
    if job_timeout is None:
        job_timeout = TASK_TIME
    if result_ttl is None:
        result_ttl = RESULT_TTL
    job = q.enqueue(entrypoint.run, args=[body], job_timeout=job_timeout, result_ttl=result_ttl)
    return job.id
| StarcoderdataPython |
1793229 | #!/usr/bin/env python
import logging
import rx
asyncio = rx.config['asyncio']
def main():
    """Drive the asyncio demo: run do_something() to completion, then shut down."""
    logging.basicConfig(level=logging.DEBUG)
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(do_something())
    event_loop.close()
    logging.info('closed event loop')
async def do_something():
    """Log, pause briefly, then await a helper coroutine and log its result.

    Demonstrates awaiting another coroutine and using its return value.
    """
    logging.info('doing something')
    await asyncio.sleep(2)
    val = await do_something_else()
    # Fix: lazy %-style logging args -- the message is only formatted when the
    # level is enabled (the emitted text is unchanged).
    logging.info('did something and got %s', val)
async def do_something_else():
    """Pretend to do several seconds of work, then produce a result."""
    delay_seconds = 5
    await asyncio.sleep(delay_seconds)
    return 7
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4889981 | class ToolStripProgressBar(ToolStripControlHost,IComponent,IDisposable,IDropTarget,ISupportOleDropSource,IArrangedElement):
"""
Represents a Windows progress bar control contained in a System.Windows.Forms.StatusStrip.
ToolStripProgressBar()
ToolStripProgressBar(name: str)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return ToolStripProgressBar()
def CreateAccessibilityInstance(self,*args):
""" CreateAccessibilityInstance(self: ToolStripControlHost) -> AccessibleObject """
pass
def Dispose(self):
"""
Dispose(self: ToolStripControlHost,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.ToolStripControlHost and optionally releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def GetService(self,*args):
"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or null if the System.ComponentModel.Component does not provide the specified service.
"""
pass
def Increment(self,value):
"""
Increment(self: ToolStripProgressBar,value: int)
Advances the current position of the progress bar by the specified amount.
value: The amount by which to increment the progress bar's current position.
"""
pass
def IsInputChar(self,*args):
"""
IsInputChar(self: ToolStripItem,charCode: Char) -> bool
Determines whether a character is an input character that the item recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the item and not preprocessed; otherwise,false.
"""
pass
def IsInputKey(self,*args):
"""
IsInputKey(self: ToolStripItem,keyData: Keys) -> bool
Determines whether the specified key is a regular input key or a special key that requires preprocessing.
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""
pass
def MemberwiseClone(self,*args):
"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the object to be assigned a new identity when it is marshaled across a remoting boundary. A value of false is usually appropriate. true to copy the current System.MarshalByRefObject object's identity to its clone,which will cause remoting client calls to be routed to the remote server object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def OnAvailableChanged(self,*args):
"""
OnAvailableChanged(self: ToolStripItem,e: EventArgs)
Raises the AvailableChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBackColorChanged(self,*args):
"""
OnBackColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnBoundsChanged(self,*args):
"""
OnBoundsChanged(self: ToolStripControlHost)
Occurs when the System.Windows.Forms.ToolStripItem.Bounds property changes.
"""
pass
def OnClick(self,*args):
"""
OnClick(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.Click event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDisplayStyleChanged(self,*args):
"""
OnDisplayStyleChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DisplayStyleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDoubleClick(self,*args):
"""
OnDoubleClick(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragDrop(self,*args):
"""
OnDragDrop(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragDrop event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragEnter(self,*args):
"""
OnDragEnter(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragEnter event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnDragLeave(self,*args):
"""
OnDragLeave(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnDragOver(self,*args):
"""
OnDragOver(self: ToolStripItem,dragEvent: DragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.DragOver event.
dragEvent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""
pass
def OnEnabledChanged(self,*args):
"""
OnEnabledChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnEnter(self,*args):
"""
OnEnter(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Enter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnFontChanged(self,*args):
"""
OnFontChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnForeColorChanged(self,*args):
"""
OnForeColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnGiveFeedback(self,*args):
"""
OnGiveFeedback(self: ToolStripItem,giveFeedbackEvent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.ToolStripItem.GiveFeedback event.
giveFeedbackEvent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""
pass
def OnGotFocus(self,*args):
"""
OnGotFocus(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.GotFocus event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnHostedControlResize(self,*args):
"""
OnHostedControlResize(self: ToolStripControlHost,e: EventArgs)
Synchronizes the resizing of the control host with the resizing of the hosted control.
e: An System.EventArgs that contains the event data.
"""
pass
def OnKeyDown(self,*args):
"""
OnKeyDown(self: ToolStripControlHost,e: KeyEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnKeyPress(self,*args):
"""
OnKeyPress(self: ToolStripControlHost,e: KeyPressEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""
pass
def OnKeyUp(self,*args):
"""
OnKeyUp(self: ToolStripControlHost,e: KeyEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""
pass
def OnLayout(self,*args):
"""
OnLayout(self: ToolStripControlHost,e: LayoutEventArgs)
e: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""
pass
def OnLeave(self,*args):
"""
OnLeave(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Leave event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnLocationChanged(self,*args):
"""
OnLocationChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnLostFocus(self,*args):
"""
OnLostFocus(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.LostFocus event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnMouseDown(self,*args):
"""
OnMouseDown(self: ToolStripItem,e: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseEnter(self,*args):
"""
OnMouseEnter(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseHover(self,*args):
"""
OnMouseHover(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseHover event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseLeave(self,*args):
"""
OnMouseLeave(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseLeave event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnMouseMove(self,*args):
"""
OnMouseMove(self: ToolStripItem,mea: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseMove event.
mea: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnMouseUp(self,*args):
"""
OnMouseUp(self: ToolStripItem,e: MouseEventArgs)
Raises the System.Windows.Forms.ToolStripItem.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""
pass
def OnOwnerChanged(self,*args):
"""
OnOwnerChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.OwnerChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnOwnerFontChanged(self,*args):
"""
OnOwnerFontChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the System.Windows.Forms.ToolStripItem.Font property has changed on the parent of the System.Windows.Forms.ToolStripItem.
e: A System.EventArgs that contains the event data.
"""
pass
def OnPaint(self,*args):
"""
OnPaint(self: ToolStripControlHost,e: PaintEventArgs)
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""
pass
def OnParentBackColorChanged(self,*args):
"""
OnParentBackColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentChanged(self,*args):
"""
OnParentChanged(self: ToolStripControlHost,oldParent: ToolStrip,newParent: ToolStrip)
oldParent: The original parent of the item.
newParent: The new parent of the item.
"""
pass
def OnParentEnabledChanged(self,*args):
"""
OnParentEnabledChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.EnabledChanged event when the System.Windows.Forms.ToolStripItem.Enabled property value of the item's container changes.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentForeColorChanged(self,*args):
"""
OnParentForeColorChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnParentRightToLeftChanged(self,*args):
"""
OnParentRightToLeftChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnQueryContinueDrag(self,*args):
"""
OnQueryContinueDrag(self: ToolStripItem,queryContinueDragEvent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.ToolStripItem.QueryContinueDrag event.
queryContinueDragEvent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""
pass
def OnRightToLeftChanged(self,*args):
"""
OnRightToLeftChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnRightToLeftLayoutChanged(self,*args):
"""
OnRightToLeftLayoutChanged(self: ToolStripProgressBar,e: EventArgs)
Raises the System.Windows.Forms.ProgressBar.RightToLeftLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnSubscribeControlEvents(self,*args):
"""
OnSubscribeControlEvents(self: ToolStripProgressBar,control: Control)
control: The control from which to subscribe events.
"""
pass
def OnTextChanged(self,*args):
"""
OnTextChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.TextChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def OnUnsubscribeControlEvents(self,*args):
"""
OnUnsubscribeControlEvents(self: ToolStripProgressBar,control: Control)
control: The control from which to unsubscribe events.
"""
pass
def OnValidated(self,*args):
"""
OnValidated(self: ToolStripControlHost,e: EventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Validated event.
e: A System.EventArgs that contains the event data.
"""
pass
def OnValidating(self,*args):
"""
OnValidating(self: ToolStripControlHost,e: CancelEventArgs)
Raises the System.Windows.Forms.ToolStripControlHost.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""
pass
def OnVisibleChanged(self,*args):
"""
OnVisibleChanged(self: ToolStripItem,e: EventArgs)
Raises the System.Windows.Forms.ToolStripItem.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""
pass
def PerformStep(self):
"""
PerformStep(self: ToolStripProgressBar)
Advances the current position of the progress bar by the amount of the System.Windows.Forms.ToolStripProgressBar.Step property.
"""
pass
def ProcessCmdKey(self,*args):
"""
ProcessCmdKey(self: ToolStripControlHost,m: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: false in all cases.
"""
pass
def ProcessDialogKey(self,*args):
"""
ProcessDialogKey(self: ToolStripControlHost,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the item; otherwise,false.
"""
pass
def ProcessMnemonic(self,*args):
"""
ProcessMnemonic(self: ToolStripControlHost,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""
pass
def SetBounds(self,*args):
"""
SetBounds(self: ToolStripItem,bounds: Rectangle)
Sets the size and location of the item.
bounds: A System.Drawing.Rectangle that represents the size and location of the System.Windows.Forms.ToolStripItem
"""
pass
def SetVisibleCore(self,*args):
"""
SetVisibleCore(self: ToolStripControlHost,visible: bool)
visible: true to make the System.Windows.Forms.ToolStripItem visible; otherwise,false.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,name=None):
"""
__new__(cls: type)
__new__(cls: type,name: str)
"""
pass
def __str__(self,*args):
pass
BackgroundImage=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant to this class.
Get: BackgroundImage(self: ToolStripProgressBar) -> Image
Set: BackgroundImage(self: ToolStripProgressBar)=value
"""
BackgroundImageLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property is not relevant to this class.
Get: BackgroundImageLayout(self: ToolStripProgressBar) -> ImageLayout
Set: BackgroundImageLayout(self: ToolStripProgressBar)=value
"""
CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the component can raise an event.
"""
DefaultAutoToolTip=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether to display the System.Windows.Forms.ToolTip that is defined as the default.
"""
DefaultDisplayStyle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating what is displayed on the System.Windows.Forms.ToolStripItem.
"""
DefaultMargin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the spacing between the System.Windows.Forms.ToolStripProgressBar and adjacent items.
"""
DefaultPadding=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the internal spacing characteristics of the item.
"""
DefaultSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the height and width of the System.Windows.Forms.ToolStripProgressBar in pixels.
"""
DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""
DismissWhenClicked=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether items on a System.Windows.Forms.ToolStripDropDown are hidden after they are clicked.
"""
Events=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""
MarqueeAnimationSpeed=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value representing the delay between each System.Windows.Forms.ProgressBarStyle.Marquee display update,in milliseconds.
Get: MarqueeAnimationSpeed(self: ToolStripProgressBar) -> int
Set: MarqueeAnimationSpeed(self: ToolStripProgressBar)=value
"""
Maximum=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the upper bound of the range that is defined for this System.Windows.Forms.ToolStripProgressBar.
Get: Maximum(self: ToolStripProgressBar) -> int
Set: Maximum(self: ToolStripProgressBar)=value
"""
Minimum=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the lower bound of the range that is defined for this System.Windows.Forms.ToolStripProgressBar.
Get: Minimum(self: ToolStripProgressBar) -> int
Set: Minimum(self: ToolStripProgressBar)=value
"""
Parent=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the parent container of the System.Windows.Forms.ToolStripItem.
"""
ProgressBar=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Windows.Forms.ProgressBar.
Get: ProgressBar(self: ToolStripProgressBar) -> ProgressBar
"""
RightToLeftLayout=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value indicating whether the System.Windows.Forms.ToolStripProgressBar layout is right-to-left or left-to-right when the System.Windows.Forms.RightToLeft property is set to System.Windows.Forms.RightToLeft.Yes.
Get: RightToLeftLayout(self: ToolStripProgressBar) -> bool
Set: RightToLeftLayout(self: ToolStripProgressBar)=value
"""
ShowKeyboardCues=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether to show or hide shortcut keys.
"""
Step=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the amount by which to increment the current value of the System.Windows.Forms.ToolStripProgressBar when the System.Windows.Forms.ToolStripProgressBar.PerformStep method is called.
Get: Step(self: ToolStripProgressBar) -> int
Set: Step(self: ToolStripProgressBar)=value
"""
Style=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the style of the System.Windows.Forms.ToolStripProgressBar.
Get: Style(self: ToolStripProgressBar) -> ProgressBarStyle
Set: Style(self: ToolStripProgressBar)=value
"""
Text=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the text displayed on the System.Windows.Forms.ToolStripProgressBar.
Get: Text(self: ToolStripProgressBar) -> str
Set: Text(self: ToolStripProgressBar)=value
"""
Value=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the current value of the System.Windows.Forms.ToolStripProgressBar.
Get: Value(self: ToolStripProgressBar) -> int
Set: Value(self: ToolStripProgressBar)=value
"""
KeyDown=None
KeyPress=None
KeyUp=None
LocationChanged=None
OwnerChanged=None
RightToLeftLayoutChanged=None
TextChanged=None
Validated=None
Validating=None
| StarcoderdataPython |
4837880 |
from lasso.utils.ConsoleColoring import ConsoleColoring
from lasso.logging import str_error, str_warn, str_info
import pickle
import os
import sys
import time
import logging
from concurrent import futures
import ansa
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
# messages
_msg_site_packages_dir_not_set = ("Environment variable '{0}' was not specified. "
"Assuming all required packages where installed"
" already somewhere else.")
_msg_port_taken = "Port {} is already in use."
_msg_stopping_server = "Stopping GRPC server"
_msg_invalid_argument_index = ("A negative function argument index of '{0}' is not allowed."
" Either set a name or a valid index.")
_msg_some_weird_error = "Encountered error '{0}'. Shutting down service."
_msg_import_error = '''
ImportError: {0}
Please install the package for ANSA as follows:
(1) Create and activate a conda environment with python 3.3
- 'conda create -n py33 python=3.3'
- 'activate py33'
(2) Install required packages
- 'python -m pip install grpcio enum34 protobuf'
(3) Run the server with the option '--python33-path'
- 'python -m lasso.ansa.grpc.server --python33-path path/to/anaconda/envs/py33'
Or set the environment variable 'ANSA_GRPC_SITE_PACKAGES_PATH'
- csh : 'setenv ANSA_GRPC_SITE_PACKAGES_PATH path/to/anaconda/envs/py33'
- bash: 'export ANSA_GRPC_SITE_PACKAGES_PATH="path/to/anaconda/envs/py33"'
- ps : '$env:ANSA_GRPC_SITE_PACKAGES_PATH = "path/to/anaconda/envs/py33"'
- cmd : 'setx ANSA_GRPC_SITE_PACKAGES_PATH "path/to/anaconda/envs/py33"'
(4) Enjoy life ♥
'''
# this is a utility for the command line usage
# ANSA ships its own embedded Python, so third-party packages (grpcio,
# protobuf, ...) must be made importable by pointing this environment
# variable at a matching site-packages directory.
try:
    sys.path.append(os.environ["ANSA_GRPC_SITE_PACKAGES_PATH"])
except KeyError:
    # Not fatal: the packages may already be importable from elsewhere.
    print(str_warn(_msg_site_packages_dir_not_set.format(
        "ANSA_GRPC_SITE_PACKAGES_PATH")))
try:
    import AnsaGRPC_pb2
    import AnsaGRPC_pb2_grpc
    import grpc
    from utils import Entity, pickle_object
except ImportError as err:
    # Re-raise with detailed installation instructions for the user.
    raise RuntimeError(str_error(_msg_import_error.format(str(err))))
def print_header():
    ''' Prints the startup banner of the ANSA remote scripting server. '''
    header = '''
    ANSA Remote Scripting Server by {0}
    ------------------------------------------
    '''.format(ConsoleColoring.blue("LASSO GmbH", light=True))
    print(header)
def _serialize(obj):
    ''' Serializes an arbitrary object for transfer
    Parameters
    ----------
    obj : `obj`
        object to be serialized for transfer
    Returns
    -------
    pickled_object : `lasso.ansa.rpc.PickledObject`
        protobuf serialized message
    Notes
    -----
    Converts any ansa entities to remote placeholders. Containers
    (list, tuple, dict) are converted one level deep, mirroring the
    deserialization in `_convert_any_ansa_entities`.
    '''
    # first convert ansa entites to fake (placeholder) entities
    if isinstance(obj, ansa.base.Entity):
        obj = _serialize_ansa_entity(obj)
    elif isinstance(obj, list):
        obj = [_serialize_ansa_entity(entry) if isinstance(entry, ansa.base.Entity)
               else entry for entry in obj]
    elif isinstance(obj, tuple):
        obj = tuple(_serialize_ansa_entity(entry) if isinstance(entry, ansa.base.Entity)
                    else entry for entry in obj)
    elif isinstance(obj, dict):
        # BUGFIX: keys/values must be checked against ``ansa.base.Entity``
        # (like the list/tuple branches above), not against the placeholder
        # ``Entity`` class. ``_serialize_ansa_entity`` asserts its input is an
        # ``ansa.base.Entity``, so the old check never converted real ansa
        # entities stored in dicts and would have crashed on placeholders.
        obj = {
            _serialize_ansa_entity(key) if isinstance(key, ansa.base.Entity) else key:
            _serialize_ansa_entity(value) if isinstance(
                value, ansa.base.Entity) else value
            for key, value in obj.items()
        }
    # then we pickle everything
    return AnsaGRPC_pb2.PickledObject(
        data=pickle_object(obj))
def _serialize_ansa_entity(ansa_entity):
    ''' Replaces an ansa entity by a transferable placeholder entity
    Parameters
    ----------
    ansa_entity : `ansa.base.Entity`
        ansa entity
    Returns
    -------
    entity : `lasso.ansa.Entity`
        entity placeholder instance
    Notes
    -----
    The placeholder mirrors all properties of the original instance so the
    remote side can reconstruct it.
    '''
    assert isinstance(ansa_entity, ansa.base.Entity)
    # build the placeholder from id and type of the current deck
    placeholder = Entity(
        id=ansa_entity._id,
        ansa_type=ansa_entity.ansa_type(ansa.base.CurrentDeck()),
    )
    # carry over every property of the original entity
    placeholder.assign_props(ansa_entity)
    return placeholder
def _deserialize_entity(entity):
    ''' Resolves a placeholder entity back into a real ansa entity
    Parameters
    ----------
    entity : `lasso.ansa.rpc.Entity`
        placeholder instance to be resolved
    Returns
    -------
    ansa_entity : `ansa.base.Entity`
        ansa entity
    '''
    assert isinstance(entity, Entity)
    # look the entity up by type and id in the currently active deck
    current_deck = ansa.base.CurrentDeck()
    return ansa.base.GetEntity(current_deck, entity.ansa_type, entity.id)
def _convert_any_ansa_entities(obj):
    ''' Converts any placeholder entities contained in whatever it is
    Parameters
    ----------
    obj : `object`
        any object to check or convert
    Returns
    -------
    ret : `object`
        object with placeholder entities resolved to ansa entities
    Notes
    -----
    Containers (list, tuple, dict) are converted one level deep; every
    other object is returned unchanged.
    '''
    def resolve(item):
        # resolve a single item if it is a placeholder, else pass it through
        return _deserialize_entity(item) if isinstance(item, Entity) else item

    if isinstance(obj, Entity):
        return _deserialize_entity(obj)
    if isinstance(obj, list):
        return [resolve(item) for item in obj]
    if isinstance(obj, tuple):
        return tuple(resolve(item) for item in obj)
    if isinstance(obj, dict):
        return {resolve(key): resolve(value) for key, value in obj.items()}
    return obj
class LassoAnsaDriverServicer(AnsaGRPC_pb2_grpc.LassoAnsaDriverServicer):
    ''' Implementation of the server

    Executes remote procedure calls inside the ANSA process: each request
    names a module-level function whose pickled arguments are deserialized,
    executed, and whose return value is pickled back to the client.
    '''
    def __init__(self):
        ''' Create a server executing tasks
        '''
        logging.info(str_info("LassoAnsaDriverServicer.__init__"))
        # Polled by the serving main loop; set to True by the Shutdown RPC.
        self.please_shutdown = False
    def _deserialize_args(self, pb_function_arguments):
        ''' Deserialize function arguments
        Parameters
        ----------
        pb_function_arguments : `list` of `lasso.ansa.rpc.FunctionArgument`
            function arguments to iterate over
        Returns
        -------
        args : `list`
            argument list
        kwargs : `dict`
            kwargs dictionary for function
        '''
        # these guys or ladies get returned
        args = []
        kwargs = {}
        for function_argument in pb_function_arguments:
            # get argument info
            argument_index = function_argument.index
            argument_name = function_argument.name
            argument_value = pickle.loads(function_argument.value.data)
            # convert entities back to ansa entities
            argument_value = _convert_any_ansa_entities(argument_value)
            # I belong to KWARGS (a non-empty name marks a keyword argument)
            if len(argument_name) != 0:
                kwargs[argument_name] = argument_value
            # I belong to ARGS
            else:
                # sorry, these guys are not allowed
                if argument_index < 0:
                    raise RuntimeError(
                        str_error(_msg_invalid_argument_index.format(argument_index)))
                # extend args if required
                # (positional args may arrive out of order, so pad the list
                # with None up to the requested index before assigning)
                if len(args) <= argument_index:
                    args.extend([None] * (argument_index - len(args) + 1))
                args[argument_index] = argument_value
        return args, kwargs
    def _run_function(self, function_name, args, kwargs):
        ''' This function actually runs a module function
        Parameters
        ----------
        function_name : `str`
            name of the function to execute with full module path
        args : `list`
            argument list
        kwargs : `dict`
            dictionary of named args
        Returns
        -------
        return_value : `object`
            whatever came out of the function
        Notes
        -----
        For security reasons this function tries to import
        the ansa function from the modules and thus does
        not use `eval` or `exec` to run any code.
        This can be broken quite easily but at least it is
        better than nothing.
        '''
        assert(isinstance(args, list))
        assert(isinstance(kwargs, dict))
        # seperate module path from function name
        module_name, function_name = function_name.rsplit('.', 1)
        # import module
        my_module = __import__(module_name, globals(),
                               locals(), (function_name, ), 0)
        # get function from module
        my_function = getattr(my_module, function_name)
        # run function
        return my_function(*args, **kwargs)
    def RunAnsaFunction(self, request: AnsaGRPC_pb2.AnsaFunction, context):
        ''' Implementation of protobuf interface function

        Deserializes the pickled arguments, executes the named function and
        returns its pickled result.
        '''
        # get function name
        function_name = request.name
        logging.info("-" * 60)
        logging.info("function: {0}".format(function_name))
        # deserialize function arguments
        args, kwargs = self._deserialize_args(request.args)
        logging.info("args : {0}".format(args))
        logging.info("kwargs : {0}".format(kwargs))
        # run the thing
        return_value = self._run_function(function_name, args, kwargs)
        logging.info("return : {0}".format(return_value))
        # serialize return
        return_anything = _serialize(return_value)
        return return_anything
    def Shutdown(self, request: AnsaGRPC_pb2.Empty, context):
        ''' Shutdown the server

        Only sets the shutdown flag; the serving main loop polls it and
        performs the actual teardown.
        '''
        self.please_shutdown = True
        return AnsaGRPC_pb2.Empty()
def serve(port, interactive, enable_logging):
    ''' Run the grpc server

    Parameters
    ----------
    port : `int`
        port on which the server shall listen
    interactive : `bool`
        if True, an interactive python console is opened in the main thread
        while the server keeps handling requests in the background
    enable_logging : `bool`
        whether to enable INFO level console logging
    Raises
    ------
    RuntimeError
        if the requested port is already in use
    '''
    print_header()
    # set logging
    if enable_logging:
        logging.basicConfig(level=logging.INFO)
    fmt_settings = "{0:14}: {1}"
    logging.info(str_info(fmt_settings.format("port", port)))
    logging.info(str_info(fmt_settings.format("interactive", interactive)))
    logging.info(str_info(fmt_settings.format(
        "enable_logging", enable_logging)))
    # grpc server options
    # We increase the transfer limit from 4MB to 1GB here
    # This is seriously bad since big stuff should be streamed
    # but I'm not getting paid for this.
    gigabyte = 1024 ** 3
    options = [
        ('grpc.max_send_message_length', gigabyte),
        ('grpc.max_receive_message_length', gigabyte)
    ]
    # run server
    # Note: Since ANSA is not threadsafe we allow only 1 worker.
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=1),
        options=options
    )
    # register our driver
    driver_servicer = LassoAnsaDriverServicer()
    AnsaGRPC_pb2_grpc.add_LassoAnsaDriverServicer_to_server(
        driver_servicer, server)
    # BUGFIX: do not shadow the requested ``port`` with the bound result,
    # otherwise the error message below would always report "Port 0".
    bound_port = server.add_insecure_port('[::]:{}'.format(port))
    # check if port was fine (add_insecure_port returns 0 on failure)
    if bound_port == 0:
        logging.error(_msg_port_taken.format(port))
        raise RuntimeError(str_error(_msg_port_taken.format(port)))
    # finally start server
    server.start()
    # let main process wait or make ANSA interactively accessible
    # while the threadpool process handles incoming commands
    try:
        if interactive:
            if enable_logging:
                print()
            import code
            code.interact(local=locals())
        else:
            while True:
                # poll once a second so a Shutdown RPC is noticed promptly
                time.sleep(1)
                if driver_servicer.please_shutdown:
                    raise KeyboardInterrupt()
    except KeyboardInterrupt:
        logging.info(_msg_stopping_server)
        server.stop(0)
    except Exception as err:
        logging.error(str_error(_msg_some_weird_error.format(str(err))))
        server.stop(0)
| StarcoderdataPython |
348712 | <reponame>ChrisDeadman/pyPhone
from pyphone.widgets import *
class RootPanel(Panel):
    """Top level panel splitting the screen into a main area and a bottom bar."""

    def __init__(self, master, cnf={}, **kw):
        from pyphone.panels import CallPanel, MessagePanel, InfoPanel, BottomPanel
        super().__init__(master, cnf, **kw)
        # The main area gets 7/8 of the height, the bottom bar the remaining 1/8.
        for row, weight in ((0, 7), (1, 1)):
            self.grid_rowconfigure(row, weight=weight)
        self.grid_columnconfigure(0, weight=1)
        # Switchable main area cycling between call, message and info views.
        self.top_panel = SwitchPanel(self, [CallPanel, MessagePanel, InfoPanel])
        self.top_panel.grid(row=0, column=0, sticky="nsew")
        # Bottom bar with a black highlight border.
        self.bottom_panel = BottomPanel(
            self,
            highlightbackground="black",
            highlightcolor="black",
            highlightthickness=4,
            bd=0,
        )
        self.bottom_panel.grid(row=1, column=0, sticky="nsew")
| StarcoderdataPython |
202300 | import json
import logging
from django import template
from cio.pipeline import pipeline
from djedi.auth import has_permission
from djedi.compat import render_to_string
register = template.Library()
logger = logging.getLogger(__name__)
@register.simple_tag(takes_context=True)
def djedi_admin(context):
    """Render the embedded Djedi admin panel for authorized requests.

    Returns an empty string for unauthorized requests; always clears the
    content pipeline afterwards.
    """
    output = u''
    if has_permission(context.get('request')):
        # Map each fetched node's version-less uri to its initial content.
        defaults = {
            node.uri.clone(version=None): node.initial
            for node in pipeline.history.list('get')
        }
        output = render_to_string('djedi/cms/embed.html', {
            'json_nodes': json.dumps(defaults).replace('</', '\\x3C/'),
        })
    # Clear pipeline
    pipeline.clear()
    return output
| StarcoderdataPython |
6660615 | <filename>sols_python/1238.py
from itertools import zip_longest


def interleave(first, second):
    """Alternate the characters of *first* and *second*.

    Characters are paired index by index; once the shorter word is
    exhausted, the remainder of the longer word is appended unchanged.
    """
    # zip_longest with an empty fill value handles the tail automatically.
    return "".join(a + b for a, b in zip_longest(first, second, fillvalue=""))


def main():
    """Read N lines of two words each and print their interleaving (URI 1238)."""
    case_count = int(input())
    # Read all phrases first, then process, matching the original I/O order.
    phrases = [input() for _ in range(case_count)]
    for phrase in phrases:
        first, second = phrase.split()[:2]
        print(interleave(first, second))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9636887 | <filename>app_tasks/apps.py<gh_stars>0
from django.apps import AppConfig
class AppTasksConfig(AppConfig):
    """Django application configuration for the ``app_tasks`` app."""
    # Dotted module path used by Django's app registry.
    name = 'app_tasks'
1872844 | <filename>saleor/graphql/order/dataloaders.py
from collections import defaultdict
from django.db.models import F
from ...order.models import Order, OrderEvent, OrderLine
from ...payment.models import Payment
from ...warehouse.models import Allocation
from ..core.dataloaders import DataLoader
class OrderLinesByVariantIdAndChannelIdLoader(DataLoader):
    context_key = "orderline_by_variant_and_channel"

    def batch_load(self, keys):
        """Load order lines grouped by ``(variant_id, channel_id)`` key pairs."""
        variant_ids = [variant_id for variant_id, _ in keys]
        channel_ids = [channel_id for _, channel_id in keys]
        # Annotate the channel so lines can be regrouped without extra queries.
        lines = OrderLine.objects.filter(
            order__channel_id__in=channel_ids, variant_id__in=variant_ids
        ).annotate(channel_id=F("order__channel_id"))
        grouped = defaultdict(list)
        for line in lines:
            grouped[(line.variant_id, line.channel_id)].append(line)
        return [grouped[key] for key in keys]
class OrderByIdLoader(DataLoader):
    context_key = "order_by_id"

    def batch_load(self, keys):
        """Fetch orders in bulk and return them in key order (None if missing)."""
        order_map = Order.objects.in_bulk(keys)
        return list(map(order_map.get, keys))
class OrderLineByIdLoader(DataLoader):
    context_key = "orderline_by_id"

    def batch_load(self, keys):
        """Fetch order lines in bulk and return them in key order."""
        line_map = OrderLine.objects.in_bulk(keys)
        return list(map(line_map.get, keys))
class OrderLinesByOrderIdLoader(DataLoader):
    context_key = "orderlines_by_order"

    def batch_load(self, keys):
        """Group order lines by order id, ordered by primary key."""
        grouped = defaultdict(list)
        queryset = OrderLine.objects.filter(order_id__in=keys).order_by("pk")
        for line in queryset.iterator():
            grouped[line.order_id].append(line)
        return [grouped.get(order_id, []) for order_id in keys]
class PaymentsByOrderIdLoader(DataLoader):
    context_key = "payments_by_order"

    def batch_load(self, keys):
        """Group payments by order id, ordered by primary key."""
        grouped = defaultdict(list)
        queryset = Payment.objects.filter(order_id__in=keys).order_by("pk")
        for payment in queryset.iterator():
            grouped[payment.order_id].append(payment)
        return [grouped.get(order_id, []) for order_id in keys]
class OrderEventsByOrderIdLoader(DataLoader):
    context_key = "orderevents_by_order"

    def batch_load(self, keys):
        """Group order events by order id, ordered by primary key."""
        grouped = defaultdict(list)
        queryset = OrderEvent.objects.filter(order_id__in=keys).order_by("pk")
        for event in queryset.iterator():
            grouped[event.order_id].append(event)
        return [grouped.get(order_id, []) for order_id in keys]
class AllocationsByOrderLineIdLoader(DataLoader):
    context_key = "allocations_by_orderline_id"

    def batch_load(self, keys):
        """Group stock allocations by order line id.

        Made consistent with the sibling loaders in this module:
        deterministic ``pk`` ordering, streaming iteration via
        ``iterator()``, and non-mutating ``.get(key, [])`` lookups so keys
        without allocations do not grow the defaultdict.
        """
        allocations = Allocation.objects.filter(
            order_line__pk__in=keys
        ).order_by("pk")
        allocations_map = defaultdict(list)
        for allocation in allocations.iterator():
            allocations_map[allocation.order_line_id].append(allocation)
        return [allocations_map.get(order_line_id, []) for order_line_id in keys]
| StarcoderdataPython |
1899107 | <reponame>the16thpythonist/ufotest<filename>ufotest/ci/server.py
import os
import json
import time
import smtplib
import datetime
import shutil
import click
from flask import Flask, request, send_from_directory, jsonify
from ufotest.config import Config, get_path
from ufotest.util import get_template, get_version
from ufotest.util import cerror, cprint, cresult
from ufotest.util import get_build_reports, get_test_reports
from ufotest.util import get_folder_size, format_byte_size
from ufotest.exceptions import BuildError
from ufotest.camera import UfoCamera
from ufotest.ci.build import BuildQueue, BuildLock, BuildRunner, BuildReport, build_context_from_request
from ufotest.ci.mail import send_report_mail
# Global ufotest configuration singleton and derived installation paths.
CONFIG = Config()
PATH = get_path()  # root installation folder of ufotest
ARCHIVE_PATH = os.path.join(PATH, 'archive')  # archived test reports
BUILDS_PATH = os.path.join(PATH, 'builds')  # build reports and artifacts
STATIC_PATH = os.path.join(PATH, 'static')  # static web assets
PLUGINS_PATH = os.path.join(PATH, 'plugins')  # installed plugins
class BuildAdapterGitlab(object):
    """Translate a GitLab push-webhook payload into the internal build format.

    The internal format mirrors the GitHub webhook layout, so this adapter
    maps the GitLab-specific field names onto that structure.
    """

    def __init__(self, data: dict):
        # Raw decoded json payload of the GitLab webhook request.
        self.data = data

    def get(self):
        """Return the normalized build dict derived from the GitLab payload."""
        repository = self.data['repository']
        return {
            'repository': {
                'name': repository['name'],
                'clone_url': repository['git_http_url'],
                'owner': {
                    'name': self.data['user_name'],
                    'email': self.data['user_email'],
                },
            },
            'ref': self.data['ref'],
            'pusher': self.get_pusher(),
            'commits': self.get_commits(),
        }

    def get_pusher(self) -> dict:
        """Return the author dict of the most recent commit in the payload."""
        return self.data['commits'][-1]['author']

    def get_commits(self) -> list:
        """Return the commit id strings, most recent first."""
        return [commit['id'] for commit in reversed(self.data['commits'])]
class BuildWorker(object):
    """
    Main loop responsible for actually executing queued build jobs.

    Designed so that :meth:`run` can serve as the main loop of a separate
    process: it polls the :class:`BuildQueue` once a second and, whenever a
    job is available and no build lock is held, runs the build and mails the
    results.

    Each queued job is a json-like dict (produced by the web server from a
    repository webhook) with at least the following fields:

    - ``repository.owner.email`` / ``repository.owner.name``: recipient of
      the report mail addressed to the repository maintainer.
    - ``repository.clone_url``: url used to clone the repository and flash
      the most recent changes to the camera.
    - ``pusher.email`` / ``pusher.name``: recipient of the report mail
      addressed to the author of the triggering push.
    - ``ref``: name of the branch the commits were pushed to; only a
      configured branch is relevant for builds.
    - ``commits``: list of commit id strings; the last one is checked out
      and used as the basis for the tests.
    """
    def __init__(self):
        # Loop condition of ``run``; never reset here — the loop is left via
        # KeyboardInterrupt.
        self.running = True
    def run(self):
        """Poll the build queue and execute arriving build jobs until interrupted."""
        try:
            while self.running:
                time.sleep(1)
                if not BuildQueue.is_empty() and not BuildLock.is_locked():
                    build_request = BuildQueue.pop()
                    try:
                        # ~ RUN THE BUILD PROCESS
                        build_report = self.run_build(build_request) # raises: BuildError
                        # ~ SEND REPORT MAILS
                        # After the build process has terminated we want to inform the relevant people of the outcome.
                        # This is mainly Two people: The maintainer of the repository is getting an email and pusher
                        self.send_report_mails(build_request, build_report)
                    except BuildError as error:
                        cerror('The build process was terminated due to a build error!')
                        cerror(str(error))
                    except smtplib.SMTPAuthenticationError as error:
                        cerror('The email credentials provided in the config file were not accepted by the server!')
                        cerror(str(error))
                    except OSError as error:
                        cerror('Report mails could not be sent because there is no network connection')
                        cerror(str(error))
        except KeyboardInterrupt:
            cprint('\n...Stopping BuildWorker')
    def run_build(self, build_request: dict) -> BuildReport:
        """Actually runs the build process based on the information in *build_request* and returns the build report.

        :param build_request: A dict, which specifies the request, that triggered the build.
        :raises BuildError: If there is an error during the build process. All kinds of exceptions are wrapped as
            this kind of error, so this is the only thing, one has to worry about
        """
        # ~ ACTUALLY BUILD
        with build_context_from_request(build_request) as build_context: # raises: BuildError
            build_runner = BuildRunner(build_context)
            build_runner.run(test_only=True)
            build_report = BuildReport(build_context)
            # Persist the report into the build's own folder for the web UI.
            build_report.save(build_context.folder_path)
        return build_report
    def send_report_mails(self, build_request: dict, build_report: BuildReport) -> None:
        """Sends the report mails with the results of *build_report* to recipients based on the *build_request*

        :param build_request: A dict, which specifies the request, that triggered the build.
        :param build_report: The BuildReport which was generated from the build process.
        :raises smtplib.SMTPAuthenticationError: If the username and password configured within the config file are
            not accepted by the SMTPServer
        :raises OSError: If there is no connection to the mail server.
        """
        # One mail to the repository maintainer ...
        maintainer_email = build_request['repository']['owner']['email']
        maintainer_name = build_request['repository']['owner']['name']
        send_report_mail(maintainer_email, maintainer_name, build_report)
        # ... and one to the person whose push triggered the build.
        pusher_email = build_request['pusher']['email']
        pusher_name = build_request['pusher']['name']
        send_report_mail(pusher_email, pusher_name, build_report)
server = Flask('UfoTest CI Server', static_folder=None)
@server.route('/', methods=['GET'])
def home():
    """
    This method returns the HTML content which will display the home page for the ufotest web interface.
    On default the home page consists of various informative panels, which show the current state of the ufotest
    project, the current installation, the current hardware and a list for both the most recent builds and the most
    recent test reports.
    :return: The rendered string HTML template
    """
    template = get_template('home.html')
    template = CONFIG.pm.apply_filter('home_template', template)
    # The integer amount of how many items to be shown for both the most recent test and most recent build reports.
    recent_count = CONFIG.pm.apply_filter('home_recent_count', 5)
    test_reports = get_test_reports()
    recent_builds = get_build_reports()[:recent_count]
    recent_tests = test_reports[:recent_count]
    # So we derive the summary values about the state of the hardware and firmware from the most recent test report. On
    # default the test reports returned by "get_test_reports" are sorted by recentness, which would mean that we would
    # only need to get the first item from the respective list. BUT, a filter hook applies to the return value of these
    # functions which could mean that possibly a filter is applied which changes the ordering, so to be sure that this
    # is the most recent one, we sort it again.
    if len(recent_tests) != 0:
        # NOTE(review): most_recent_test_report is assigned but never used in
        # the rest of this function -- dead code, or a panel was removed?
        most_recent_test_report = sorted(recent_tests, key=lambda d: d['start_iso'], reverse=True)[0]
    if len(recent_builds) != 0:
        most_recent_build_report = sorted(recent_builds, key=lambda d: d['start_iso'], reverse=True)[0]
    # The camera class can be replaced by plugins; camera.poll() below reports
    # whether the hardware is actually reachable right now.
    camera_class = CONFIG.pm.apply_filter('camera_class', UfoCamera)
    camera = camera_class(CONFIG)
    # Each dict is one summary row on the home page; a literal False inserts a
    # visual separator between the sections.
    status_summary = [
        # UFOTEST INFORMATION
        {
            'id': 'ufotest-version',
            'label': 'UfoTest Version',
            'value': get_version()
        },
        {
            'id': 'installation-folder',
            'label': 'Installation Folder',
            'value': CONFIG.get_path()
        },
        {
            'id': 'report-count',
            'label': 'Total Test Reports',
            'value': len(test_reports)
        },
        {
            'id': 'loaded-plugins',
            'label': 'Loaded Plugins',
            'value': len(CONFIG.pm.plugins)
        },
        False,
        {
            'id': 'repository',
            'label': 'Source Repository',
            'value': f'<a href="{CONFIG.get_repository_url()}">GitHub</a>'
        },
        {
            'id': 'documentation',
            'label': 'Project Documentation',
            'value': f'<a href="{CONFIG.get_documentation_url()}">ReadTheDocs</a>'
        },
        False,
        # FIRMWARE / BUILD INFORMATION
        {
            'id': 'firmware-version',
            'label': 'Firmware Version',
            'value': '1.0'
        },
        {
            'id': 'recent-build',
            'label': 'Recent Build',
            'value': most_recent_build_report['start_iso'] if len(recent_builds) != 0 else 'No build yet'
        },
        # HARDWARE INFORMATION
        False,
        {
            'id': 'camera-class',
            'label': 'Camera Class',
            'value': str(camera_class.__name__)
        },
        {
            'id': 'available',
            'label': 'Camera Available',
            'value': f'<i class="fas fa-circle {"green" if camera.poll() else "red"}"> </i>'
        },
        {
            'id': 'hardware-version',
            'label': 'Hardware Version',
            'value': camera.get_prop('hardware_version')
        },
        {
            'id': 'sensor-dimensions',
            'label': 'Sensor Dimensions',
            'value': f'{CONFIG.get_sensor_width()} x {CONFIG.get_sensor_height()}'
        },
        {
            'id': 'sensor-version',
            'label': 'Sensor Version',
            'value': camera.get_prop('sensor_version')
        }
    ]
    # Plugins get a final chance to add/remove/replace summary rows.
    status_summary = CONFIG.pm.apply_filter('home_status_summary', status_summary)
    # ~ CALCULATING DISK USAGE
    # TODO: The unit is hardcoded. This could be part of the config. Super low prio though
    used_space = get_folder_size(CONFIG.get_path())
    # https://stackoverflow.com/questions/48929553/get-hard-disk-size-in-python
    _, _, free_space = shutil.disk_usage('/')
    context = {
        'status_summary': status_summary,
        'recent_builds': recent_builds,
        'recent_tests': recent_tests,
        # 13.09.2021: I decided that displaying the amount of used space would be a good idea for the home screen,
        # because now the complete source repo content is saved for each build report and depending how large the
        # source repo is (gigabytes?) This could fill up the disk rather quickly...
        'used_space': used_space,
        'free_space': free_space
    }
    return template.render(context), 200
# == THE CONFIG WEB EDITOR
@server.route('/config', methods=['GET'])
def config():
    """Render the web editor page pre-filled with the current config.toml."""
    template = get_template('config.html')
    with open(get_path('config.toml')) as config_file:
        lines = config_file.readlines()
    return template.render({
        'line_count': len(lines),
        'config_content': ''.join(lines)
    }), 200
@server.route('/config/save', methods=['POST'])
def save_config():
    """Persist the posted JSON payload's 'content' field as config.toml.

    Returns 200 on success, otherwise a generic 400 (malformed JSON, missing
    'content' key, unwritable file, ...).
    """
    try:
        data = request.get_json()
        content = data['content']
        with open(get_path('config.toml'), mode='w') as config_file:
            config_file.write(content)
        return 'Config file saved', 200
    except Exception:
        # BUG FIX: this was a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt. Catching Exception keeps the same
        # best-effort behaviour without hijacking interpreter shutdown.
        return 'There has been an error', 400
# == THE PLUGINS WEB VIEW
@server.route('/plugins', methods=['GET'])
def plugins():
    """Render the plugins overview page (needs no template context)."""
    return get_template('plugins.html').render({}), 200
@server.route('/plugins/<path:path>')
def plugin_documentation(path):
    # Serve static plugin documentation files straight from the plugins folder.
    return send_from_directory(PLUGINS_PATH, path)
# == GITHUB/GITLAB PUSH EVENTS
@server.route('/push/github', methods=['POST'])
def push_github():
    # GitHub webhooks already deliver the payload in the schema the build
    # queue expects, so the request body is enqueued unchanged.
    data = request.get_json()
    BuildQueue.push(data)
    return 'New build added to the queue', 200
@server.route('/push/gitlab', methods=['POST'])
def push_gitlab():
    # GitLab payloads differ from the GitHub schema, so they are translated by
    # BuildAdapterGitlab before entering the queue.
    data = request.get_json()
    try:
        adapter = BuildAdapterGitlab(data)
        BuildQueue.push(adapter.get())
    except Exception as e:
        msg = '[!] An error occurred while pushing a gitlab request into the build queue: {}'.format(str(e))
        click.secho(msg, fg='red')
    # NOTE(review): a 200 "added" response is returned even when the push
    # failed above -- the webhook sender never learns about the failure.
    return 'New build added to the queue', 200
@server.route('/archive')
def archive_list():
    """Render the list of archived test reports, newest first."""
    reports = []
    # Only the top level of the archive folder matters: each immediate sub
    # folder corresponds to one test run, hence the break after one pass.
    for root, folders, files in os.walk(ARCHIVE_PATH):
        for folder in folders:
            report_json_path = os.path.join(root, folder, 'report.json')
            # A run that is still in progress already has a folder but no
            # report.json yet; opening it blindly would raise.
            if os.path.exists(report_json_path):
                with open(report_json_path, mode='r') as report_json_file:
                    reports.append(json.load(report_json_file))
        break
    sorted_reports = sorted(
        reports,
        key=lambda r: datetime.datetime.fromisoformat(r['start_iso']),
        reverse=True
    )
    template = get_template('archive_list.html')
    return template.render({'reports': sorted_reports}), 200
@server.route('/archive/<path:path>')
def archive_detail(path):
    # Serve any file from a test report folder (html report, images, ...).
    return send_from_directory(ARCHIVE_PATH, path)
@server.route('/builds')
def builds_list():
    """Render the list of build reports, newest first."""
    reports = []
    # One immediate sub folder per build; only walk the top level.
    for root, folders, files in os.walk(BUILDS_PATH):
        for folder in folders:
            report_json_path = os.path.join(root, folder, 'report.json')
            if os.path.exists(report_json_path):
                with open(report_json_path, mode='r') as report_json_file:
                    reports.append(json.load(report_json_file))
        break
    sorted_reports = sorted(
        reports,
        key=lambda r: datetime.datetime.fromisoformat(r['start_iso']),
        reverse=True
    )
    template = get_template('builds_list.html')
    return template.render({'reports': sorted_reports}), 200
@server.route('/builds/<path:path>')
def builds_detail(path):
    # Serve any file from a build report folder.
    return send_from_directory(BUILDS_PATH, path)
@server.route('/static/<path:path>')
def static(path):
    # Manual static route: the Flask app was created with static_folder=None.
    return send_from_directory(STATIC_PATH, path)
@server.route('/favicon.ico')
def favicon():
    # Browsers request /favicon.ico directly, outside the /static prefix.
    return send_from_directory(STATIC_PATH, 'favicon.ico')
| StarcoderdataPython |
1836694 | <filename>count_operations.py
import torch
import torchvision.models as models
import torch.nn as nn
import pretrainedmodels
from torchsummary import summary
from torch.autograd import Variable
from collections import OrderedDict
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout, to_agraph
import pygraphviz as pgv
import json
import copy
import numpy as np
import pickle
import os
from modelsummary import summaryX
from utils import Utils
from calc_tam_layer import *
import data_loader_cifar10
from operations import * # Pytorch operations (layers)
import time
from simplecnn import SimpleCNN
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def generateSummary(model, img_shape = (3,244,244), automatedModel=True, input_image=0):
    """Run summaryX over *model* and return its layer summary.

    Prints the error and returns None when summary generation fails for any
    reason, so callers can simply skip the model.
    """
    try:
        return summaryX(img_shape, model, automatedModel, input_image)
    except Exception as e:
        print(f'Exception generating model summary: {e}')
        return None
def generateModels():
    """Instantiate every torchvision model and collect its layer summary.

    Returns a dict mapping model name -> {'summary': <layer OrderedDict>};
    models whose summary generation fails are skipped entirely.
    """
    # All lowercase, callable, non-dunder attributes of torchvision.models are
    # model constructors (resnet18, vgg16, ...).
    model_names = sorted(name for name in models.__dict__
        if name.islower() and not name.startswith("__")
        and callable(models.__dict__[name]))
    torchModels = {}
    for modelName in model_names:
        print(f"\n---- {modelName} [...] ----")
        print(modelName)
        # Instantiate on GPU when available, CPU otherwise.
        model = models.__dict__[modelName]().to(device)
        print("[ Generating Summary ... ]")
        # Generate the stateTransition graph of the model and get the graph
        # NOTE(review): image size (3, 244, 244) looks like a typo for the
        # usual (3, 224, 224) -- confirm before relying on the summaries.
        graph = generateSummary(model, img_shape=(3,244,244), automatedModel=False)
        print("[ Storing Summary ... ]")
        if graph is not None:
            torchModels[modelName] = OrderedDict()
            #stateTransitionFreq = generateDictStateTransitionGraph(modelSummary)
            torchModels[modelName]['summary'] = graph
        else:
            continue
        # The model itself is no longer needed
        del model
        torch.cuda.empty_cache()
    return torchModels
def getParamDependingLayer(layerName):
    """Return the hyper-parameter name that characterises a layer type.

    Convolutions and fixed pooling layers are described by their kernel size,
    adaptive pooling by its output size, dropout by its rate and linear layers
    by their output width. Layers without such a parameter (activations,
    batch norm, ...) yield None.
    """
    name = layerName.lower()
    if "conv" in name:
        return "kernel_size"
    if "pool" in name:
        return "output_size" if "adaptive" in name else "kernel_size"
    if "adaptive" in name:
        return "output_size"
    if "dropout" in name:
        return "p"
    if "linear" in name:
        return "out_features"
    return None
#Count operations
# Script entry point: build every torchvision model, gather the characteristic
# hyper-parameter value of each layer from the summaries, deduplicate the
# values per layer type and report how many distinct operations exist.
if __name__ == "__main__":
    population = generateModels()
    operations = {}
    for ind in population.keys():
        print (population[ind]['summary'].keys())
        for layerName, values in population[ind]['summary'].items():
            # Summary keys look like "Conv2d-3"; keep only the layer type.
            layerName = layerName.split("-")[0]
            #print(values)
            param = getParamDependingLayer(layerName)
            if layerName not in operations.keys():
                operations[layerName] = []
                if param is not None:
                    operations[layerName] += [values[param]]
                #else:
                #    print(f"Layer: {layerName}, had {param}")
            else:
                if param is not None:
                    operations[layerName] += [values[param]]
                #else:
                #    print(f"Layer: {layerName}, had {param}")
    #operations[]
    #exit()
    # Remove duplicates
    for layer in operations:
        operations[layer] = set(operations[layer])
    # count operations
    # A parameter-less layer type (empty set) still counts as one operation.
    count = 0
    for layer in operations:
        size = len(operations[layer])
        if size == 0:
            count += 1 #relu, etc
        else:
            count += size
    print (operations)
    print(f'{count} operations!')
| StarcoderdataPython |
11200007 | import json
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
from service.account.account_base_service import AccountBaseService
from service.common.common_service import CommonService
class ApiPermissionCheck(MiddlewareMixin):
    """
    Middleware that checks call permissions for the HTTP API.
    """
    def process_request(self, request):
        """Allow or reject an incoming request before the view runs.

        Returning None lets the request continue; returning an HttpResponse
        short-circuits it with a failure payload.
        """
        if request.path.startswith('/api/v1.0/accounts/login'):
            # The login endpoint is exempt from the permission check.
            return
        if request.path.startswith('/api/'):
            # Paths under /api/ are API calls and need extra permission
            # verification; a user already logged into the loonflow admin
            # backend is allowed to call directly.
            if request.user.is_authenticated:
                request.META.update(dict(HTTP_APPNAME='loonflow'))
                request.META.update(dict(HTTP_USERNAME=request.user.username))
                return
            flag, msg = self.token_permission_check(request)
            if not flag:
                return HttpResponse(json.dumps(dict(code=-1, msg='权限校验失败:{}'.format(msg), data=[])))
    def token_permission_check(self, request):
        """Validate the caller's app token from signature/timestamp headers.

        :return: (flag, msg) -- flag is True when the signature is accepted.
        """
        signature = request.META.get('HTTP_SIGNATURE')
        timestamp = request.META.get('HTTP_TIMESTAMP')
        app_name = request.META.get('HTTP_APPNAME')
        if not app_name:
            return False, '未提供appname(调用loonflow接口需要鉴权,请根据文档中"调用授权"部分说明来调用)'
        app_token_obj, msg = AccountBaseService.get_token_by_app_name(app_name)
        if not app_token_obj:
            return False, 'appname未授权,请联系管理员(调用loonflow接口需要鉴权,请根据文档中"调用授权"部分说明来调用)'
        return CommonService.signature_check(timestamp, signature, app_token_obj.token)
| StarcoderdataPython |
4821174 | <reponame>PotatoHD404/py-exceptions<filename>examples/exclude_example.py
from pyexceptions import handle_exceptions
def divide(a, b):
    """Return the true-division quotient of *a* by *b*.

    Propagates ZeroDivisionError when *b* is zero -- which is exactly what
    this example script relies on.
    """
    quotient = a / b
    return quotient
def real_main():
    """Deliberately divide five by zero via divide() to raise ZeroDivisionError."""
    numerator, denominator = 5, 0
    result = divide(numerator, denominator)
    print(result)
def wrapper():
    # Intermediate call frame: exists only to deepen the traceback that the
    # handle_exceptions decorator on main() will render.
    real_main()
@handle_exceptions(exclude=2)
def main():
    # NOTE(review): exclude=2 presumably trims two frames from the rendered
    # traceback -- confirm against the pyexceptions documentation.
    wrapper()
# Run the failing example only when executed directly as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
143703 | """
Utility to call cloudformation command with args
"""
import os
import logging
import platform
import subprocess
import sys
from samcli.cli.global_config import GlobalConfig
LOG = logging.getLogger(__name__)
def execute_command(command, args, template_file):
    """Run `aws cloudformation <command>` with the given extra arguments.

    :param command: the cloudformation sub-command to run (e.g. "deploy")
    :param args: iterable of extra CLI arguments to forward
    :param template_file: optional template path, appended as --template-file
    """
    LOG.debug("%s command is called", command)
    try:
        aws_cmd = find_executable("aws")

        # Add SAM CLI information so AWS CLI telemetry knows about the caller.
        gc = GlobalConfig()
        env = os.environ.copy()
        if gc.telemetry_enabled:
            env["AWS_EXECUTION_ENV"] = "SAM-" + gc.installation_id

        full_args = list(args)
        if template_file:
            # --template-file was parsed separately, so re-append it manually.
            full_args.extend(["--template-file", template_file])

        subprocess.check_call([aws_cmd, 'cloudformation', command] + full_args, env=env)
        LOG.debug("%s command successful", command)
    except subprocess.CalledProcessError as e:
        # The underlying aws command already printed its error to the user;
        # propagate its exit code.
        LOG.debug("Exception: %s", e)
        sys.exit(e.returncode)
def find_executable(execname):
    """Locate *execname* on PATH and return the candidate name that resolved.

    On Windows the ".cmd" and ".exe" variants are tried before the bare name.

    BUG FIX: the previous implementation probed each candidate by actually
    spawning it with subprocess.Popen and never waiting on the child, which
    both executed the program as a side effect and leaked unreaped processes.
    shutil.which performs the same PATH lookup without running anything.

    :param execname: base name of the executable to look for (e.g. "aws")
    :return: the candidate name that was found on PATH
    :raises OSError: if none of the candidate names can be found
    """
    # Local import keeps the module-level import surface unchanged.
    import shutil

    if platform.system().lower() == 'windows':
        options = [
            "{}.cmd".format(execname),
            "{}.exe".format(execname),
            execname
        ]
    else:
        options = [execname]

    for name in options:
        if shutil.which(name) is not None:
            return name

    raise OSError("Cannot find AWS CLI installation, was looking at executables with names: {}".format(options))
| StarcoderdataPython |
4828715 | """Module with the common cryptography-assotiated utils.
This module uses libsodium as a backend."""
from typing import Optional
from pysodium import (
crypto_sign_keypair,
crypto_sign_detached,
crypto_sign_verify_detached,
crypto_hash_sha256,
crypto_hash_sha256_BYTES,
crypto_sign_SECRETKEYBYTES,
crypto_sign_PUBLICKEYBYTES,
crypto_sign_BYTES,
)
HASH_BYTES_LEN = crypto_hash_sha256_BYTES
PUBLIC_KEY_BYTES_LEN = crypto_sign_PUBLICKEYBYTES
SECRET_KEY_BYTES_LEN = crypto_sign_SECRETKEYBYTES
SIGNATURE_BYTES_LEN = crypto_sign_BYTES
# Classes here not only used as a storage, but also contain verification, so it's OK.
# pylint: disable=too-few-public-methods
class _FixedByteArray:
"""Base class for types which store a bytes sequence of a fixed length."""
def __init__(self, data: bytes, expected_len: int):
if len(data) != expected_len:
raise ValueError("Incorrect data length (expected {}, got {}".format(expected_len, len(data)))
self.value = data
def __eq__(self, other: object) -> bool:
if not isinstance(other, _FixedByteArray):
return False
return self.value == other.value
def __str__(self) -> str:
return self.hex()
def hex(self) -> str:
"""Returns the hex representation of the value."""
return self.value.hex()
class Hash(_FixedByteArray):
    """Representation of the SHA-256 hash."""

    def __init__(self, hash_bytes: bytes):
        super().__init__(hash_bytes, HASH_BYTES_LEN)

    @classmethod
    def hash_data(cls, data: Optional[bytes]) -> "Hash":
        """Return the SHA-256 Hash of *data*.

        `None` is treated the same as the empty byte sequence.
        """
        payload = data if data is not None else bytes()
        return cls(crypto_hash_sha256(payload))
class PublicKey(_FixedByteArray):
    """Representation of a Curve25519 public key (PUBLIC_KEY_BYTES_LEN bytes)."""

    def __init__(self, key: bytes):
        super().__init__(key, PUBLIC_KEY_BYTES_LEN)
class SecretKey(_FixedByteArray):
    """Representation of a Curve25519 secret key (SECRET_KEY_BYTES_LEN bytes)."""

    def __init__(self, key: bytes):
        super().__init__(key, SECRET_KEY_BYTES_LEN)
class KeyPair:
    """A Curve25519 public/secret key pair."""

    def __init__(self, public_key: PublicKey, secret_key: SecretKey):
        # libsodium embeds the public key in the tail of the secret key, so we
        # can verify that the two keys actually belong together (this check is
        # backend-specific and relies on the libsodium layout).
        embedded_public = secret_key.value[PUBLIC_KEY_BYTES_LEN:]
        if embedded_public != public_key.value:
            raise ValueError("Public key doesn't correspond to the secret key")
        self.public_key = public_key
        self.secret_key = secret_key

    @classmethod
    def generate(cls) -> "KeyPair":
        """Generate a new random keypair."""
        raw_public, raw_secret = crypto_sign_keypair()
        return cls(PublicKey(raw_public), SecretKey(raw_secret))
class Signature(_FixedByteArray):
    """Representation of a Curve25519 detached signature."""

    def __init__(self, signature: bytes):
        super().__init__(signature, SIGNATURE_BYTES_LEN)

    @classmethod
    def sign(cls, data: bytes, key: SecretKey) -> "Signature":
        """Sign *data* with the secret *key* and return the detached signature."""
        return cls(crypto_sign_detached(data, key.value))

    def verify(self, data: bytes, key: PublicKey) -> bool:
        """Return True when this signature matches *data* under the public *key*."""
        try:
            crypto_sign_verify_detached(self.value, data, key.value)
        except ValueError:
            # libsodium signals a failed verification with ValueError.
            return False
        return True
| StarcoderdataPython |
1862243 | from . import Connection
class ConnectionTCP(Connection):
    """Connection implementation that talks to the device over a TCP socket.

    The endpoint is taken from the SELPI_CONNECTION_TCP_HOSTNAME and
    SELPI_CONNECTION_TCP_PORT environment variables at connect time.
    """

    def _connect(self):
        """Open the TCP connection described by the environment variables."""
        # BUG FIX: "os" and "socket" were used but never imported anywhere in
        # this module, so _connect() raised NameError. Import them locally to
        # keep the module-level import surface unchanged.
        import os
        import socket

        hostname = os.getenvb(b'SELPI_CONNECTION_TCP_HOSTNAME')
        port = int(os.getenvb(b'SELPI_CONNECTION_TCP_PORT'))
        self.__sock = socket.create_connection((hostname, port))

    def _read(self, length: int):
        """Read up to *length* bytes from the socket."""
        return self.__sock.recv(length)

    def _write(self, data: bytes):
        """Send *data*; returns the number of bytes actually sent."""
        return self.__sock.send(data)
| StarcoderdataPython |
3445758 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
# This script fails on purpose: it writes a short message and then raises, so
# callers can exercise their error-handling / exception-reporting paths.
sys.stdout.write("This is a program which throws an exception.")
raise Exception("Paf Boum Boum Bada Boum !!!")
| StarcoderdataPython |
6643480 | from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple
import numpy as np
from scipy.sparse import diags
import lumos.numpy as lnp
from lumos.optimal_control.collocation import (
build_lagrange_differential_matrix,
build_lagrange_integration_matrix,
CollocationEnum,
get_collocation_points,
)
class Transcription(ABC):
    """Discretization scheme turning a continuous-time problem into algebraic form.

    Subclasses supply two matrices A and B so that the linear continuity
    constraint over one interval reads:

        A @ x - B @ x_dot * T = 0

    where, for an interval with m stages and d states, x and x_dot are [m, d]
    and T is the interval length (the matrix row count is given by
    num_constraints_per_interval). Formulating both differential and
    integration schemes this way keeps the interface uniform and keeps the
    continuity constraints ordered by state rather than mixing states and
    state derivatives.
    """

    num_stages_per_interval: int
    num_constraints_per_interval: int

    @property
    def _cont_matrix_shape(self) -> Tuple[int, int]:
        # (rows, cols) shared by both continuity matrices.
        return (self.num_constraints_per_interval, self.num_stages_per_interval)

    def get_continuity_matrices(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return the (A, B) pair that defines the continuity constraint."""
        return self._get_A_matrix(), self._get_B_matrix()

    def continuity_con(
        self, x: lnp.ndarray, x_dot: lnp.ndarray, interval_length: float
    ) -> lnp.ndarray:
        """Evaluate the continuity residual A @ x - B @ x_dot * T."""
        a_matrix, b_matrix = self.get_continuity_matrices()
        return a_matrix @ x - b_matrix @ x_dot * interval_length

    @abstractmethod
    def _get_A_matrix(self) -> np.ndarray:
        pass

    @abstractmethod
    def _get_B_matrix(self) -> np.ndarray:
        pass
class ForwardEuler(Transcription):
    """x_{i+1} - x_{i} - x_dot_{i}*dt = 0"""

    num_stages_per_interval: int = 2
    num_constraints_per_interval: int = 1

    def _get_A_matrix(self):
        # Difference of the two stage values; equivalent to
        # diags([-1, 1], [0, 1], shape=(1, 2)).toarray().
        return np.array([[-1.0, 1.0]])

    def _get_B_matrix(self):
        # Derivative taken at the left stage only.
        return np.array([[1.0, 0.0]])
class Trapezoidal(Transcription):
    """x_{i+1} - x_{i} - (x_dot_{i+1} + x_dot_{i}) * dt/2 = 0"""

    num_stages_per_interval: int = 2
    num_constraints_per_interval: int = 1

    def _get_A_matrix(self):
        # Equivalent to diags([-1, 1], [0, 1], shape=(1, 2)).toarray().
        return np.array([[-1.0, 1.0]])

    def _get_B_matrix(self):
        # Averaged derivative over both stages.
        return np.array([[0.5, 0.5]])
class Collocation(Transcription):
    """Transcription with Legendre collocation
    Interval of collocation is converted to [0, 1] from the standard of [-1, 1] to make
    downstream computations easier.
    More details, refer to: AN OVERVIEW OF THREE PSEUDOSPECTRAL METHODS FOR THE
    NUMERICAL SOLUTION OF OPTIMAL CONTROL PROBLEMS
    https://hal.archives-ouvertes.fr/hal-01615132/document
    """
    interp_points: np.ndarray
    collocation_points: np.ndarray
    def __init__(self, num_stages: int):
        self.num_stages_per_interval: int = num_stages
        # Subclass hooks must run before the differentiation matrix can be
        # built on the resulting support.
        self._set_collocation_points(num_stages)
        self._set_interp_points()
        self.d_matrix: np.ndarray = build_lagrange_differential_matrix(
            support=self.interp_points, evaluation_points=self.collocation_points
        )
    @property
    def num_constraints_per_interval(self):
        # One continuity constraint per collocation point.
        return len(self.collocation_points)
    @abstractmethod
    def _set_collocation_points(self, num_stages: int) -> None:
        pass
    @abstractmethod
    def _set_interp_points(self) -> None:
        pass
    def _get_A_matrix(self) -> np.ndarray:
        # NOTE(review): a stale comment here claimed an extra factor of two is
        # needed for the [-1, 1] -> [0, 1] mapping; the points themselves are
        # already mapped in _set_collocation_points, so the differential matrix
        # built on that support needs no additional scaling -- confirm.
        return self.d_matrix
    def _get_B_matrix(self) -> np.ndarray:
        # Identity shifted one column right: derivatives are taken at the
        # collocation points, which exclude the first interpolation point.
        return diags([1], [1], shape=self._cont_matrix_shape).toarray()
class LGR(Collocation):
    """Transcription with LGR (Legendre-Gauss-Radau) collocation"""
    def __init__(self, num_stages: int = 3):
        super().__init__(num_stages=num_stages)
    def _set_collocation_points(self, num_stages: int) -> None:
        # map collocation points from [-1, 1] to [0, 1]
        # num_stages - 1 collocation points: the interval start is an
        # interpolation point but not a collocation point.
        self.collocation_points = (
            get_collocation_points(
                num_points=num_stages - 1, scheme=CollocationEnum.LGR
            )
            + 1
        ) / 2
    def _set_interp_points(self) -> None:
        # Add the 0 to the interp_points
        # (interpolation support = interval start + all collocation points)
        self.interp_points = np.insert(self.collocation_points, 0, 0)
class LGRIntegral(LGR):
    """Integral variant of the LGR scheme"""
    def __init__(self, num_stages: int = 3):
        super().__init__(num_stages=num_stages)
        # Fit the polynomial on the derivatives (i.e. on the collocation
        # points) and then evaluate the integral at the interpolation points
        # (except for the first one).
        self.i_matrix = build_lagrange_integration_matrix(
            support=self.interp_points, evaluation_points=self.collocation_points,
        )
    def _get_A_matrix(self) -> np.ndarray:
        # Each row represents x_k - x_0: -1 in the first column, identity after.
        return np.hstack(
            [
                -np.ones((self.num_stages_per_interval - 1, 1)),
                np.eye(self.num_stages_per_interval - 1),
            ]
        )
    def _get_B_matrix(self) -> np.ndarray:
        # Integration matrix mapping stage derivatives to integrated increments.
        return self.i_matrix
# Registry mapping class name -> Transcription subclass; consumed by
# get_transcription_options() and make_transcription().
TRANSCRIPTION_OPTIONS = {
    t.__name__: t for t in (ForwardEuler, Trapezoidal, LGR, LGRIntegral)
}
def get_transcription_options() -> List[str]:
    """Return names of available transcription classes.

    Returns:
        List[str]: a list of names of the available Transcription classes.
    """
    # list(dict) iterates the keys directly; no comprehension over .keys()
    # is needed (flake8-comprehensions C4xx).
    return list(TRANSCRIPTION_OPTIONS)
def make_transcription(name: str, kwargs: Dict[str, Any] = None) -> Transcription:
    """Build a Transcription object from its class name.

    Args:
        name (str): name of the transcription class.
        kwargs (Dict[str, Any], optional): extra keyword arguments forwarded to
            the transcription constructor. Defaults to None (treated as empty).

    Raises:
        RuntimeError: if *name* is not a registered transcription option.

    Returns:
        Transcription: object that defines a discretization scheme.
    """
    if name not in TRANSCRIPTION_OPTIONS:
        raise RuntimeError(
            "name is not a valid transcription type. "
            f"Valid options are {get_transcription_options()}"
        )
    return TRANSCRIPTION_OPTIONS[name](**(kwargs or {}))
| StarcoderdataPython |
11305751 | <reponame>CACF/Lost-and-Stolen-Device-Subsystem
from flask import abort
from functools import wraps
from ..models.case import Case
def restricted(f):
    """Decorator for case views: users with the "staff" role may only access
    cases they created themselves; any other combination runs the wrapped
    view unchanged.
    """
    @wraps(f)
    def user_level(self, tracking_id, **kwargs):
        case = Case.get_case(tracking_id)
        # Reject a staff member who is not the creator of the case with 401.
        # NOTE(review): assumes kwargs always carries 'status_args' with
        # 'user_id' and 'role' keys -- a missing 'status_args' would raise
        # AttributeError here; confirm against the callers.
        if case and case['creator']['user_id'] != kwargs.get('status_args').get('user_id') and kwargs.get('status_args').get('role') == "staff":
            abort(401, description="Unauthorized Access")
        return f(self, tracking_id=tracking_id, **kwargs)
    return user_level
| StarcoderdataPython |
1984465 | <filename>src/ppopt/plot.py
import time
from math import atan2
from typing import List
import numpy
import plotly.graph_objects as go
import pypoman
from matplotlib import pyplot
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from .solution import Solution
from .utils.general_utils import make_column
def sort_clockwise(vertices: List[numpy.ndarray]) -> List[numpy.ndarray]:
    """
    Orders the vertices by their angle around the centroid, which is required
    for rendering: an unordered vertex list draws a self-intersecting mess.

    :param vertices: polygon vertices (2D points)
    :return: the vertices ordered by angle about their centroid
    """
    num_vertices = len(vertices)
    centroid_x = sum(vertex[0] for vertex in vertices) / num_vertices
    centroid_y = sum(vertex[1] for vertex in vertices) / num_vertices

    def angle_about_centroid(vertex):
        return atan2(vertex[1] - centroid_y, vertex[0] - centroid_x)

    return sorted(vertices, key=angle_about_centroid)
# TODO: specify dimensions to fix
def gen_vertices(solution: Solution):
    """
    Generates, for every critical region in *solution*, its polytope vertices
    ordered by angle about their centroid (ready for rendering).

    :param solution: a multiparametric solution
    :return: a list with one ordered vertex collection per critical region
    """
    return [
        sort_clockwise(pypoman.compute_polytope_vertices(region.E, region.f))
        for region in solution.critical_regions
    ]
def plotly_plot(solution: Solution, save_path: str = None, show=True) -> None:
    """
    Makes a plot via the plotly library; good for interactive figures that can
    be embedded into webpages and handled interactively.

    :param solution: a multiparametric solution to plot
    :param save_path: if a directory path is specified, saves an html copy and a png there
    :param show: if True displays the plot, otherwise does not display
    :return: no return, creates a graph of the solution
    """
    fig = go.Figure()

    vertex_list = gen_vertices(solution)

    # One filled polygon trace per critical region.
    for i, region_v in enumerate(vertex_list):
        x_ = [vertex[0] for vertex in region_v]
        y_ = [vertex[1] for vertex in region_v]
        fig.add_trace(go.Scatter(x=x_, y=y_, fill="toself", name=f'Critical Region {i}'))

    fig.update_layout(
        autosize=False,
        width=1000,
        height=1000
    )

    fig.update_layout(
        hoverlabel=dict(
            bgcolor='white'
        )
    )

    if save_path is not None:
        file_tag = str(time.time())
        fig.write_image(save_path + file_tag + ".png")
        # BUG FIX: the keyword was misspelled "include_plotyjs", which makes
        # plotly's write_html raise a TypeError for an unexpected argument.
        fig.write_html(save_path + file_tag + ".html", include_plotlyjs=False, full_html=False)

    if show:
        fig.show()
def parametric_plot(solution: Solution, save_path: str = None, show=True) -> None:
    """
    Makes a simple plot from a solution. This uses matplotlib to generate a plot, it is the general plotting backend.

    :param solution: a multiparametric solution
    :param save_path: if specified saves the plot in the directory
    :param show: Keyword argument, if True displays the plot otherwise does not display
    :return: no return, creates graph of solution
    """
    vertex_list = gen_vertices(solution)
    # One matplotlib Polygon patch per critical region.
    polygon_list = [Polygon(v) for v in vertex_list]
    fig, ax = pyplot.subplots()
    # NOTE(review): pyplot.cm.get_cmap is deprecated in recent matplotlib
    # releases (matplotlib.colormaps['Paired'] is the replacement) -- confirm
    # the pinned matplotlib version before changing.
    cm = pyplot.cm.get_cmap('Paired')
    # Random scalar per region, mapped through the colormap for distinct fills.
    colors = 100 * numpy.random.rand(len(solution.critical_regions))
    p = PatchCollection(polygon_list, cmap=cm, alpha=.8, edgecolors='black', linewidths=1)
    p.set_array(colors)
    ax.add_collection(p)
    pyplot.autoscale()
    if save_path is not None:
        pyplot.savefig(save_path + str(time.time()) + ".png", dpi=1000)
    if show:
        pyplot.show()
def parametric_plot_1D(solution: Solution, save_path: str = None, show=True) -> None:
    """
    Makes a simple plot of a 1D parametric solution

    :param solution: a multiparametric solution with a single parameter
    :param save_path: if specified saves the plot in the directory
    :param show: if True displays the plot, otherwise does not display
    :return: None (plots the solution, optionally saving it)
    """
    # check if the solution is actually 1 dimensional
    if solution.theta_dim() != 1:
        print(f"Solution is not 1D, the dimensionality of the solution is {solution.theta_dim()}")
        return None
    # see the dimensionality of the response variable x*
    # x_dim = solution.program.num_x()
    # set up the plotting object
    fig, ax = pyplot.subplots()
    # plot the critical regions w.r.t. x*
    for critical_region in solution.critical_regions:
        # get extents
        # NOTE(review): element-wise division f / E yields one boundary per
        # constraint row only in the 1-D case and assumes E has no zero
        # entries -- confirm upstream guarantees.
        boundaries = critical_region.f / critical_region.E
        y = [critical_region.evaluate(theta=make_column(boundary)).flatten() for boundary in boundaries]
        ax.plot(boundaries, y, solid_capstyle='round')
    if save_path is not None:
        pyplot.savefig(save_path + str(time.time()) + ".png", dpi=1000)
    if show:
        pyplot.show()
| StarcoderdataPython |
1845143 | <reponame>mirfan899/MTTS
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.wxgui.cliens.dataroamerclient.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GUI management of annotated data.
"""
import os.path
import wx
import wx.lib.scrolledpanel as scrolled
from sppas.src.ui.wxgui.sp_icons import TIER_RENAME
from sppas.src.ui.wxgui.sp_icons import TIER_DELETE
from sppas.src.ui.wxgui.sp_icons import TIER_CUT
from sppas.src.ui.wxgui.sp_icons import TIER_COPY
from sppas.src.ui.wxgui.sp_icons import TIER_PASTE
from sppas.src.ui.wxgui.sp_icons import TIER_DUPLICATE
from sppas.src.ui.wxgui.sp_icons import TIER_MOVE_UP
from sppas.src.ui.wxgui.sp_icons import TIER_MOVE_DOWN
from sppas.src.ui.wxgui.sp_icons import TIER_PREVIEW
from sppas.src.ui.wxgui.sp_icons import TIER_RADIUS
from sppas.src.ui.wxgui.ui.CustomEvents import FileWanderEvent, spEVT_FILE_WANDER
from sppas.src.ui.wxgui.ui.CustomEvents import spEVT_PANEL_SELECTED
from sppas.src.ui.wxgui.ui.CustomEvents import spEVT_SETTINGS
from sppas.src.ui.wxgui.panels.trslist import TrsList
from sppas.src.ui.wxgui.panels.mainbuttons import MainToolbarPanel
from sppas.src.ui.wxgui.structs.files import xFiles
import sppas.src.ui.wxgui.dialogs.filedialogs as filedialogs
from sppas.src.ui.wxgui.dialogs.msgdialogs import ShowInformation
from sppas.src.ui.wxgui.dialogs.msgdialogs import ShowYesNoQuestion
from .baseclient import BaseClient
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Unique wx widget ids for the tier-manipulation actions, shared by the
# toolbar/menu handlers of this module.
RENAME_ID = wx.NewId()
DUPLICATE_ID = wx.NewId()
PREVIEW_ID = wx.NewId()
TIER_RADIUS_ID = wx.NewId()
# ----------------------------------------------------------------------------
# Main class that manage the notebook
# ----------------------------------------------------------------------------
class DataRoamerClient(BaseClient):
    """
    :author: <NAME>
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact: <EMAIL>
    :license: GPL, v3
    :copyright: Copyright (C) 2011-2018 <NAME>
    :summary: Manage the opened files.

    This class manages the pages of a notebook with all opened files.
    Each page (except if empty...) contains an instance of a DataRoamer.

    """

    def __init__(self, parent, prefsIO):
        BaseClient.__init__(self, parent, prefsIO)
        self._update_members()

    # ------------------------------------------------------------------------

    def _update_members(self):
        """Update members."""
        self._multiplefiles = True

        # Quick and dirty solution to communicate to the file manager:
        self._prefsIO.SetValue('F_CCB_MULTIPLE', t='bool', v=True, text='')

    # ------------------------------------------------------------------------

    def CreateComponent(self, parent, prefsIO):
        """Create the DataRoamer component displayed in a notebook page."""
        return DataRoamer(parent, prefsIO)

    # ------------------------------------------------------------------------

    def New(self):
        """Add a new file into the current page."""
        # Ask for the new file name
        filename = filedialogs.SaveAsAnnotationFile()

        if filename is None:
            return

        # Add the newly created file in the file manager and that's it!
        evt = FileWanderEvent(filename=filename, status=False)
        evt.SetEventObject(self)
        wx.PostEvent(self.GetTopLevelParent(), evt)

    # ------------------------------------------------------------------------

    def _apply_on_current_page(self, apply_fct):
        """Apply *apply_fct* on each data object attached to the current page.

        Factors out the loop that was previously duplicated in Save() and
        SaveAs().
        """
        page = self._notebook.GetCurrentPage()
        for i in range(self._xfiles.GetSize()):
            if self._xfiles.GetOther(i) == page:
                apply_fct(self._xfiles.GetObject(i))

    # ------------------------------------------------------------------------

    def Save(self):
        """Save the file(s) of the current page."""
        self._apply_on_current_page(lambda o: o.Save())

    # ------------------------------------------------------------------------

    def SaveAs(self):
        """Save the file(s) of the current page under a new name."""
        self._apply_on_current_page(lambda o: o.SaveAs())

    # ------------------------------------------------------------------------

    def SaveAll(self):
        """Save all files of all pages."""
        for i in range(self._xfiles.GetSize()):
            self._xfiles.GetObject(i).SaveAll()
# ----------------------------------------------------------------------------
# The Component is the content of one page of the notebook.
# ----------------------------------------------------------------------------
class DataRoamer(wx.Panel):
    """
    :author: <NAME>
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact: <EMAIL>
    :license: GPL, v3
    :copyright: Copyright (C) 2011-2018 <NAME>
    :summary: This component allows to manage annotated files.

    """
    def __init__(self, parent, prefsIO):
        wx.Panel.__init__(self, parent, -1)

        # members
        self._filetrs = xFiles()    # Associate files/trsdata
        self._selection = None      # the selected TrsList panel (an object, not an index)
        self._clipboard = None      # Used to cut and paste
        self._prefsIO = prefsIO

        # create the client panel: a toolbar on top of a scrolled content area
        sizer = wx.BoxSizer(wx.VERTICAL)
        toolbar = self._create_toolbar()
        sizer.Add(toolbar, proportion=0, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=4)
        self._trspanel = self._create_content()
        sizer.Add(self._trspanel, proportion=2, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=4)

        # Bind events
        self._trspanel.Bind(spEVT_PANEL_SELECTED, self.OnPanelSelection)
        self.Bind(spEVT_FILE_WANDER, self.OnFileWander)
        self.Bind(spEVT_SETTINGS, self.OnSettings)
        self.Bind(wx.EVT_BUTTON, self.ProcessEvent)

        self.SetBackgroundColour(prefsIO.GetValue('M_BG_COLOUR'))
        self.SetForegroundColour(prefsIO.GetValue('M_FG_COLOUR'))
        self.SetFont(prefsIO.GetValue('M_FONT'))

        self.SetSizer(sizer)
        self.Layout()

    # ----------------------------------------------------------------------

    def _create_toolbar(self):
        """Creates a toolbar panel."""
        toolbar = MainToolbarPanel(self, self._prefsIO)
        toolbar.AddButton(RENAME_ID,
                          TIER_RENAME,
                          'Rename',
                          tooltip="Rename the selected tier.")
        toolbar.AddButton(wx.ID_DELETE,
                          TIER_DELETE,
                          'Delete',
                          tooltip="Delete the selected tier.")
        toolbar.AddButton(wx.ID_CUT,
                          TIER_CUT,
                          'Cut',
                          tooltip="Cut the selected tier.")
        toolbar.AddButton(wx.ID_COPY,
                          TIER_COPY,
                          "Copy",
                          tooltip="Copy the selected tier.")
        toolbar.AddButton(wx.ID_PASTE,
                          TIER_PASTE,
                          "Paste",
                          tooltip="Paste the selected tier.")
        toolbar.AddButton(DUPLICATE_ID,
                          TIER_DUPLICATE,
                          "Duplicate",
                          tooltip="Duplicate the selected tier.")
        toolbar.AddButton(wx.ID_UP,
                          TIER_MOVE_UP,
                          "Move Up",
                          tooltip="Move up the selected tier.")
        toolbar.AddButton(wx.ID_DOWN,
                          TIER_MOVE_DOWN,
                          "Move Down",
                          tooltip="Move down the selected tier.")
        toolbar.AddButton(TIER_RADIUS_ID,
                          TIER_RADIUS,
                          "Radius",
                          tooltip="Fix the vagueness of each boundary. "
                                  "Useful only for .xra file format.")
        toolbar.AddButton(PREVIEW_ID,
                          TIER_PREVIEW,
                          "View",
                          tooltip="Preview of the selected tier.")
        return toolbar

    # ----------------------------------------------------------------------

    def _create_content(self):
        """Create the panel with files content."""
        panel = scrolled.ScrolledPanel(self, -1)
        self._trssizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizerAndFit(self._trssizer)
        panel.SetAutoLayout(True)
        panel.SetupScrolling()
        return panel

    # ------------------------------------------------------------------------
    # Callbacks to any kind of event
    # ------------------------------------------------------------------------

    def ProcessEvent(self, event):
        """Processes an event.

        Processes an event, searching event tables and calling zero or more
        suitable event handler function(s).  Note that the ProcessEvent
        method is called from the wxPython docview framework directly since
        wxPython does not have a virtual ProcessEvent function.

        :param event: (wx.Event)
        :returns: True if the event id was handled here, otherwise the
            result of delegating to wx.GetApp().ProcessEvent().

        """
        ide = event.GetId()
        if ide == RENAME_ID:
            self.Rename()
            return True
        elif ide == wx.ID_DELETE:
            self.Delete()
            return True
        elif ide == wx.ID_CUT:
            self.Cut()
            return True
        elif ide == wx.ID_COPY:
            self.Copy()
            return True
        elif ide == wx.ID_PASTE:
            self.Paste()
            return True
        elif ide == DUPLICATE_ID:
            self.Duplicate()
            return True
        elif ide == wx.ID_UP:
            self.MoveUp()
            return True
        elif ide == wx.ID_DOWN:
            self.MoveDown()
            return True
        elif ide == PREVIEW_ID:
            self.Preview()
            return True
        elif ide == TIER_RADIUS_ID:
            self.Radius()
            return True

        return wx.GetApp().ProcessEvent(event)

    # ----------------------------------------------------------------------
    # Callbacks
    # ----------------------------------------------------------------------

    def OnFileWander(self, event):
        """A file was checked/unchecked somewhere else, then set/unset the data.

        :param event: (wx.Event) carries `filename` and a boolean `status`
            (True = the file was checked and must be displayed here).

        """
        f = event.filename
        s = event.status
        if s is True:
            r = self.SetData(f)
            if r is False:
                # loading failed or file already shown: report it back
                evt = FileWanderEvent(filename=f, status=False)
                evt.SetEventObject(self)
                wx.PostEvent(self.GetParent().GetParent().GetParent(), evt)
        else:
            if f is None:
                self.UnsetAllData()
            else:
                self.UnsetData(f)
                evt = FileWanderEvent(filename=f, status=False)
                evt.SetEventObject(self)
                wx.PostEvent(self.GetParent().GetParent().GetParent(), evt)

    # ------------------------------------------------------------------------

    def OnPanelSelection(self, event):
        """Change the current selection (the transcription file that was clicked on)."""
        sel = event.panel
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p != sel:
                p.Deselect()
                p.SetBackgroundColour(self._prefsIO.GetValue('M_BG_COLOUR'))
            else:
                # set the new selection, with a highlight colour
                self._selection = p
                p.SetBackgroundColour(wx.Colour(215, 215, 240))

    # -----------------------------------------------------------------------
    # Functions on a tier...
    # Each one simply forwards the action to the selected TrsList panel.
    # -----------------------------------------------------------------------

    def Rename(self):
        """Rename a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Rename()

    # -----------------------------------------------------------------------

    def Delete(self):
        """Delete a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Delete()

    # -----------------------------------------------------------------------

    def Cut(self):
        """Cut a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                self._clipboard = p.Cut()

    # -----------------------------------------------------------------------

    def Copy(self):
        """Copy a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                self._clipboard = p.Copy()

    # -----------------------------------------------------------------------

    def Paste(self):
        """Paste a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Paste(self._clipboard)

    # -----------------------------------------------------------------------

    def Duplicate(self):
        """Duplicate a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Duplicate()

    # -----------------------------------------------------------------------

    def MoveUp(self):
        """Move up a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.MoveUp()

    # -----------------------------------------------------------------------

    def MoveDown(self):
        """Move down a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.MoveDown()

    # -----------------------------------------------------------------------

    def Preview(self):
        """Open a frame to view a tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Preview()

    # -----------------------------------------------------------------------

    def Radius(self):
        """Change radius value of all TimePoint instances of the tier."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Radius()

    # ----------------------------------------------------------------------
    # Functions on a file...
    # ----------------------------------------------------------------------

    def Save(self):
        """Save the selected file."""
        if self._selection is None:
            ShowInformation(self,
                            self._prefsIO,
                            "No file selected!\n"
                            "Click on a tier to select a file...",
                            style=wx.ICON_INFORMATION)
            return

        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                p.Save()

    # ----------------------------------------------------------------------

    def SaveAs(self):
        """Save as... the selected file."""
        if self._selection is None:
            ShowInformation(self,
                            self._prefsIO,
                            "No file selected!\n"
                            "Click on a tier to select a file...",
                            style=wx.ICON_INFORMATION)
            return

        found = -1
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            if p == self._selection:
                found = i
                break

        if found > -1:
            f = self._filetrs.GetFilename(i)
            p = self._filetrs.GetObject(i)

            # Ask for the new file name
            filename = filedialogs.SaveAsAnnotationFile()
            if filename is None:
                return

            # do not erase the file if it is already existing!
            if os.path.exists(filename) and f != filename:
                ShowInformation(self,
                                self._prefsIO,
                                "File not saved: this file name is already existing!",
                                style=wx.ICON_INFORMATION)
            elif f == filename:
                p.Save()
            else:
                p.SaveAs(filename)
                # Add the newly created file in the file manager
                evt = FileWanderEvent(filename=filename, status=True)
                evt.SetEventObject(self)
                wx.PostEvent(self.GetTopLevelParent(), evt)

                evt = FileWanderEvent(filename=filename, status=True)
                evt.SetEventObject(self)
                wx.PostEvent(self.GetParent().GetParent().GetParent(), evt)

    # ----------------------------------------------------------------------

    def SaveAll(self):
        """Save all files."""
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            p.Save()

    # ----------------------------------------------------------------------
    # GUI
    # ----------------------------------------------------------------------

    def OnSettings(self, event):
        """Set new preferences, then apply them."""
        self._prefsIO = event.prefsIO

        # Apply the changes on self
        self.SetBackgroundColour(self._prefsIO.GetValue('M_BG_COLOUR'))
        self.SetForegroundColour(self._prefsIO.GetValue('M_FG_COLOUR'))
        self.SetFont(self._prefsIO.GetValue('M_FONT'))

        for i in range(self._filetrs.GetSize()):
            obj = self._filetrs.GetObject(i)
            obj.SetPreferences(self._prefsIO)

        self.Layout()
        self.Refresh()

    # ----------------------------------------------------------------------

    def SetFont(self, font):
        """Change font of all texts."""
        wx.Window.SetFont(self, font)
        # Apply to all panels
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            p.SetFont(font)

    # ----------------------------------------------------------------------

    def SetBackgroundColour(self, color):
        """Change background of all texts."""
        wx.Window.SetBackgroundColour(self, color)
        # Apply as background on all panels
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            p.SetBackgroundColour(color)

    # ----------------------------------------------------------------------

    def SetForegroundColour(self, color):
        """Change foreground of all texts."""
        wx.Window.SetForegroundColour(self, color)
        # Apply as foreground on all panels
        for i in range(self._filetrs.GetSize()):
            p = self._filetrs.GetObject(i)
            p.SetForegroundColour(color)

    # ----------------------------------------------------------------------
    # Manage the data
    # ----------------------------------------------------------------------

    def SetData(self, filename):
        """Add a file. Return False if it is already displayed, True otherwise."""
        # Do not add an already loaded file
        if self._filetrs.Exists(filename):
            return False

        # create the object
        new_trs = TrsList(self._trspanel, filename)
        new_trs.SetPreferences(self._prefsIO)
        if new_trs.GetTranscriptionName() == "IO-Error":
            ShowInformation(self,
                            self._prefsIO,
                            'Error loading: '+filename,
                            style=wx.ICON_ERROR)
            # NOTE(review): execution continues after an IO-Error, so the
            # empty panel is still appended below — confirm this is intended.

        # put the new trs in a sizer (required to enable sizer.Remove())
        s = wx.BoxSizer(wx.HORIZONTAL)
        s.Add(new_trs, proportion=1, flag=wx.EXPAND, border=0)
        self._trssizer.Add(s, proportion=1, flag=wx.EXPAND | wx.TOP, border=4)
        # add in the list of files
        self._filetrs.Append(filename, new_trs)

        self.Layout()
        self._trspanel.Refresh()
        return True

    # ----------------------------------------------------------------------

    def UnsetData(self, f):
        """Remove the given file."""
        if self._filetrs.Exists(f):
            i = self._filetrs.GetIndex(f)
            o = self._filetrs.GetObject(i)

            if o._dirty is True:
                # dlg to ask to save or not
                userChoice = ShowYesNoQuestion(None, self._prefsIO,
                                               "Do you want to save changes on the transcription of\n%s?" % f)
                if userChoice == wx.ID_YES:
                    o.Save()

            o.Destroy()
            self._filetrs.Remove(i)
            self._trssizer.Remove(i)

        self.Layout()
        self.Refresh()

    # ----------------------------------------------------------------------

    def UnsetAllData(self):
        """Clean information and destroy all data."""
        self._filetrs.RemoveAll()
        self._trssizer.DeleteWindows()

        self.Layout()
        self.Refresh()

    # ----------------------------------------------------------------------

    def GetSelection(self):
        """Return the current selection (the panel TrsList witch is selected)."""
        return self._selection
# ----------------------------------------------------------------------------
| StarcoderdataPython |
11351462 | <filename>iob2DynaML.py
from math import sqrt
import math
import numpy
from numpy import matmul
from math import sin, cos, radians
import sys,datetime, os
class DnaStation:
    """Container for one DynaML station record.

    All attributes are plain data filled in by the iob parser:
      Name        -- station name
      Constraint  -- DynaML constraint code, e.g. 'CCC' / 'FFF'
      Type        -- coordinate type, 'LLH' or 'LLh'
      XAxis/YAxis -- latitude/longitude in HP notation (DD.MMSSsss)
      Height      -- height string
      Description -- free-text description
      aAxis/bAxis -- error-ellipse semi-major/semi-minor axes
      ErrAz       -- error-ellipse azimuth
    """
    def __init__(self):
        # BUG FIX: the original initialised `self.name` (lowercase) but every
        # consumer (Stn_xml_str, the main script) reads/writes `.Name`.
        self.Name = ''
        self.Constraint = ''
        self.Type = ''
        self.XAxis = ''
        self.YAxis = ''
        self.Height = ''
        self.Description = ''
        self.aAxis = 0
        self.bAxis = 0
        self.ErrAz = 0
class AdditionalInfoStn:
    """Station metadata embedded as an XML comment (for database import)."""
    def __init__(self):
        # Free-text fields default to empty strings.
        for attr in ('HorizCoordMethod', 'RelativeHorizAccuracy', 'NonGSNumber'):
            setattr(self, attr, '')
        # Both selection flags default to the literal string 'true'.
        self.SelectPoint = self.SelectRL = 'true'
class AdditionalInfoMsr:
    """Measurement metadata included as an XML comment for database import."""
    def __init__(self):
        # Sentinel epochs (kept identical to the original defaults).
        self.StartDateTime = datetime.datetime(1994, 1, 1, 0, 0, 0)
        self.Duration = datetime.datetime(1966, 1, 1, 0, 0, 0)
        # Plain-text fields default to empty strings.
        for attr in ('TimeStatus', 'EphemerisType', 'AtReceiver', 'ToReceiver',
                     'FrequencyMode', 'Solution', 'EpochInterval',
                     'InstrumentModel', 'NonGSNumber'):
            setattr(self, attr, '')
        # Non-empty defaults.
        self.SurveyTechnique = 'SLEV'
        self.Class = 'LC'
        self.LevelDistance = '0.01'
        self.Derivation = 'MEAS'
class DnaMeasurement:
    """One DynaML measurement record: type code, station names, observation
    values and (for GNSS baselines) components plus a 3x3 covariance."""
    def __init__(self):
        # String-valued fields, empty until parsed.
        for attr in ('type', 'first', 'second', 'third',
                     'stddev', 'total', 'dx', 'dy', 'dz'):
            setattr(self, attr, '')
        # DynaML variance scale factors all default to '1'.
        for attr in ('vscale', 'pscale', 'lscale', 'hscale'):
            setattr(self, attr, '1')
        self.instheight = 0
        self.targheight = 0
        # Parallel lists used by direction sets (type D).
        self.targets = []
        self.values = []
        self.targetstddevs = []
        # GNSS baseline covariance terms.
        self.Vs = numpy.zeros([3, 3])

    def add_targets(self, target):
        """Append a direction-set target station name."""
        self.targets.append(target)

    def add_values(self, Value):
        """Append a direction-set observation value."""
        self.values.append(Value)

    def add_targetstddevs(self, targetstddev):
        """Append a direction-set per-target standard deviation."""
        self.targetstddevs.append(targetstddev)
class DeviceHeight:
    """Parallel lists pairing a station name with a device height.

    Device height may be the height of instrument at a point or the
    height of target.
    """
    def __init__(self):
        self.StnName = []
        self.RefHeight = []

    def add_DeviceHeight(self, Stn, Hgt):
        """Record one station/height pair (appended to both lists)."""
        self.StnName.append(Stn)
        self.RefHeight.append(Hgt)
def hms2hp(HMS_Ang):
    """Convert a Geolab angle string to HP notation.

    Input:  'DD MM SS.sss' (optionally prefixed/suffixed with 'S'/'E',
            possibly with a leading '-' and repeated spaces).
    Output: 'DD.MMSSsss' as used by DynAdjust.  'S' or '-' marks a
            negative angle.

    Fixes over the original:
      * uses `!=`/`in` instead of the Python-2-only `<>` operator;
      * an input without a decimal point no longer raises IndexError
        (the fractional part defaults to '0').
    """
    sign = -1 if ('S' in HMS_Ang or '-' in HMS_Ang) else 1
    # Strip hemisphere letters; turn the decimal point into a separator so a
    # whitespace split yields [deg, min, sec, fraction].  split() already
    # collapses repeated spaces.
    cleaned = HMS_Ang.replace('S', '').replace('E', '').replace('.', ' ')
    parts = cleaned.split()
    degrees = str(sign * abs(int(parts[0])))
    minutes = '%02d' % float(parts[1])
    seconds = '%02d' % float(parts[2])
    fraction = parts[3] if len(parts) > 3 else '0'
    return degrees + '.' + minutes + seconds + fraction
def hp2dec(hp):
    """Convert HP notation (DD.MMSSsss) to decimal degrees (dd.dddddd)."""
    magnitude = abs(hp)
    # hp * 1000 puts tens-of-seconds in the units digit: split off seconds/10,
    # then split the remainder into degrees and minutes.
    degmin, second = divmod(magnitude * 1000, 10)
    degree, minute = divmod(degmin, 100)
    # second holds seconds/10, hence the /360 (== (second*10)/3600).
    result = degree + minute / 60 + second / 360
    return result if hp >= 0 else -result
def FindJobNumber(strg):
    """Search a string for 8 consecutive digits (probably the job number).

    Returns the LAST run of 8 consecutive digits found (matching the
    original's overwrite-and-continue behaviour), or '' when none exists.

    Fixes over the original:
      * strings shorter than 8 characters no longer loop forever
        (`while i+7 != len(strg)` never terminated for len < 8);
      * `str.isdigit()` replaces the Python-2-only `unicode(...).isnumeric()`
        (identical for ASCII digits).
    """
    JN = ''
    for i in range(max(0, len(strg) - 7)):
        segment = strg[i:i + 8]
        if segment.isdigit():
            JN = segment
    return JN
def Stn_xml_str(Stn, stnAdditionalRec):
    """Render one station as a DynaML <DnaStation> element.

    :param Stn: station object exposing Name, Constraint, Type, XAxis,
        YAxis, Height and Description.
    :param stnAdditionalRec: AdditionalInfoStn-like metadata, written as an
        XML comment block for database import.
    :returns: (str) the XML fragment, newline-terminated.
    """
    lines = [
        '<DnaStation>',
        '<Name>' + Stn.Name + '</Name>',
        '<Constraints>' + Stn.Constraint + '</Constraints>',
        '<Type>' + Stn.Type + '</Type>',
        '<StationCoord>',
        '<Name>' + Stn.Name + '</Name>',
        '<XAxis>' + str(Stn.XAxis) + '</XAxis>',
        '<YAxis>' + str(Stn.YAxis) + '</YAxis>',
        '<Height>' + Stn.Height + '</Height>',
        '</StationCoord>',
        '<Description>' + Stn.Description + '</Description>',
        # Non-standard metadata travels inside an XML comment.
        '<!--AdditionalInfoStn>',
        '<HorizCoordMethod>' + stnAdditionalRec.HorizCoordMethod + '</HorizCoordMethod>',
        '<RelativeHorizAccuracy>' + stnAdditionalRec.RelativeHorizAccuracy + '</RelativeHorizAccuracy>',
        '<NonGSNumber>' + stnAdditionalRec.NonGSNumber + '</NonGSNumber>',
        '<SelectPoint>' + stnAdditionalRec.SelectPoint + '</SelectPoint>',
        '<SelectRL>' + stnAdditionalRec.SelectRL + '</SelectRL>',
        '</AdditionalInfoStn-->',
        '</DnaStation>',
    ]
    return '\n'.join(lines) + '\n'
def Msr_xml_str(Msr, ControlRec):
    """Render one measurement as a DynaML <DnaMeasurement> element.

    Handles type G (GNSS baseline), D (direction set), S (slope distance),
    L (level difference) and generic single-value types; ControlRec metadata
    is embedded as XML comments for database import.

    Fix over the original: the Python-2-only `<>` operator is replaced by
    `!=` (behaviour unchanged).

    :param Msr: DnaMeasurement-like observation record.
    :param ControlRec: AdditionalInfoMsr-like metadata (only read for
        types G, L, S and D).
    :returns: (str) XML fragment, newline-terminated.
    """
    xml_str = '<DnaMeasurement>\n'
    xml_str = xml_str + '<Type>' + Msr.type + '</Type>\n'
    xml_str = xml_str + '<Ignore/>\n'
    if Msr.type == 'G':
        # Reference frame follows the broadcast-ephemeris frame in use on
        # the observation date.
        xml_str = xml_str + '<ReferenceFrame>' + GNSSdate2Ref(ControlRec.StartDateTime) + '</ReferenceFrame>\n'
        xml_str = xml_str + '<Epoch>' + ControlRec.StartDateTime.strftime('%d.%m.%Y') + '</Epoch>\n'
        xml_str = xml_str + '<Vscale>' + Msr.vscale + '</Vscale>\n'
        xml_str = xml_str + '<Pscale>' + Msr.pscale + '</Pscale>\n'
        xml_str = xml_str + '<Lscale>' + Msr.lscale + '</Lscale>\n'
        xml_str = xml_str + '<Hscale>' + Msr.hscale + '</Hscale>\n'
    xml_str = xml_str + '<First>' + Msr.first + '</First>\n'
    if Msr.second != '':
        xml_str = xml_str + '<Second>' + Msr.second + '</Second>\n'
    if Msr.type != 'G' and Msr.type != 'D':
        xml_str = xml_str + '<Value>' + Msr.values[0] + '</Value>\n'
        xml_str = xml_str + '<StdDev>' + Msr.stddev + '</StdDev>\n'
    if Msr.type == 'G':
        xml_str = xml_str + '<GPSBaseline>\n'
        xml_str = xml_str + '<X>' + Msr.dx + '</X>\n'
        xml_str = xml_str + '<Y>' + Msr.dy + '</Y>\n'
        xml_str = xml_str + '<Z>' + Msr.dz + '</Z>\n'
        # NOTE(review): YY/YZ/ZZ are read from Vs[1,0], Vs[1,1] and Vs[2,0],
        # not Vs[1,1]/Vs[1,2]/Vs[2,2].  The code that fills Vs (not visible
        # here) must pack the upper triangle row-compressed this way —
        # verify against the baseline-covariance parsing before changing.
        xml_str = xml_str + '<SigmaXX>' + str(Msr.Vs[0, 0]) + '</SigmaXX>\n'
        xml_str = xml_str + '<SigmaXY>' + str(Msr.Vs[0, 1]) + '</SigmaXY>\n'
        xml_str = xml_str + '<SigmaXZ>' + str(Msr.Vs[0, 2]) + '</SigmaXZ>\n'
        xml_str = xml_str + '<SigmaYY>' + str(Msr.Vs[1, 0]) + '</SigmaYY>\n'
        xml_str = xml_str + '<SigmaYZ>' + str(Msr.Vs[1, 1]) + '</SigmaYZ>\n'
        xml_str = xml_str + '<SigmaZZ>' + str(Msr.Vs[2, 0]) + '</SigmaZZ>\n'
        xml_str = xml_str + '</GPSBaseline>\n'
        xml_str = xml_str + '<!--AdditionalInfoMsrG>\n'
        xml_str = xml_str + '<StartDateTime>' + ControlRec.StartDateTime.strftime('%Y-%m-%dT%H:%M:%S') + '</StartDateTime>\n'
        # Duration is stored as a datetime offset from 1900-01-01, hence the
        # year-1900 / month-1 / day-1 arithmetic to build an ISO-8601 period.
        xml_str = xml_str + '<Duration>P' + str(ControlRec.Duration.year - 1900) + 'Y' + str(ControlRec.Duration.month - 1) + 'M' + str(ControlRec.Duration.day - 1) + 'DT' + ControlRec.Duration.strftime('%HH%MM%SS') + '</Duration>\n'
        xml_str = xml_str + '<TimeStatus>' + ControlRec.TimeStatus + '</TimeStatus>\n'
        xml_str = xml_str + '<EphemerisType>' + ControlRec.EphemerisType + '</EphemerisType>\n'
        xml_str = xml_str + '<AtReceiver>' + ControlRec.AtReceiver + '</AtReceiver>\n'
        xml_str = xml_str + '<ToReceiver>' + ControlRec.ToReceiver + '</ToReceiver>\n'
        xml_str = xml_str + '<FrequencyMode>' + ControlRec.FrequencyMode + '</FrequencyMode>\n'
        xml_str = xml_str + '<SurveyTechnique>' + ControlRec.SurveyTechnique + '</SurveyTechnique>\n'
        xml_str = xml_str + '<Solution>' + ControlRec.Solution + '</Solution>\n'
        xml_str = xml_str + '<EpochInterval>' + str(ControlRec.EpochInterval) + '</EpochInterval>\n'
        xml_str = xml_str + '<Class>' + ControlRec.Class + '</Class>\n'
        xml_str = xml_str + '<NonGSNumber>' + ControlRec.NonGSNumber + '</NonGSNumber>\n'
        xml_str = xml_str + '</AdditionalInfoMsrG-->\n'
    if Msr.type == 'L':
        xml_str = xml_str + '<!--AdditionalInfoMsrL>\n'
        xml_str = xml_str + '<SurveyTechnique>' + ControlRec.SurveyTechnique + '</SurveyTechnique>\n'
        xml_str = xml_str + '<LevelDistance>' + ControlRec.LevelDistance + '</LevelDistance>\n'
        xml_str = xml_str + '<ObsDate>' + ControlRec.StartDateTime.strftime('%Y-%m-%d') + '</ObsDate>\n'
        xml_str = xml_str + '<Derivation>' + ControlRec.Derivation + '</Derivation>\n'
        xml_str = xml_str + '<Class>' + ControlRec.Class + '</Class>\n'
        xml_str = xml_str + '<NonGSNumber>' + ControlRec.NonGSNumber + '</NonGSNumber>\n'
        xml_str = xml_str + '</AdditionalInfoMsrL-->\n'
    if Msr.type == 'S':
        xml_str = xml_str + '<InstHeight>' + str(Msr.instheight) + '</InstHeight>\n'
        xml_str = xml_str + '<TargHeight>' + str(Msr.targheight) + '</TargHeight>\n'
        xml_str = xml_str + '<!--AdditionalInfoMsrS>\n'
        xml_str = xml_str + '<InstrumentModel>' + ControlRec.InstrumentModel + '</InstrumentModel>\n'
        xml_str = xml_str + '<ObsDate>' + ControlRec.StartDateTime.strftime('%Y-%m-%d') + '</ObsDate>\n'
        xml_str = xml_str + '<Derivation>' + ControlRec.Derivation + '</Derivation>\n'
        xml_str = xml_str + '<Class>' + ControlRec.Class + '</Class>\n'
        xml_str = xml_str + '<NonGSNumber>' + ControlRec.NonGSNumber + '</NonGSNumber>\n'
        xml_str = xml_str + '</AdditionalInfoMsrS-->\n'
    if Msr.type == 'D':
        # Entry 0 is the reference direction; the remaining total-1 entries
        # are emitted as <Directions> sub-elements.
        xml_str = xml_str + '<Value>' + Msr.values[0] + '</Value>\n'
        xml_str = xml_str + '<StdDev>' + Msr.targetstddevs[0] + '</StdDev>\n'
        xml_str = xml_str + '<Total>' + str(Msr.total - 1) + '</Total>\n'
        for ObsNumber in range(1, Msr.total):
            xml_str = xml_str + '<Directions>\n'
            xml_str = xml_str + '<Ignore/>\n'
            xml_str = xml_str + '<Target>' + Msr.targets[ObsNumber] + '</Target>\n'
            xml_str = xml_str + '<Value>' + Msr.values[ObsNumber] + '</Value>\n'
            xml_str = xml_str + '<StdDev>' + Msr.targetstddevs[ObsNumber] + '</StdDev>\n'
            xml_str = xml_str + '</Directions>\n'
        xml_str = xml_str + '<!--AdditionalInfoMsrD>\n'
        xml_str = xml_str + '<InstrumentModel>' + ControlRec.InstrumentModel + '</InstrumentModel>\n'
        xml_str = xml_str + '<ObsDate>' + ControlRec.StartDateTime.strftime('%Y-%m-%d') + '</ObsDate>\n'
        xml_str = xml_str + '<Derivation>' + ControlRec.Derivation + '</Derivation>\n'
        xml_str = xml_str + '<Class>' + ControlRec.Class + '</Class>\n'
        xml_str = xml_str + '<NonGSNumber>' + ControlRec.NonGSNumber + '</NonGSNumber>\n'
        xml_str = xml_str + '</AdditionalInfoMsrD-->\n'
    xml_str = xml_str + '<Source></Source>\n'
    xml_str = xml_str + '</DnaMeasurement>\n'
    return xml_str
c_vac = 299792.458   # speed of light in a vacuum, km/s
k_0 = 0.9996         # central scale factor (transverse Mercator / UTM)
# Ellipsoid Constants
class Ellipsoid(object):
    """Reference ellipsoid defined by semi-major axis and inverse flattening.

    Derived quantities: flattening f, semi-minor axis, first/second
    eccentricity (squared), and third flattening n (with n squared).
    """
    def __init__(self, semimaj, inversef):
        self.semimaj = semimaj
        self.inversef = inversef
        flat = 1 / inversef
        self.f = flat
        self.semimin = float(semimaj * (1 - flat))
        first_ecc_sq = float(flat * (2 - flat))
        self.ecc1sq = first_ecc_sq
        self.ecc2sq = float(first_ecc_sq / (1 - first_ecc_sq))
        self.ecc1 = sqrt(first_ecc_sq)
        self.n = float(flat / (2 - flat))
        self.n2 = self.n ** 2


# Geodetic Reference System 1980
grs80 = Ellipsoid(6378137, 298.25722210088)
def llh2xyz(lat, lng, ellht, ellipsoid=grs80):
    """Convert geodetic coordinates to Earth-centred Cartesian XYZ.

    :param lat: latitude in HP notation (DD.MMSSsss), string or number.
    :param lng: longitude in HP notation (DD.MMSSsss), string or number.
    :param ellht: ellipsoidal height (metres).
    :param ellipsoid: Ellipsoid instance (default: grs80).
    :returns: (x, y, z) tuple in metres.
    """
    # Convert lat & long to radians (hp2dec turns HP notation into degrees)
    lat = radians(hp2dec(float(lat)))
    lng = radians(hp2dec(float(lng)))
    ellht = float(ellht)
    # Calculate Ellipsoid Radius of Curvature in the Prime Vertical - nu
    if lat == 0:
        # BUG FIX: the original read grs80.semimaj here, ignoring the
        # `ellipsoid` argument for equatorial points.
        nu = ellipsoid.semimaj
    else:
        nu = ellipsoid.semimaj / (sqrt(1 - ellipsoid.ecc1sq * (sin(lat) ** 2)))
    # Calculate x, y, z
    x = (nu + ellht) * cos(lat) * cos(lng)
    y = (nu + ellht) * cos(lat) * sin(lng)
    z = ((ellipsoid.semimin ** 2 / ellipsoid.semimaj ** 2) * nu + ellht) * sin(lat)
    return x, y, z
def ErrEllip2Ycluster(Stn):
    #Input: Supply a station with coordinates and error ellipse for coordinate uncertainty
    #Output: xml string for point cluster (Y-type observation)
    x, y, z = llh2xyz(Stn.XAxis, Stn.YAxis, Stn.Height)
    # 2.44774683068 = sqrt(5.991): presumably scales 95%-confidence 2-D
    # ellipse axes down to 1-sigma — TODO confirm against the iob source.
    a=Stn.aAxis/2.44774683068
    b=Stn.bAxis/2.44774683068
    # Azimuth measured from north -> convert to a mathematical angle from east.
    Az=90-Stn.ErrAz
    rAz=math.radians(Az)
    # NOTE(review): XAxis/YAxis are in HP notation elsewhere in this module
    # (llh2xyz above applies hp2dec), but here they are fed to radians() as
    # if they were decimal degrees — verify whether hp2dec is missing.
    rlat=math.radians(float(Stn.XAxis))
    rlng=math.radians(float(Stn.YAxis))
    # rl: rotation matrix from the local (east, north, up) frame to ECEF;
    # rl[2,0] is intentionally left at zero.
    rl=numpy.zeros([3,3])
    rl[0,0]=-sin(rlng)
    rl[0,1]=-sin(rlat)*cos(rlng)
    rl[0,2]=cos(rlat)*cos(rlng)
    rl[1,0]=cos(rlng)
    rl[1,1]=-sin(rlat)*sin(rlng)
    rl[1,2]=cos(rlat)*sin(rlng)
    rl[2,1]=cos(rlat)
    rl[2,2]=sin(rlat)
    # iA: local-frame covariance built from the ellipse (a, b, azimuth);
    # a token 1e-6 vertical variance keeps the matrix non-singular.
    iA=numpy.zeros([3,3])
    iA[0,0]=(cos(rAz)*cos(rAz)*a*a)+(b*b*sin(rAz)*sin(rAz))
    iA[0,1]=(a*a-b*b)*cos(rAz)*sin(rAz)
    iA[1,0]=iA[0,1]
    iA[1,1]=(a*a*sin(rAz)*sin(rAz))+(b*b*cos(rAz)*cos(rAz))
    iA[2,2]=0.000001
    # Rotate the covariance into the ECEF frame: Wt = R * iA * R^T.
    Wt=matmul(matmul(rl,iA),rl.transpose())
    xml_str='<DnaMeasurement>\n'
    xml_str=xml_str+'<Type>Y</Type>\n'
    xml_str=xml_str+'<Ignore/>\n'
    xml_str=xml_str+'<ReferenceFrame>GDA2020</ReferenceFrame>\n'
    xml_str=xml_str+'<Epoch>01.01.2020</Epoch>\n'
    xml_str=xml_str+'<Vscale>1.000</Vscale>\n'
    xml_str=xml_str+'<Pscale>1.000</Pscale>\n'
    xml_str=xml_str+'<Lscale>1.000</Lscale>\n'
    xml_str=xml_str+'<Hscale>1.000</Hscale>\n'
    xml_str=xml_str+'<Coords>XYZ</Coords>\n'
    xml_str=xml_str+'<Total>1</Total>\n'
    xml_str=xml_str+'<First>' + Stn.Name + '</First>\n'
    xml_str=xml_str+'<Clusterpoint>\n'
    xml_str=xml_str+'<X>'+str(x)+'</X>\n'
    xml_str=xml_str+'<Y>'+str(y)+'</Y>\n'
    xml_str=xml_str+'<Z>'+str(z)+'</Z>\n'
    # Upper triangle of the rotated covariance.
    xml_str=xml_str+'<SigmaXX>'+str(Wt[0,0])+'</SigmaXX>\n'
    xml_str=xml_str+'<SigmaXY>'+str(Wt[0,1])+'</SigmaXY>\n'
    xml_str=xml_str+'<SigmaXZ>'+str(Wt[0,2])+'</SigmaXZ>\n'
    xml_str=xml_str+'<SigmaYY>'+str(Wt[1,1])+'</SigmaYY>\n'
    xml_str=xml_str+'<SigmaYZ>'+str(Wt[1,2])+'</SigmaYZ>\n'
    xml_str=xml_str+'<SigmaZZ>'+str(Wt[2,2])+'</SigmaZZ>\n'
    xml_str=xml_str+'</Clusterpoint>\n'
    xml_str=xml_str+'</DnaMeasurement>\n'
    return xml_str
def stn_header():
    """Return the XML declaration + opening tag for a DynaML station file."""
    lines = (
        '<?xml version="1.0" encoding="utf-8"?>',
        '<DnaXmlFormat type="Station File" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="DynaML.xsd">',
    )
    return '\n'.join(lines) + '\n'
def msr_header():
    """Return the XML declaration + opening tag for a DynaML measurement file."""
    lines = (
        '<?xml version="1.0" encoding="utf-8"?>',
        '<DnaXmlFormat type="Measurement File" referenceframe="GDA94" epoch="01.01.1994" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="DynaML.xsd">',
    )
    return '\n'.join(lines) + '\n'
def dML_footer():
    """Return the closing tag shared by both DynaML file types."""
    return '</DnaXmlFormat>' + '\n'
def GNSSdate2Ref(obsDate):
    """Return the ITRF realisation used by the broadcast ephemeris on a date.

    The boundaries below reproduce the original if-chain exactly.

    :param obsDate: (datetime.datetime) GNSS baseline observation date.
    :returns: (str) e.g. 'ITRF2000'.
    :raises ValueError: for dates before 1900-01-01 (the original left the
        result unbound and crashed with UnboundLocalError instead).
    """
    # (exclusive upper bound, frame in force up to that date)
    boundaries = (
        (datetime.datetime(1994, 1, 2), 'ITRF1991'),
        (datetime.datetime(1995, 1, 1), 'ITRF1992'),
        (datetime.datetime(1996, 6, 30), 'ITRF1993'),
        (datetime.datetime(1998, 3, 1), 'ITRF1994'),
        (datetime.datetime(1999, 8, 1), 'ITRF1996'),
        (datetime.datetime(2001, 12, 2), 'ITRF1997'),
        (datetime.datetime(2006, 11, 5), 'ITRF2000'),
        (datetime.datetime(2011, 4, 17), 'ITRF2005'),
    )
    if obsDate < datetime.datetime(1900, 1, 1):
        raise ValueError('observation date before 1900: no ITRF realisation defined')
    for upper, ref in boundaries:
        if obsDate < upper:
            return ref
    return 'ITRF2008'
#####################################################################################
#### Input:Geolab *.iob file #####
#### Output: DynaML stn and msr file, If the input file contains the word final #####
#### and the inpuut contains error ellipse information for fixed marks #####
#### an extra stn and msr file will be produced for doing a weighted #####
#### adjustmanets and 2-D propogation of uncertainty. #####
#####################################################################################
# First pass over the iob file: collect summary statistics (average relative
# uncertainty), the list of float marks and which of them carry observations,
# then open the output DynaML files and write their headers.
filename = '20192277_3.Final_Adjustment.iob'
if len(sys.argv) > 1:
    filename = sys.argv[1]

f = open(filename, 'r')
SumRelativeUncertainties = 0
AvgRelativeUncertainties = 0
stnCnt = 0
# Semicolon-delimited name lists; the leading ';' simplifies ';name;' searches.
GNSSmarksStr = ';'
FloatmarksStr = ';'
w_MksWithObs = ';'
for linestr in f.readlines():
    if linestr[0:4] == ' PLO' or linestr[0:4] == ' PLH':
        # Station record: columns 72+ optionally carry ';'-separated quality
        # metadata with a 'ppm' relative-uncertainty field at index 7.
        if linestr[72:len(linestr)].strip() != '' and linestr.find('ppm') != -1:
            stnRec = linestr[72:len(linestr)].strip().replace(';', '|').split('|')
            SumRelativeUncertainties = SumRelativeUncertainties + float(stnRec[7].replace('ppm', ''))
            stnCnt = stnCnt + 1
        # Fix-code '00' marks a float (unconstrained) station.
        if linestr[6:8] == '00' and filename.lower().find('final') != -1:
            FloatmarksStr = FloatmarksStr + linestr[10:23].strip() + ';'
    if (linestr[0:5] in (' OHDF', ' GAZI', ' AZIM', ' DIST', ' DSET', ' DXYZ')
            or linestr[0:4] == ' DIR'):
        CurrentMsr = DnaMeasurement()
        CurrentMsr.first = linestr[10:23].strip()
        CurrentMsr.second = linestr[23:35].strip()
        if linestr[0:5] == ' DXYZ':
            GNSSmarksStr = GNSSmarksStr + ';' + CurrentMsr.first + ';' + CurrentMsr.second
        if (FloatmarksStr.find(';' + CurrentMsr.first + ';') != -1
                or FloatmarksStr.find(';' + CurrentMsr.second + ';') != -1):
            w_MksWithObs = w_MksWithObs + CurrentMsr.first + ';' + CurrentMsr.second + ';'
f.close()  # BUG FIX: the original had `f.close` (no parentheses) — a no-op.

# Bin the guessing of GDA94 relative uncertainty for the float marks.
# NOTE(review): with stnCnt == 0 the average is 0 and falls into the <3 bin.
if stnCnt != 0:
    AvgRelativeUncertainties = SumRelativeUncertainties / stnCnt
if AvgRelativeUncertainties < 3:
    AvgRelativeUncertainties = 3
if AvgRelativeUncertainties > 3 and AvgRelativeUncertainties <= 7:
    AvgRelativeUncertainties = 7.5
if AvgRelativeUncertainties > 7.5 and AvgRelativeUncertainties <= 10:
    AvgRelativeUncertainties = 10
if AvgRelativeUncertainties > 10 and AvgRelativeUncertainties <= 20:
    AvgRelativeUncertainties = 20
if AvgRelativeUncertainties > 20 and AvgRelativeUncertainties <= 30:
    AvgRelativeUncertainties = 30
if AvgRelativeUncertainties > 30 and AvgRelativeUncertainties <= 50:
    AvgRelativeUncertainties = 50
if AvgRelativeUncertainties > 50:
    AvgRelativeUncertainties = int(AvgRelativeUncertainties / 10) * 10

# Re-open the input for the second pass and create the DynaML outputs.
f = open(filename, 'r')
stnout = open(filename.replace('.iob', '.stn.xml'), 'w')
msrout = open(filename.replace('.iob', '.msr.xml'), 'w')
stnout.write(stn_header())
msrout.write(msr_header())
if filename.lower().find('final') != -1:
    # 'Final' jobs also get a weighted-adjustment pair of outputs.
    w_stnout = open(filename.replace('.iob', '_W.stn.xml'), 'w')
    w_msrout = open(filename.replace('.iob', '_W.msr.xml'), 'w')
    w_stnout.write(stn_header())
    w_msrout.write(msr_header())

# State for the second pass (the main parsing loop below).
lineCount = 0
InstHts = DeviceHeight()
TgtHts = DeviceHeight()
CurrentMsr = DnaMeasurement()
ControlRec = AdditionalInfoMsr()
# --- Second pass: translate each .iob record into DynaML station /
# --- measurement XML.  Record type is identified by a fixed-column prefix.
for linestr in f.readlines():
    print(linestr)
    if linestr[0:5]==' TITL':
        # Job number from the title line; fall back to the working directory.
        jobNumber = FindJobNumber(linestr)
        if jobNumber=='':
            jobNumber=FindJobNumber(os.getcwd())
    if linestr[0:4]==' PLO' or linestr[0:4] == ' PLH':
        # Station record (geodetic position, orthometric or ellipsoidal height).
        CurrentStn=DnaStation()
        CurrentStn.Name=linestr[10:23].strip()
        # Constraint digits: '1' -> Constrained, '0' -> Free.
        CurrentStn.Constraint=linestr[6:9].replace('1','C')
        CurrentStn.Constraint=CurrentStn.Constraint.replace('0','F')
        CurrentStn.Constraint=CurrentStn.Constraint.strip()
        if linestr[0:4] == ' PLO':
            CurrentStn.Type='LLH'
        if linestr[0:4] == ' PLH':
            CurrentStn.Type='LLh'
        CurrentStn.XAxis=hms2hp(linestr[23:41].strip())
        CurrentStn.YAxis=hms2hp(linestr[41:59].strip())
        CurrentStn.Height=linestr[59:72].strip()
        CurrentStn.Description=linestr[72:len(linestr)].strip()
        stnAdditionalRec=AdditionalInfoStn()
        stnAdditionalRec.NonGSNumber='E'+jobNumber
        if CurrentStn.Description<>'':
            # Description fields ('|'-separated): [7] relative accuracy,
            # [8] coordination method, [10..12] error ellipse a/b/azimuth.
            stnRec=CurrentStn.Description.replace(';','|').split('|')
            stnAdditionalRec.HorizCoordMethod=stnRec[8]
            stnAdditionalRec.RelativeHorizAccuracy=stnRec[7]
            if stnRec[10]!='':
                CurrentStn.aAxis=float(stnRec[10])
                CurrentStn.bAxis=float(stnRec[11])
                CurrentStn.ErrAz=float(stnRec[12])
        stnAdditionalRec.SelectPoint='true'
        stnAdditionalRec.SelectRL='false'
        # Fully-fixed marks get the binned average uncertainty from pass 1.
        if CurrentStn.Constraint[0:2]=='FF' and AvgRelativeUncertainties !=0:
            stnAdditionalRec.RelativeHorizAccuracy=str(AvgRelativeUncertainties)+'ppm'
        # NOTE(review): str.find returns -1 when absent, which is truthy, so
        # this condition is true unless the name happens to sit at index 0;
        # it was almost certainly meant to be "...find(...) <> -1".
        if GNSSmarksStr.find(';'+CurrentStn.Name+';'):
            stnAdditionalRec.HorizCoordMethod='GNSS'
        stnout.write(Stn_xml_str(CurrentStn,stnAdditionalRec))
        if filename.lower().find('final')<>-1:
            # NOTE(review): this is an alias, not a copy -- mutating
            # w_CurrentStn below also mutates CurrentStn.
            w_CurrentStn=CurrentStn
            if w_MksWithObs.find(';' + w_CurrentStn.Name+';')<>-1:
                if CurrentStn.Constraint[0:2]<>'FF'and CurrentStn.aAxis<>0:
                    w_CurrentStn.Constraint='FF' + CurrentStn.Constraint[2:3]
                    w_msrout.write(ErrEllip2Ycluster(w_CurrentStn))
            w_stnout.write(Stn_xml_str(w_CurrentStn,stnAdditionalRec))
    if linestr[0:5] == ' HI ':
        # Instrument height record.
        add_DeviceHeight(InstHts,linestr[10:23].strip(),linestr[23:33].strip())
    if linestr[0:5] == ' HT ':
        # Target height record.
        add_DeviceHeight(TgtHts,linestr[10:23].strip(),linestr[23:33].strip())
    if linestr[0:5] == ' OHGT':
        # Orthometric height measurement -> DynaML type 'H'.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='H'
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.add_values(linestr[36:65].strip())
        CurrentMsr.stddev=linestr[65:76].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
    if linestr[0:5] == ' OHDF':
        # Orthometric height difference (levelling) -> type 'L'.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='L'
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.add_values(linestr[50:65].strip())
        CurrentMsr.stddev=linestr[65:76].strip()
        ControlRec.LevelDistance=linestr[36:50].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
    if linestr[0:5] == ' GAZI':
        # Grid azimuth -> type 'B'.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='B'
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.add_values(hms2hp(linestr[36:65].strip()))
        CurrentMsr.stddev=linestr[65:76].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
    if linestr[0:5] == ' AZIM':
        # Astronomic azimuth -> type 'K'.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='K'
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.add_values(hms2hp(linestr[36:65].strip()))
        CurrentMsr.stddev=linestr[65:76].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
    if linestr[0:5] == ' DIST':
        # Slope distance -> type 'S'; look up instrument/target heights
        # recorded earlier for the occupying station.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='S'
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.add_values(linestr[36:65].strip())
        rw=0
        for Stn in InstHts.StnName:
            if Stn==CurrentMsr.first:
                CurrentMsr.instheight=InstHts.RefHeight[rw]
            rw=rw+1
        rw=0
        for Stn in TgtHts.StnName:
            if Stn==CurrentMsr.first:
                CurrentMsr.targheight=TgtHts.RefHeight[rw]
            rw=rw+1
        CurrentMsr.stddev=linestr[65:76].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
            print FloatmarksStr, CurrentMsr.first, CurrentMsr.second
    if linestr[0:5] == ' DSET':
        # Start of a direction set -> type 'D'; the following ' DIR' lines
        # accumulate its targets.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='D'
        lineCount=0
    if linestr[0:4] == ' DIR' and lineCount==1 and CurrentMsr.type=='D':
        # First direction of the set defines the instrument and RO stations.
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.add_targets(linestr[23:35].strip())
        CurrentMsr.add_values(hms2hp(linestr[36:65].strip()))
        CurrentMsr.add_targetstddevs(linestr[65:76].strip())
        CurrentMsr.total=lineCount
    if linestr[0:4] == ' DIR' and lineCount>1 and CurrentMsr.type=='D':
        CurrentMsr.add_targets(linestr[23:35].strip())
        CurrentMsr.add_values(hms2hp(linestr[36:65].strip()))
        CurrentMsr.add_targetstddevs(linestr[65:76].strip())
        CurrentMsr.total=lineCount
    if CurrentMsr.type=='D' and linestr[0:4] <> ' DIR' and lineCount>1:
        # A non-DIR line ends the direction set: flush it.
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        CurrentMsr=DnaMeasurement()
    # Scrap information from Landgate GESMAR control Records
    # eg.*CONTROL;GPS;201810010007;012057;E;B;TRIM;TRIM;D;ST ;FX;015;N
    # eg.*CONTROL;OHDF;20181001;SLEV;LC;2.71;MEAS
    # eg.*CONTROL;DIS;20181213;TS 16;C;MEAS
    # eg.*CONTROL;ANG;20181213;TS16;C;MEAS
    if linestr[0:9]=='*CONTROL;':
        # Common prefix: observation date/time (padded to minutes).
        ControlRec=AdditionalInfoMsr()
        alinestr=linestr.split(';')
        stdatetimestr=alinestr[2] + '0000'
        yr=int(stdatetimestr[0:4])
        mth=int(stdatetimestr[4:6])
        ddy=int(stdatetimestr[6:8])
        hr=int(stdatetimestr[8:10])
        mn=int(stdatetimestr[10:12])
        ControlRec.StartDateTime=datetime.datetime(yr, mth, ddy, hr, mn,00)
        ControlRec.NonGSNumber='E' + jobNumber
    if linestr[0:13]=='*CONTROL;DIS;':
        ControlRec.InstrumentModel=alinestr[2].strip()
        ControlRec.Class=alinestr[3].strip()
        ControlRec.Derivation=alinestr[4].strip()
    if linestr[0:13]=='*CONTROL;ANG;':
        ControlRec.InstrumentModel=alinestr[2].strip()
        ControlRec.Class=alinestr[3].strip()
        ControlRec.Derivation=alinestr[4].strip()
    if linestr[0:14]=='*CONTROL;OHDF;':
        ControlRec.SurveyTechnique=alinestr[3].strip()
        ControlRec.Class=alinestr[4].strip()
        ControlRec.LevelDistance=alinestr[5].strip()
        ControlRec.Derivation=alinestr[6].strip()
    if linestr[0:13]=='*CONTROL;GPS;':
        # GPS session metadata: duration (HHMMSS), receivers, solution, etc.
        durationstr=alinestr[3]
        hr=int(durationstr[0:2])
        mn=int(durationstr[2:4])
        sec=int(durationstr[4:6])
        ControlRec.Duration=datetime.datetime(1900,1,1,0,0,0) + datetime.timedelta(hours=hr, minutes= mn, seconds=sec)
        ControlRec.TimeStatus=alinestr[4].strip()
        ControlRec.EphemerisType=alinestr[5].strip()
        ControlRec.AtReceiver=alinestr[6].strip()
        ControlRec.ToReceiver=alinestr[7].strip()
        ControlRec.FrequencyMode=alinestr[8].strip()
        ControlRec.SurveyTechnique=alinestr[9].strip()
        ControlRec.Solution=alinestr[10].strip()
        ControlRec.EpochInterval=int(alinestr[11])
        ControlRec.Class=alinestr[12].strip()
    if linestr[0:4] == ' GRP':
        # GNSS baseline group -> type 'G'; components follow on DXYZ/ELEM.
        CurrentMsr=DnaMeasurement()
        CurrentMsr.type='G'
    if linestr[0:5] == ' DXYZ':
        CurrentMsr.first=linestr[10:23].strip()
        CurrentMsr.second=linestr[23:35].strip()
        CurrentMsr.dx=linestr[36:50].strip()
        CurrentMsr.dy=linestr[50:64].strip()
        CurrentMsr.dz=linestr[64:78].strip()
    if linestr[0:13] == ' COV CT UPPR':
        # Upper-triangular covariance header; its scale applies to the
        # following ELEM rows.
        lineCount=0
        CurrentMsr.vscale=linestr[26:36].strip()
    if linestr[0:5] == ' ELEM' and CurrentMsr.type=='G' and lineCount==1:
        CurrentMsr.Vs[0,0]=linestr[7:30].strip()
        CurrentMsr.Vs[0,1]=linestr[30:54].strip()
        CurrentMsr.Vs[0,2]=linestr[54:78].strip()
    if linestr[0:5] == ' ELEM' and CurrentMsr.type=='G' and lineCount==2:
        CurrentMsr.Vs[1,0]=linestr[7:30].strip()
        CurrentMsr.Vs[1,1]=linestr[30:54].strip()
    if linestr[0:5] == ' ELEM' and CurrentMsr.type=='G' and lineCount==3:
        # Third ELEM row completes the 3x3 covariance: flush the baseline.
        CurrentMsr.Vs[2,0]=linestr[7:30].strip()
        msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
        if FloatmarksStr.find(';' + CurrentMsr.first+';')<>-1 or FloatmarksStr.find(';' + CurrentMsr.second+';')<>-1:
            w_msrout.write(Msr_xml_str(CurrentMsr,ControlRec))
    lineCount=lineCount+1
# Close the files
# NOTE(review): f.close without parentheses does not close the input file;
# should be f.close().
f.close
stnout.write(dML_footer())
msrout.write(dML_footer())
stnout.close()
msrout.close()
if filename.lower().find('final')<>-1:
    w_stnout.write(dML_footer())
    w_msrout.write(dML_footer())
    w_stnout.close()
    w_msrout.close()
print('Done :)')
| StarcoderdataPython |
11306785 | <reponame>ubco-mds-2021-labs/dashboard1-group-a
import pandas as pd
# File paths of the pre-processed dashboard data sets.
TEMP_DATA_PATH = "data/temperature_df_full.csv"
ENERGY_DATA_PATH = "data/energy_df_full.csv"

# Temperature dataframe for Tab 1 use.
# Fix: use the path constants instead of repeating the literal paths
# (they were defined above but never used).
# The CSV stores the old pandas index as "Unnamed: 0"; drop it for performance.
temperature_df_full = (
    pd.read_csv(TEMP_DATA_PATH)
    .astype({"Date": "datetime64"})
    .drop(columns="Unnamed: 0")
)

# Energy dataframe for Tab 2 use.
energy_df_full = (
    pd.read_csv(ENERGY_DATA_PATH)
    .astype({"Date": "datetime64"})
    .drop(columns="Unnamed: 0")
)
| StarcoderdataPython |
# Code below is the expansion of the statement:
#
#     RESULT = yield from EXPR
#
# Copied verbatim from the Formal Semantics section of
# PEP 380 -- Syntax for Delegating to a Subgenerator
#
# https://www.python.org/dev/peps/pep-0380/#formal-semantics
#
# NOTE: EXPR, RESULT and `sys` are pseudo-code placeholders from the PEP;
# this module is illustrative and is not meant to be executed as-is.
# tag::YIELD_FROM_EXPANSION[]
_i = iter(EXPR)  # <1>
try:
    _y = next(_i)  # <2>
except StopIteration as _e:
    _r = _e.value  # <3>
else:
    while 1:  # <4>
        try:
            _s = yield _y  # <5>
        except GeneratorExit as _e:  # <6>
            try:
                _m = _i.close
            except AttributeError:
                pass
            else:
                _m()
            raise _e
        except BaseException as _e:  # <7>
            _x = sys.exc_info()
            try:
                _m = _i.throw
            except AttributeError:
                raise _e
            else:  # <8>
                try:
                    _y = _m(*_x)
                except StopIteration as _e:
                    _r = _e.value
                    break
        else:  # <9>
            try:  # <10>
                if _s is None:  # <11>
                    _y = next(_i)
                else:
                    _y = _i.send(_s)
            except StopIteration as _e:  # <12>
                _r = _e.value
                break
RESULT = _r  # <13>
# end::YIELD_FROM_EXPANSION[]
| StarcoderdataPython |
1758344 | import numpy as _np
import copy as _copy
def read_energy_acceptance_file(fname, eRF):
    """Read an energy-acceptance table and split it by acceptance sign.

    Parameters
    ----------
    fname : str
        Path of a whitespace-separated text file.  Blank lines and lines
        starting with '#' are ignored; on data lines, column 4 is the
        longitudinal position and column 7 the energy acceptance
        (0-based column indices).
    eRF : float
        RF acceptance used to cap the absolute value of each entry.

    Returns
    -------
    (accp, accn) : tuple of numpy.ndarray
        Nx2 arrays of [position, min(|acceptance|, eRF)] for positive and
        non-positive acceptances, respectively.
    """
    accp, accn = [], []
    # Fix: use a context manager so the file handle is closed
    # deterministically (the original left the handle to the GC).
    with open(fname) as fobj:
        for line in fobj:
            line = line.strip()
            if not line or line[0] == '#':
                continue
            values = [float(word) for word in line.split()]
            pos, e_ac = values[4], values[7]
            if e_ac > 0.0:
                accp.append([pos, min(abs(e_ac), eRF)])
            else:
                accn.append([pos, min(abs(e_ac), eRF)])
    return (_np.array(accp), _np.array(accn))
def read_twiss_file(fname, orig_parameters):
    """Parse a twiss file into element names, optics rows and parameters.

    Lines starting with '*' are headers and are skipped.  Lines starting
    with '#' carry lattice parameters (#MCF, #I1..#I6) that are stored on
    a deep copy of *orig_parameters*.  Any other line is a data row: when
    its 4th token is positive, tokens [2:] are converted to floats and two
    zero columns (reserved for acceptances) are appended.

    Returns (elements, twiss, parameters) where the first two are numpy
    arrays.
    """
    parameters = _copy.deepcopy(orig_parameters)
    # Tag -> attribute name on the parameters object.
    _attr_for_tag = {
        '#MCF': 'mcf',
        '#I1': 'latt_i1',
        '#I2': 'latt_i2',
        '#I3': 'latt_i3',
        '#I4': 'latt_i4',
        '#I5': 'latt_i5',
        '#I6': 'latt_i6',
    }
    element_names = []
    twiss_rows = []
    for raw in open(fname):
        tokens = raw.strip().split()
        if not tokens or tokens[0][0] == '*':
            continue
        if tokens[0][0] == '#':
            attr = _attr_for_tag.get(tokens[0])
            if attr is not None:
                setattr(parameters, attr, float(tokens[1]))
            continue
        if float(tokens[3]) > 0:
            # Two trailing zeros reserved for acceptance insertion later on.
            row = [float(tok) for tok in tokens[2:]] + [0, 0]
            twiss_rows.append(row)
            element_names.append(tokens[0])
    return (_np.array(element_names), _np.array(twiss_rows), parameters)
| StarcoderdataPython |
36676 | <gh_stars>0
import youtube_dl
class Download2Mp3:
    """Download the audio track of a video URL as an MP3 via youtube_dl.

    Parameters
    ----------
    CallableHook : callable, optional
        youtube_dl progress hook; when given it is registered in the
        download options.
    """

    def __init__(self, CallableHook=None):
        self.hook = CallableHook
        self.notDownloaded = []  # URLs whose download raised an error
        self.setupDownloadParam()

    # Public functions
    def downloadMusicFile(self, url):
        """Download *url* as MP3.

        Raises TypeError for non-string input; any download failure is
        recorded in self.notDownloaded and re-raised as RuntimeError.
        """
        if type(url) is not str:
            raise TypeError("url argument is not a string.")
        try:
            with youtube_dl.YoutubeDL(self.ydlParams) as ydl:
                ydl.download([url])
        except Exception as e:
            # Remember the failing URL so callers can retry or report later.
            self.notDownloaded.append(url)
            raise RuntimeError(str(e))

    def musicsNotDownloaded(self):
        """Return the list of URLs that failed to download."""
        return self.notDownloaded

    # Non-Public functions
    def setupDownloadParam(self):
        """Build self.ydlParams.

        Fix: the original duplicated the entire options dict for the
        hook/no-hook cases; build it once and add the hook if present.
        """
        params = {
            'outtmpl': 'downloadMusics/%(title)s.%(ext)s',
            'noplaylist': True,
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
        }
        if self.hook is not None:
            params['progress_hooks'] = [self.hook]
        self.ydlParams = params
| StarcoderdataPython |
8049604 | <filename>samples/uvas/heat_map_generation/count_heatMap.py
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib import cm
def segment_hilera(pt1, pt2, delta_h):
    """Split the row (hilera) from *pt1* to *pt2* into equal intervals of
    at most *delta_h* pixels.

    Returns (n_intervals, x_coords, y_coords); the coordinate lists hold
    the n_intervals + 1 segment endpoints.  Interior points are rounded,
    the two endpoints are kept exact.
    """
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    # Euclidean length of the row decides how many pieces fit.
    length = np.sqrt(dx ** 2 + dy ** 2)
    n_intervals = int(np.ceil(length / delta_h))
    step_x = dx / float(n_intervals)
    step_y = dy / float(n_intervals)
    xs = [pt1[0]]
    ys = [pt1[1]]
    for k in range(1, n_intervals):
        xs.append(round(pt1[0] + k * step_x))
        ys.append(round(pt1[1] + k * step_y))
    # The last interval may be shorter than delta_h, so the true end point
    # is appended explicitly.
    xs.append(pt2[0])
    ys.append(pt2[1])
    return n_intervals, xs, ys
def create_frame_intervals(start_frame, end_frame, n_intervals):
    """Divide the frame range [start_frame, end_frame] into *n_intervals*
    bins for per-interval bunch counting.

    Returns a float array of n_intervals + 1 bin edges.  All bins share
    the same integer step; the last bin absorbs any remainder.
    """
    step = int(np.floor((end_frame - start_frame) / n_intervals))
    edges = np.zeros(n_intervals + 1)
    edges[0] = start_frame
    edges[-1] = end_frame
    for idx in range(1, n_intervals):
        edges[idx] = start_frame + step * idx
    return edges
def count_intervals(frames, pts_hilera, frame_limits, DELTA_H):
    """Count detected bunches per segment of one row.

    Parameters
    ----------
    frames : list of int
        Frame numbers of the detections.
    pts_hilera : tuple
        ((x1, y1), (x2, y2)) start and end coordinates of the row.
    frame_limits : tuple of int
        (first_frame, last_frame) where the row appears in the video.
    DELTA_H : int
        Segment length in pixels.

    Returns (counts, segments): detections per segment plus the segment
    coordinate pairs shaped for a matplotlib LineCollection.
    """
    start_pt, end_pt = pts_hilera
    first_frame, last_frame = frame_limits
    # Split the row geometrically, then split the frame range into the
    # same number of bins so detections map onto row segments.
    n_segments, xs, ys = segment_hilera(start_pt, end_pt, DELTA_H)
    bins = create_frame_intervals(first_frame, last_frame, n_segments)
    counts, _ = np.histogram(frames, bins=bins)
    # Shape the row points into consecutive coordinate pairs for plotting.
    pts = np.array([xs, ys]).T.reshape(-1, 1, 2)
    segments = np.concatenate([pts[:-1], pts[1:]], axis=1)
    return counts, segments
def plot_heatmap(counts, segments, ax, linewidth, colmap, norm):
    """Draw the per-segment bunch density of one row onto *ax*.

    Parameters
    ----------
    counts : array-like
        Detections per segment (drives the color of each segment).
    segments : array-like
        Segment coordinate pairs as produced by count_intervals().
    ax : matplotlib Axes
        Axes the heat map is drawn on.
    linewidth : float
        Width of the drawn row.
    colmap : matplotlib colormap
        Colormap for the heat map colors.
    norm : matplotlib norm
        Normalization mapping counts to colormap values.
    """
    collection = LineCollection(segments, cmap=colmap, norm=norm)
    collection.set_array(counts)
    collection.set_linewidth(linewidth)
    line = ax.add_collection(collection)
def main():
    """Smoke-test the heat-map pipeline on hand-made detection data.

    Fixes two call-signature bugs in the original: count_intervals() was
    called without its DELTA_H argument and plot_heatmap() without its
    linewidth/colmap/norm arguments, so the script raised TypeError
    before drawing anything.
    """
    # Row endpoints in image coordinates (two rows).
    test_pt1 = (186, 166)
    test_pt2 = (327, 372)
    test_pt11 = (280, 94)
    test_pt22 = (438, 327)
    test_pts = [(test_pt1, test_pt2), (test_pt11, test_pt22)]
    # (first_frame, last_frame) for each row.
    test_frame_pts = [(10, 100), (0, 200)]
    # Frame numbers of fake detections for each row.
    test_frames_1 = [5,40,20,30,70,100,56,20,50,90,100,20,45,2,70,24,87,39,44,
        34,67,57,47,35,10,12,24,67,58,68,89,79,79,90]
    test_frames_2 = [20,30,20,30,120,200,56,20,51,110,130,200,125,75,76,95,101,139,144,
        134,67,117,47,135,10,112,24,167,58,68,189,79,179,190]
    test_frames = [test_frames_1, test_frames_2]
    delta_h = 50  # segment length in pixels -- tune as needed
    fig, ax = plt.subplots()
    colmap = cm.get_cmap('jet')
    for hilera_idx in range(len(test_pts)):
        counts, segments = count_intervals(
            test_frames[hilera_idx], test_pts[hilera_idx],
            test_frame_pts[hilera_idx], delta_h)
        # Normalize colors per row so each row spans the full colormap.
        norm = plt.Normalize(counts.min(), counts.max())
        plot_heatmap(counts, segments, ax, 4, colmap, norm)
    img = plt.imread("./vinaBack.png")
    ax.imshow(img)
    plt.show()
| StarcoderdataPython |
5136256 | import unittest
from beziers.cubicbezier import CubicBezier
from beziers.point import Point
from beziers.path import BezierPath
class CubicMethods(unittest.TestCase):
  """Unit tests for beziers.cubicbezier.CubicBezier."""

  def test_extremes(self):
    # Expected values cross-checked against the JS bezier library:
    # console.log(Bezier(65,59, 194,90, 220,260, 70,261).extrema())
    q = CubicBezier(
      Point(65,59), Point(194,90), Point(220,260), Point(70,261)
    )
    r = q.findExtremes()
    self.assertEqual(len(r), 1)
    self.assertAlmostEqual(r[0], 0.5275787707261016)
    # With inflections=True the inflection point t is reported as well.
    r = q.findExtremes(inflections = True)
    self.assertEqual(len(r), 2)
    self.assertAlmostEqual(r[0], 0.4512987012987013)
    self.assertAlmostEqual(r[1], 0.5275787707261016)

  def test_length(self):
    # Arc length of a sample cubic.
    q = CubicBezier(
      Point(120,160), Point(35,200), Point(220,260), Point(220,40)
    )
    self.assertAlmostEqual(q.length,272.87003168)

  def test_align(self):
    # aligned() translates/rotates the curve so it starts at the origin
    # and ends on the x axis.
    q = CubicBezier(
      Point(120,160), Point(35,200), Point(220,260), Point(220,40)
    )
    s = q.aligned()
    self.assertAlmostEqual(s[0].x,0.0)
    self.assertAlmostEqual(s[0].y,0.0)
    self.assertAlmostEqual(s[1].x,-85.14452515537582)
    self.assertAlmostEqual(s[1].y,-39.69143277919774)
    self.assertAlmostEqual(s[2].x,-12.803687993289572)
    self.assertAlmostEqual(s[2].y,140.84056792618557)
    self.assertAlmostEqual(s[3].x,156.2049935181331)
    self.assertAlmostEqual(s[3].y,0.0)

  def test_curvature(self):
    q = CubicBezier(
      Point(122,102), Point(35,200), Point(228,145), Point(190,46)
    )
    self.assertAlmostEqual(q.curvatureAtTime(0.5),-103450.5)

  def test_loop(self):
    # A curve without a self-intersection reports hasLoop falsy ...
    q = CubicBezier(
      Point(171,272), Point(388,249), Point(167,444), Point(388,176)
    )
    self.assertTrue(not q.hasLoop)
    # ... a self-intersecting one returns the two t parameters of the
    # crossing; evaluating the curve at both must yield the same point.
    q = CubicBezier(
      Point(171,272), Point(595,249), Point(167,444), Point(388,176)
    )
    roots = q.hasLoop
    p1 = q.pointAtTime(roots[0])
    p2 = q.pointAtTime(roots[1])
    self.assertTrue(q.hasLoop)
    self.assertEqual(p1,p2)
8009339 | """
This is a module used in a test, not a test itself.
"""
from zope.interface import classProvides
from twisted.plugin import IPlugin
from eridanus.ieridanus import IEridanusPluginProvider
from eridanus.plugin import Plugin
class SadPlugin(Plugin):
    """
    A plugin that does nothing.
    """
    classProvides(IPlugin, IEridanusPluginProvider)

# Here we import a mythical module to generate an ImportError.
# (Deliberate: the test that loads this module expects the import to fail,
# so do NOT "fix" this line.)
import gyre_and_gimble_in_the_wabe
# Bare reference keeps linters from flagging the import as unused.
gyre_and_gimble_in_the_wabe
| StarcoderdataPython |
12839522 | <gh_stars>1-10
from typing import Optional, Dict, List
import json
import os
import re
from pprint import pprint
# Root of a local checkout of the rust-lang/libc repository; overridable
# via the LIBC_REPO_PATH environment variable.
LIBC_REPO_PATH = os.getenv("LIBC_REPO_PATH", "libc")
# Maps each libc source file (relative to src/) to the Rust cfg predicate
# components (target_os / target_arch / target_env / target_pointer_width /
# target_family values) under which its definitions are compiled.
PREDICATES = {
    "fuchsia/mod.rs": {"os": ["fuchsia"]},
    "unix/bsd/apple/mod.rs": {"os": ["macos", "ios"]},
    "unix/bsd/freebsdlike/dragonfly/mod.rs": {"os": ["dragonfly"]},
    "unix/bsd/freebsdlike/freebsd/mod.rs": {"os": ["freebsd"]},
    "unix/bsd/freebsdlike/mod.rs": {"os": ["freebsd", "dragonfly"]},
    "unix/bsd/netbsdlike/mod.rs": {"os": ["openbsd", "netbsd"]},
    "unix/bsd/netbsdlike/netbsd/mod.rs": {"os": ["netbsd"]},
    "unix/haiku/mod.rs": {"os": ["haiku"]},
    "unix/linux_like/android/mod.rs": {"os": ["android"]},
    "unix/linux_like/emscripten/mod.rs": {"os": ["emscripten"]},
    "unix/linux_like/linux/arch/generic/mod.rs": {"os": ["linux"]},
    "unix/linux_like/linux/arch/mips/mod.rs": {"os": ["linux"], "arch": ["mips", "mips64"]},
    "unix/linux_like/linux/arch/powerpc/mod.rs": {"os": ["linux"], "arch": ["powerpc", "powerpc64"]},
    "unix/linux_like/linux/arch/sparc/mod.rs": {"os": ["linux"], "arch": ["sparc", "sparc64"]},
    "unix/solarish/mod.rs": {"os": ["solaris", "illumos"]},
    "unix/linux_like/linux/gnu/mod.rs": {"os": ["linux"], "env": ["gnu"]},
    "unix/linux_like/linux/musl/mod.rs": {"os": ["linux"], "env": ["musl"]},
    "unix/linux_like/linux/uclibc/mod.rs": {"os": ["linux"], "env": ["uclibc"]},
    "unix/linux_like/android/b32/mod.rs": {"os": ["android"], "pointer_width": ["32"]},
    "unix/linux_like/android/b64/mod.rs": {"os": ["android"], "pointer_width": ["64"]},
    "unix/linux_like/linux/mod.rs": {"os": ["linux"]},
    "unix/mod.rs": {"family": ["unix"]},
    "vxworks/mod.rs": {"os": ["vxworks"]},
    "unix/bsd/mod.rs": {"os": ["macos", "ios", "watchos", "freebsd", "dragonfly", "openbsd", "netbsd"]},
    "unix/hermit/mod.rs": {"os": ["hermit"]},
    "unix/newlib/mod.rs": {"env": ["newlib"]},
    "unix/redox/mod.rs": {"os": ["redox"]},
}
def extract_paths(rg_lines: List[str]) -> List[str]:
    """Collect libc-relative source paths from ripgrep JSON output lines.

    Only "match" events are considered; each absolute path is reduced to
    the part after "src/".  Duplicates are removed and the result is
    sorted for determinism.
    """
    found = set()
    for raw in rg_lines:
        event = json.loads(raw)
        if event["type"] != "match":
            continue
        full_path = event["data"]["path"]["text"]
        found.add(re.match(".+src/(.+)", full_path).group(1))
    return sorted(found)
def search(prefix: str, ident: str) -> List[Dict[str, List[str]]]:
    """Run ripgrep over the libc sources for ``pub <prefix> <ident>`` and
    map every file containing a hit to its cfg predicate via PREDICATES.
    """
    stream = os.popen(f"rg --json 'pub {prefix} {ident}' {LIBC_REPO_PATH}")
    raw_lines = [ln for ln in stream.read().split("\n") if ln != ""]
    return [PREDICATES[path] for path in extract_paths(raw_lines)]
def emit_predicate(kind: str, cond: List[str]) -> str:
    """Render one cfg predicate.

    A single value becomes ``kind = "v"``; several values are combined
    into ``any(kind = "v1", kind = "v2", ...)``.
    """
    clauses = [f'{kind} = "{c}"' for c in cond]
    if len(clauses) == 1:
        return clauses[0]
    return "any(" + ", ".join(clauses) + ")"
def emit_cfg(cfgs: List[Dict[str, List[str]]], indent: int) -> str:
    """Combine several cfg dicts into one multi-line ``any(...)`` block.

    Each dict's present keys (in a fixed order) are rendered through
    emit_predicate and, when more than one, joined with ``all(...)``.
    Members are indented by *indent* + 1 spaces, the closing parenthesis
    by *indent* spaces.
    """
    rendered = []
    for cfg in cfgs:
        parts = [
            emit_predicate(f"target_{kind}", cfg[kind])
            for kind in ("os", "arch", "env", "pointer_width", "family")
            if kind in cfg
        ]
        rendered.append(parts[0] if len(parts) == 1 else "all(" + ", ".join(parts) + ")")
    body = "".join(" " * (indent + 1) + pred + ",\n" for pred in rendered)
    return "any(\n" + body + " " * indent + ")"
if __name__ == "__main__":
    # Generates Rust source (printed to stdout) that re-exports libc's
    # RLIMIT_* constants and rlimit APIs behind the correct cfg gates,
    # discovered by grepping a local libc checkout via search().
    resources = [
        "RLIMIT_AS",
        "RLIMIT_CORE",
        "RLIMIT_CPU",
        "RLIMIT_DATA",
        "RLIMIT_FSIZE",
        "RLIMIT_KQUEUES",
        "RLIMIT_LOCKS",
        "RLIMIT_MEMLOCK",
        "RLIMIT_MSGQUEUE",
        "RLIMIT_NICE",
        "RLIMIT_NOFILE",
        "RLIMIT_NOVMON",
        "RLIMIT_NPROC",
        "RLIMIT_NPTS",
        "RLIMIT_NTHR",
        "RLIMIT_POSIXLOCKS",
        "RLIMIT_RSS",
        "RLIMIT_RTPRIO",
        "RLIMIT_RTTIME",
        "RLIMIT_SBSIZE",
        "RLIMIT_SIGPENDING",
        "RLIMIT_STACK",
        "RLIMIT_SWAP",
        "RLIMIT_UMTXP",
        "RLIMIT_VMEM",
    ]
    # File-level allow attributes for the generated code.
    print(
        "#![allow("
        "clippy::assertions_on_constants, "
        "clippy::absurd_extreme_comparisons, "
        "clippy::cast_possible_truncation, "
        "unused_comparisons)]\n"
    )
    # Each resource constant: cast to u8 where libc defines it,
    # u8::MAX as the sentinel everywhere else.
    resource_cfgs = []
    for resource in resources:
        cfg = emit_cfg(search("const", resource), indent=0)
        resource_cfgs.append((resource, cfg))
        print(f"#[cfg({cfg})]")
        print(f"pub const {resource}: u8 = libc::{resource} as u8;")
        print()
        print(f"#[cfg(not({cfg}))]")
        print(f"pub const {resource}: u8 = u8::MAX;")
        print()
    print("// " + "-" * 77)
    print()
    # A generated #[test] asserting every libc value fits in 0..128.
    print("#[allow(clippy::too_many_lines)]")
    print("#[test]")
    print("fn resource_bound() {")
    for resource, cfg in resource_cfgs:
        print(f"    #[cfg({cfg})]")
        print(f"    assert!((0..128).contains(&libc::{resource}));")
        print()
    print("}")
    print()
    # Re-export rlimit/getrlimit/setrlimit, preferring the 64-bit variants
    # where libc provides them (rlimit is a struct, the others are fns).
    for ident in ["rlimit", "getrlimit", "setrlimit"]:
        if ident == "rlimit":
            cfg64 = emit_cfg(search("struct", ident + "64"), indent=0)
            cfg = emit_cfg(search("struct", ident), indent=0)
        else:
            cfg64 = emit_cfg(search("fn", ident + "64"), indent=0)
            cfg = emit_cfg(search("fn", ident), indent=0)
        print(f"#[cfg({cfg64})]")
        print(f"pub use libc::{ident}64 as {ident};")
        print()
        print(f"#[cfg(all(not({cfg64}), {cfg}))]")
        print(f"pub use libc::{ident};")
        print()
    # RLIM_INFINITY: widen to u64 where defined, u64::MAX elsewhere.
    ident = "RLIM_INFINITY"
    cfg = emit_cfg(search("const", ident), indent=0)
    print(f"#[cfg({cfg})]")
    print(f"pub const {ident}: u64 = libc::{ident} as u64;")
    print()
    print(f"#[cfg(not({cfg}))]")
    print(f"pub const {ident}: u64 = u64::MAX;")
    print()
| StarcoderdataPython |
254621 | # ToDo
#
# Add a syntax like
# .define mp_XXX
# with no definition of the value. When this is encountered,
# the script will get the variable's value from the python
# global variables. This makes it easier to define multiline
# strings and get them into the output.
'''
A macro processor; print the man page for details.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import string, sys, os, re
script = os.path.split(sys.argv[0])[1]
verbose = 0 # Log to stderr if true
dump_macros = 0
special_char = "."
files_to_process = None
output_on = 1
out = None # Output stream writing function
current_file = ""
current_line = 0
macros = {}
macro_names = []
start_dir = os.getcwd()
cmd_char = "." # Character that denotes a command line
include_dirs = [] # Where to search for include files
# Globals to help with evaluating chunks of code
code_mode = 0 # If true, we're in a code section
current_code = "" # String to hold the code lines
code_names = {} # Keep track of named sections of code
# The following regular expression is used to identify lines that contain
# formatting strings that need to be expanded with the global dictionary.
need_global_expansion = re.compile(r"%\(([a-zA-Z][a-zA-Z_]*)\)")
#x = " %(a)s kdjfkdj %(kj)d "
#mo = need_global_expansion.search(x)
#a = 9
#kj = 898
#if mo:
# print "match"
# print "groups() = ", mo.groups()
# varname = mo.groups()[0]
# if globals().has_key(varname):
# print "In globals dict"
# else:
# print "Not in globals dict"
#else:
# print "no match"
#print "Substitution:"
#print x % globals()
#sys.exit(0)
special_macros = ''' 25 Aug 2002 Is the current date in DD MMM YYYY format.
25 Is the current date's day in DD form.
08 Is the current date's month in MM (01-12) form.
Aug Is the current date's month in MMM (Jan-Dec) form.
02 Is the current date's year in YY form.
2002 Is the current date's year in YYYY form.
10:06:34 Is the current time in HH:MM:SS AM/PM format.
10 Is the current time's hour in 24 hour format
10 Is the current time's hour in 12 hour format
06 Is the current time's minute (00-59)
34 Is the current time's second (00-59)
AM Is the current time's AM or PM designator
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details. The GNU General Public License Notice (text)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.<p>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.<p>
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA<p>
See <a href="http://www.gnu.org/licenses/licenses.html">http://www.gnu.org/licenses/licenses.html</a> for more details.
The GNU General Public License Notice (html)'''
GPL_txt = '''This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.'''
gnu_url = "http://www.gnu.org/licenses/licenses.html"
GPL_html = '''This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.<p>
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.<p>
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA<p>
See <a href="%s">%s</a> for more details.
''' % (gnu_url, gnu_url)
del gnu_url
def Usage():
    '''Print a short usage summary to stdout (shown when the script is
    invoked without input files).  The text is %-interpolated against the
    module globals (script name, special_macros listing).
    '''
    print '''Usage: %(script)s [options] file1 [file2...]
Options
-d Print list of macro definitions found in files
-h Print a man page
-I dir Define an include directory
Command lines have the form (whitespace after '.' optional):
. command [parameters]
Commands:
define Macro = Value Define a new macro
# This line is a comment
on Turn output on
off Turn output off
code [name] Define a [named] section of python code
endcode End of python code section
include Insert a file; error if not found
sinclude Insert a file; no error if not found
cd [dir] Change the current directory
Special macros
%(special_macros)s''' % globals()
def ManPage():
    '''Print the full manual page to stdout (the -h option).  The text is
    %-interpolated against the module globals, which is why literal
    percent-expansions in the examples are doubled (%%).
    '''
    print '''NAME
%(script)s - Macro processor
Version: $Revision: 1.9 $
SYNOPSIS
%(script)s [options] file1 [file2...]
DESCRIPTION
The %(script)s script is intended to be used as a macro processor.
It is primarily a string substitution tool. However, since it is
implemented in python, it allows the use of arbitrary python code
in your text files.
%(script)s replaces strings in the input files, then prints them
to stdout. It knows nothing about things like identifiers or
tokens in programming languages. It's a dumb text substitution
program. Thus, you must make SURE your macro names will not be
found in text except where you deliberately put them in. A
suggestion would be to prefix all macro names with "mp_".
For example, suppose the macro mp_MyMacro had the value of
"abc123" (quote characters not included). Then if the macro
processor encountered the following line
This is mp_MyMacro4.
the line would be transformed to:
This is abc1234.
In other words, the string "mp_MyMacro" was found on the line
and the substitution was made. This is done until no more macro
definitions are found on the line; then the line is printed to
stdout.
Programmers would probably see the 'mp_MyMacro4' above as a
token and expect the substitution to be based on tokens found.
The %(script)s doesn't behave this way; a common mistake is to
define macros with similar names:
.define mp_MyMacro =abc123
.define mp_MyMacro4 =xyz
Then, when the macro processor encounters 'mp_MyMacro4' in the
input text, we'd expect to see 'xyz' subsituted. But what
happens is that 'abc123' is substituted, since the name
mp_MyMacro was matched. The lesson is to never name a macro
with a string that will appear in another macro name.
You can add built-in macros to the script by editting the
BuildSpecialMacros() function.
Options
-d
Print a list of macro definitions found in the input files to
stdout.
-h
Prints this man page.
-I dir
Define an include directory. When a file is included, it
is first looked for in the current directory (or the
indicated directory). If it cannot be found, directories
specified with the -I option are searched sequentially
and the first match is used. More than one -I option
can be given.
Command lines
Command lines to the macro processor are denoted by having a '.'
character in the first column. You can edit the IsCommandLine()
function in the script if you'd like to change this syntax.
The general form of a command is:
. token [token's arguments]
There can be optional whitespace between the '.' character and
the characters of the token.
The allowed command tokens are:
define macro = value
Defines the value of a macro. All characters after the '='
character become part of the macro's value, except for the
newline.
cd dir
Set the current directory of the macro processor to dir.
If dir is missing, the current directory is set to what it
was at the start of the script.
code [code_section_name]
endcode
These two tokens must appear on a line by themselves. They
delimit lines of text that will be interpreted as python
code. Typically, this is used to define and set some global
variables that are used for variable expansions in lines.
See the Examples section below for details. However,
arbitrary processing can be done. Any variables that you
define in your code section are added to the global variable
namespace of the %(script)s script.
If code_section_name is given, it must be encountered only
once while processing, otherwise an error will occur and
processing stops. You can use this feature to avoid
accidentally including files multiple times (if your code
in the included file has a name, the %(script)s script will
stop executing the second time it is included).
include file
Used to include another file at this point. The behavior is
to read all the lines of the indicated file and insert them
at the current location. It is a fatal error if the file
cannot be found. The
sinclude file
Same as include, except it's not an error if the file cannot
be found.
#
If this character follows the command line string (with optional
preceding whitespace), the line is considered a comment and the
line as a whole is discarded.
on
Turns macro substitution back on if it was off. Ignored if it is
already on.
off
Turns macro substitution off. Ignored if it is already off.
Built-in macros
There are some built-in macros:
%(special_macros)s
Python variables
You may use python variable references in your text. These references
must be of the form '%%(varname)s', where varname is the name of a
python variable and s is a formatting string, such as 's', 'd', 'f',
etc. These expressions will be evaluated with the global dictionary
in effect at the time the code is evaluated.
Typical use of this functionality is to give counters that get
incremented or to define multiline strings.
Example
The following example shows a simple use of python code to
generate serial numbers for a set of files. The idea is that the
serial numbers will be incremented each time they are referenced,
allowing a set of files to have unique numbers.
File 1 contains:
.code
sn = 100 # Starting serial number
def IncrementSerialNumber():
global sn
sn += 1
.endcode
This is file 1. The serial number is %%(sn)d.
.code
IncrementSerialNumber()
.endcode
File 2 contains:
This is file 2. The serial number is %%(sn)d.
.code
IncrementSerialNumber()
.endcode
Running the command
python mp.py 1 2
produces the output:
This is file 1. The serial number is 100.
This is file 2. The serial number is 101.
Note: any global variables and functions you define in your code
will be put into the %(script)s script's global namespace.
''' % globals()
def Log(str):
    '''Write a diagnostic message, prefixed with "+ ", when verbose mode
    (-v) is on.  NOTE(review): the parameter shadows the builtin str.
    '''
    if verbose:
        #sys.stderr.write("+ " + str)
        sys.stdout.write("+ " + str) #xx
def Initialize():
    '''Parse the command line, record the input file list, set the output
    stream, and install the built-in macros.  Sets the module globals
    files_to_process, out, dump_macros, verbose and include_dirs.
    '''
    global files_to_process
    global out
    import getopt
    try:
        optlist, args = getopt.getopt(sys.argv[1:], "dhI:v")
    except getopt.error, str:
        print "getopt error: %s\n" % str
        sys.exit(1)
    for opt in optlist:
        if opt[0] == "-d":
            global dump_macros
            dump_macros = 1
        if opt[0] == "-v":
            global verbose
            verbose = 1
        if opt[0] == "-h":
            ManPage()
            sys.exit(0)
        if opt[0] == "-I":
            global include_dirs
            include_dirs.append(opt[1])
    files_to_process = args
    # NOTE(review): Usage() does not exit, so processing continues with an
    # empty file list after printing the usage text -- confirm intent.
    if len(files_to_process) == 0:
        Usage()
    Log("dump_macros = %d\n" % dump_macros)
    Log("verbose = %d\n" % verbose)
    Log("files_to_process = %s\n" % `files_to_process`)
    Log("-" * 70 + "\n")
    # All output goes through this function pointer so it can be redirected.
    out = sys.stdout.write
    BuildSpecialMacros()
    # Command lines are recognized by a single-character prefix.
    assert(len(cmd_char) == 1)
def BuildSpecialMacros():
    '''Construct the special macros used by the script. Edit this
    function as needed to add your own built-in macros.

    Each macro value is a list [text, defining_line, defining_file]; the
    built-ins use 0 and "" since they are not defined in any input file.
    '''
    global macros, macro_names
    import time
    tm = time.localtime(time.time())
    # Each entry is [example_output, strftime_format]; the example is the
    # macro NAME that will be replaced by today's strftime() expansion.
    settings = [
        ["25 Aug 2002", "%d %b %Y"],
        ["25", "%d"],
        ["08", "%m"],
        ["Aug", "%b"],
        ["02", "%y"],
        ["2002", "%Y"],
        ["10:06:34", "%H:%M:%S"],
        ["10", "%H"],
        ["10", "%I"],
        ["06", "%M"],
        ["34", "%S"],
        ["AM", "%p"],
    ]
    for setting in settings:
        key = setting[0]
        value = [time.strftime(setting[1], tm), 0, ""]
        macros[key] = value
        if verbose:
            Log("%-20s %s\n" % (key, value))
    # Add the GPL macros.
    macros['This program is free software; you can redistribute it and/or \nmodify it under the terms of the GNU General Public License as \npublished by the Free Software Foundation; either version 2 of \nthe License, or (at your option) any later version. \n\nThis program is distributed in the hope that it will be useful, \nbut WITHOUT ANY WARRANTY; without even the implied warranty of \nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \nGNU General Public License for more details. \n\nYou should have received a copy of the GNU General Public \nLicense along with this program; if not, write to the Free \nSoftware Foundation, Inc., 59 Temple Place, Suite 330, Boston, \nMA 02111-1307 USA \n\nSee http://www.gnu.org/licenses/licenses.html for more details.'] = [GPL_txt, 0, ""]
    macros['This program is free software; you can redistribute it and/or \nmodify it under the terms of the GNU General Public License as \npublished by the Free Software Foundation; either version 2 of \nthe License, or (at your option) any later version.<p> \n\nThis program is distributed in the hope that it will be useful, \nbut WITHOUT ANY WARRANTY; without even the implied warranty of \nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \nGNU General Public License for more details.<p> \n\nYou should have received a copy of the GNU General Public \nLicense along with this program; if not, write to the Free \nSoftware Foundation, Inc., 59 Temple Place, Suite 330, Boston, \nMA 02111-1307 USA<p> \n\nSee <a href="http://www.gnu.org/licenses/licenses.html">http://www.gnu.org/licenses/licenses.html</a> for more details. \n'] = [GPL_html, 0, ""]
    # Keep a sorted name list so macro expansion scans in a stable order.
    macro_names = macros.keys()
    macro_names.sort()
def Error(msg):
    '''Report a fatal error: write *msg* to stderr, then terminate the
    program with exit status 1.
    '''
    stream = sys.stderr
    stream.write(msg)
    sys.exit(1)
def ProcessCommandLine(line):
'''The line is a command line, so parse out the command and execute
it. If we're in code mode, the line is appended to the current_code
string.
'''
global output_on, macros, macro_names
global code_mode, current_code, codes_names
# If we're in code mode, just append the line to current_code and return
# (unless we're at the code end).
if code_mode and line[0] != cmd_char:
Log("Code line: " + line)
current_code = current_code + line
return
str = string.strip(line[1:])
if len(str) == 0:
return
fields = string.split(str)
Log("Command line: " + line)
if len(fields) == 0:
return
cmd = fields[0]
if cmd == "define":
if len(fields) < 3:
Error("Too few fields in line %d of file '%s'\n" % (current_line, current_file))
macro_name = fields[1]
loc_eq = string.find(line, "=")
if loc_eq < 0:
Error("Missing '=' in line %d of file '%s'\n" % (current_line, current_file))
macro_value = line[loc_eq+1:]
# Remove the trailing newline if it is present
if macro_value[-1] == "\n":
macro_value = macro_value[:-1]
if dump_macros:
out("%s = '%s'\n" % (macro_name, macro_value))
if macros.has_key(macro_name):
msg = "Warning: redefining macro name '%s' in line %d of file '%s'\n" % (macro_name, current_line, current_file)
msg = msg + (" Previous definition at line %d of file '%s'\n" % (macros[macro_name][1], macros[macro_name][2]))
sys.stderr.write(msg)
macros[macro_name] = [macro_value, current_line, current_file]
macro_names = macros.keys()
macro_names.sort()
Log("Defined %s to '%s'\n" % (macro_name, macro_value))
elif cmd == "code":
# Beginning of a code section. If it's got a name, let's make
# sure it hasn't been executed before.
if len(fields) > 1:
# It's got a name
code_name = fields[1]
if code_names.has_key(code_name):
msg = "Error: code section named '%s' at line %d of file '%s' already defined\n" % (code_name, curr_line, curr_file)
msg = msg + ("Previous definition line %d in file '%s'\n" % (code_names[code_name][0], code_names[code_name][1]))
Error(msg)
else:
# Add it to the dictionary
code_names[code_name] = [curr_line, curr_file]
# Flag that we're now reading code
code_mode = 1
elif cmd == "endcode":
# End of a code section. Compile and execute the code.
if not code_mode:
Error("Error: endcode at line %d of file '%s' missing a matching previous 'code' token\n" % (curr_line, curr_file))
code_mode = 0
# Compile and execute. If we get an exception, the user will
# know about where the problem is because the file and line
# number of the endcode statement will be in the traceback.
loc = "[%s:%d]" % (current_file, current_line)
co = compile(current_code, loc, "exec")
exec co
# Save our variables in the global namespace, but remove this
# function's locals.
code_variables = locals()
vars = ["loc", "co", "str", "fields", "line", "cmd"]
for var in vars:
del code_variables[var]
# Now add these to the global dictionary
g = globals()
for varname in code_variables.keys():
g[varname] = code_variables[varname]
elif cmd == "cd":
if len(fields) > 1:
os.chdir(fields[1])
else:
os.chdir(start_dir)
elif cmd[0] == "#":
# It's a comment - ignore it
pass
elif cmd == "on":
output_on = 1
elif cmd == "off":
output_on = 0
elif cmd == "include":
if len(fields) != 2:
Error("Bad include in line %d of file '%s': missing file\n" % (current_line, current_file))
file = FindIncludeFile(fields[1])
if file == "":
Error("Error: include file '%s' in line %d of file '%s' not found\n" % (fields[1], current_line, current_file))
ProcessFile(file, 0, current_line, current_file)
elif cmd == "sinclude":
if len(fields) != 2:
Error("Bad sinclude in line %d of file '%s'\n" % (current_line, current_file))
file = FindIncludeFile(fields[1])
ProcessFile(file, 1, current_line, current_file)
else:
Error("Command '%s' on line %d of file '%s' not recognized\n" % (cmd, current_line, current_file))
def FindIncludeFile(file):
    '''Locate *file* for an include/sinclude command.

    An existing path (absolute, or relative to the current directory) wins.
    Otherwise each directory in include_dirs is tried in order and the
    first hit is returned.  Returns the resolved path, or an empty string
    when the file cannot be found anywhere.
    '''
    import os
    if os.path.isfile(file):
        return os.path.normcase(os.path.abspath(file))
    # Not found directly -- walk the -I search path.
    for search_dir in include_dirs:
        candidate = os.path.normcase(os.path.join(search_dir, file))
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return ""
def IsCommandLine(line):
    '''Return 1 when *line* must be handled by ProcessCommandLine,
    else 0.  A line qualifies if it starts with the command prefix
    character, or unconditionally while a code section is being read.
    '''
    return 1 if (line[0] == cmd_char or code_mode != 0) else 0
def ExpandMacros(line):
    '''We look for any macro name matches. Any that are found are
    replaced, then we start the search over again so we don't miss
    any macros within macros.

    Substitution is plain text replacement (not token-aware); after macro
    expansion the line may additionally be %-interpolated against the
    script's globals when a code section has been seen.
    '''
    done = 0
    while not done:
        found = 0 # Flags finding at least one macro
        for macro in macro_names:
            pos = string.find(line, macro)
            if pos != -1:
                # Found a macro name in the line
                found = 1
                old_value = macro
                new_value = macros[macro][0]
                # Replaces every occurrence, then rescan from the start so
                # macros introduced by the replacement are expanded too.
                line = string.replace(line, old_value, new_value)
                break
        if found == 0:
            done = 1
    # If current_code is not the null string, we've had at least one
    # code section, so evaluate using the global variables. We'll only
    # do this if the line has at least one formatting string of the
    # form %(varname)X, where varname is the name of a global variable
    # and X is s, d, etc.
    if len(current_code) > 0:
        mo = need_global_expansion.search(line)
        if mo:
            line = line % globals()
    return line
def ProcessLine(line):
    '''Dispatch one input line: command lines (and code-section lines) go
    to ProcessCommandLine; ordinary text is expanded and written, unless
    output is turned off or the -d macro dump is active.
    '''
    if IsCommandLine(line):
        ProcessCommandLine(line)
        return
    if output_on and not dump_macros:
        Output(line)
def Output(line):
    '''Send the line to the output stream. First, expand all the macros
    in the line. Then check the character before the trailing newline:
    if it is a '\' character, remove the newline unless the character
    before that is another '\', in which case substitute '\' for the
    '\\' and keep the newline.
    '''
    line = ExpandMacros(line)
    # Lines shorter than two characters cannot carry a trailing backslash
    # before the newline; pass them through unchanged.
    if len(line) < 2:
        out(line)
        return
    if line[-2] == '\\':
        # Second to last character is a backslash
        if len(line) > 2:
            # If the character before the last backslash is a backslash,
            # just output the line as is.
            # NOTE(review): this keeps the doubled backslash, which differs
            # from the docstring's promise to collapse '\\' to '\' --
            # confirm which behavior is intended.
            if line[-3] == "\\":
                out(line)
            else:
                # It's an escaped backslash, so chop off the newline
                out(line[:-2])
        else:
            # It's just a backslash and a newline.
            return
    else:
        out(line)
def ProcessFile(file, ignore_failure_to_open=0, restore_line=0, restore_file=""):
    '''Read in and process each line in the file. The
    ignore_failure_to_open variable is used to handle the sinclude case
    when a file is missing or can't be opened.
    If present, restore_line and restore_file are used to reset the
    current_line and current_file global variables, since we're in a
    recursive call from include or sinclude commands.
    '''
    global current_file
    global current_line
    try:
        ifp = open(file)
    except:
        if ignore_failure_to_open:
            return
        else:
            Error("Couldn't open file '%s' for reading\n" % file)
    # Template for the started/finished log banner (shadows builtin str).
    str = "\n\n===== %s processing file '%s' =====\n\n"
    Log(str % ("Started", file))
    line = ifp.readline()
    current_file = file
    current_line = 1
    while line:
        ProcessLine(line)
        line = ifp.readline()
        current_line = current_line + 1
    ifp.close()
    # Restore the caller's position when returning from an include.
    if restore_line:
        current_line = restore_line
    if restore_file:
        current_file = restore_file
    Log(str % ("Finished", file))
def main():
    '''Entry point: parse options, then macro-process each input file in
    order, exiting with status 0 on success.'''
    Initialize()
    for file in files_to_process:
        ProcessFile(file)
    sys.exit(0)

main()
| StarcoderdataPython |
11394931 | #===========================================#
# #
# #
#----------CROSSWALK RECOGNITION------------#
#-----------WRITTEN BY N.DALAL--------------#
#-----------------2017 (c)------------------#
# #
# #
#===========================================#
#Copyright by <NAME>, 2017 (c)
#Licensed under the MIT License:
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import cv2
import math
import scipy.misc
import PIL.Image
import statistics
import timeit
import glob
from sklearn import linear_model, datasets
#==========================#
#---------functions--------#
#==========================#
#get a line from a point and unit vectors
def lineCalc(vx, vy, x0, y0):
    '''Return (m, b) for the line y = m*x + b passing through (x0, y0)
    with direction vector (vx, vy).
    '''
    step = 10
    # A second point along the direction vector fixes the line.
    x1 = x0 + step * vx
    y1 = y0 + step * vy
    slope = (y1 - y0) / (x1 - x0)
    intercept = y1 - slope * x1
    return slope, intercept
#the angle at the vanishing point
def angle(pt1, pt2):
    '''Return the angle, in degrees, between the 2-D vectors pt1 and pt2
    (each an (x, y) pair).  The vector norms are printed as a debug aid,
    matching the original behavior.
    '''
    x1, y1 = pt1
    x2, y2 = pt2
    dot = x1 * x2 + y1 * y2
    norm1 = math.hypot(x1, y1)
    norm2 = math.hypot(x2, y2)
    print(norm1)
    print(norm2)
    radians = math.acos(dot / (norm1 * norm2))
    return radians * 180 / math.pi
#vanishing point - cramer's rule
def lineIntersect(m1, b1, m2, b2):
    '''Return (x, y) where the lines y = m1*x + b1 and y = m2*x + b2
    intersect, solved with Cramer's rule.
    '''
    # Rewrite each line as -m*x + 1*y = b and solve the 2x2 system.
    det = (-m1) * 1 - (-m2) * 1
    det_x = b1 * 1 - b2 * 1
    det_y = (-m1) * b2 - (-m2) * b1
    return det_x / det, det_y / det
#process a frame
def process(im):
    '''Detect crosswalk ("zebra") stripes in frame *im*, fit the left and
    right stripe-edge lines with RANSAC, estimate their vanishing point,
    and draw all intermediate results onto the frame.

    Returns (annotated_image, Dx, Dy), where (Dx, Dy) is the offset of the
    vanishing point from the camera's point of view (image centre) in
    regular x/y axis orientation.  Relies on the module globals W and H.
    '''
    # NOTE(review): timeit.timeit() with no arguments benchmarks an empty
    # statement; it is not a wall-clock timestamp, so time_ below is not a
    # real duration.  time.time() was probably intended.
    start = timeit.timeit() #start timer
    #initialize some variables
    x = W
    y = H
    radius = 250 #px
    thresh = 170  # NOTE(review): unused, as are x and focalpx below
    bw_width = 170
    bxLeft = []
    byLeft = []
    bxbyLeftArray = []
    bxbyRightArray = []
    bxRight = []
    byRight = []
    boundedLeft = []
    boundedRight = []
    #1. filter the white color
    lower = np.array([170,170,170])
    upper = np.array([255,255,255])
    mask = cv2.inRange(im,lower,upper)
    #2. erode the frame
    erodeSize = int(y / 30)
    erodeStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (erodeSize,1))
    erode = cv2.erode(mask, erodeStructure, (-1, -1))
    #3. find contours and draw the green lines on the white strips
    # (3-value return is the OpenCV 3.x findContours API)
    _ , contours,hierarchy = cv2.findContours(erode,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE )
    for i in contours:
        bx,by,bw,bh = cv2.boundingRect(i)
        # Keep only wide boxes -- presumably full stripe segments.
        if (bw > bw_width):
            cv2.line(im,(bx,by),(bx+bw,by),(0,255,0),2) # draw the a contour line
            bxRight.append(bx+bw) #right line
            byRight.append(by) #right line
            bxLeft.append(bx) #left line
            byLeft.append(by) #left line
            bxbyLeftArray.append([bx,by]) #x,y for the left line
            bxbyRightArray.append([bx+bw,by]) # x,y for the left line
            cv2.circle(im,(int(bx),int(by)),5,(0,250,250),2) #circles -> left line
            cv2.circle(im,(int(bx+bw),int(by)),5,(250,250,0),2) #circles -> right line
    #calculate median average for each line
    medianR = np.median(bxbyRightArray, axis=0)
    medianL = np.median(bxbyLeftArray, axis=0)
    bxbyLeftArray = np.asarray(bxbyLeftArray)
    bxbyRightArray = np.asarray(bxbyRightArray)
    #4. are the points bounded within the median circle?
    # Outlier rejection: keep points within `radius` px of the median.
    for i in bxbyLeftArray:
        if (((medianL[0] - i[0])**2 + (medianL[1] - i[1])**2) < radius**2) == True:
            boundedLeft.append(i)
    boundedLeft = np.asarray(boundedLeft)
    for i in bxbyRightArray:
        if (((medianR[0] - i[0])**2 + (medianR[1] - i[1])**2) < radius**2) == True:
            boundedRight.append(i)
    boundedRight = np.asarray(boundedRight)
    #5. RANSAC Algorithm
    #select the points enclosed within the circle (from the last part)
    bxLeft = np.asarray(boundedLeft[:,0])
    byLeft = np.asarray(boundedLeft[:,1])
    bxRight = np.asarray(boundedRight[:,0])
    byRight = np.asarray(boundedRight[:,1])
    #transpose x of the right and the left line
    bxLeftT = np.array([bxLeft]).transpose()
    bxRightT = np.array([bxRight]).transpose()
    #run ransac for LEFT
    model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
    ransacX = model_ransac.fit(bxLeftT, byLeft)
    inlier_maskL = model_ransac.inlier_mask_ #right mask
    #run ransac for RIGHT (the same estimator is re-fit)
    ransacY = model_ransac.fit(bxRightT, byRight)
    inlier_maskR = model_ransac.inlier_mask_ #left mask
    #draw RANSAC selected circles
    for i, element in enumerate(boundedRight[inlier_maskR]):
        # print(i,element[0])
        cv2.circle(im,(element[0],element[1]),10,(250,250,250),2) #circles -> right line
    for i, element in enumerate(boundedLeft[inlier_maskL]):
        # print(i,element[0])
        cv2.circle(im,(element[0],element[1]),10,(100,100,250),2) #circles -> right line
    #6. Calcuate the intersection point of the bounding lines
    #unit vector + a point on each line
    vx, vy, x0, y0 = cv2.fitLine(boundedLeft[inlier_maskL],cv2.DIST_L2,0,0.01,0.01)
    vx_R, vy_R, x0_R, y0_R = cv2.fitLine(boundedRight[inlier_maskR],cv2.DIST_L2,0,0.01,0.01)
    #get m*x+b
    m_L,b_L=lineCalc(vx, vy, x0, y0)
    m_R,b_R=lineCalc(vx_R, vy_R, x0_R, y0_R)
    #calculate intersention
    intersectionX,intersectionY = lineIntersect(m_R,b_R,m_L,b_L)
    #7. draw the bounding lines and the intersection point
    m = radius*10
    # Only draw when the vanishing point lies in the upper half of the frame.
    if (intersectionY < H/2 ):
        cv2.circle(im,(int(intersectionX),int(intersectionY)),10,(0,0,255),15)
        cv2.line(im,(x0-m*vx, y0-m*vy), (x0+m*vx, y0+m*vy),(255,0,0),3)
        cv2.line(im,(x0_R-m*vx_R, y0_R-m*vy_R), (x0_R+m*vx_R, y0_R+m*vy_R),(255,0,0),3)
    #8. calculating the direction vector
    POVx = W/2 #camera POV - center of the screen
    POVy = H/2 # camera POV - center of the screen
    Dx = -int(intersectionX-POVx) #regular x,y axis coordinates
    Dy = -int(intersectionY-POVy) #regular x,y axis coordinates
    #focal length in pixels = (image width in pixels) * (focal length in mm) / (CCD width in mm)
    focalpx = int(W * 4.26 / 6.604) #all in mm
    end = timeit.timeit() #STOP TIMER
    time_ = end - start
    print('DELTA (x,y from POV):' + str(Dx) + ',' + str(Dy))
    return im,Dx,Dy
#=============================#
#---------MAIN PROGRAM--------#
#=============================#
#initialization
cap = cv2.VideoCapture('inputVideo.mp4') #load a video
W = cap.get(3) #get width
H = cap.get(4) #get height
#Define a new resolution, keeping the source aspect ratio
ratio = H/W
W = 800
H = int(W * ratio)
#setup the parameters for saving the processed file
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('processedVideo.mp4',fourcc, 15.0, (int(W),int(H)))
# Rolling buffers of per-frame vanishing-point offsets; every 6 frames the
# averages DxAve/DyAve are refreshed and the previous ones kept in Dxold/Dyold.
Dx = []
Dy = []
after =0
DxAve =0
Dxold =0
DyAve =0
Dyold =0
i = 0
state = ""
while(cap.isOpened()):
    ret, frame = cap.read()
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; also no
    # guard for ret being False at end of video.
    img = scipy.misc.imresize(frame, (H,W))
    #draw camera's POV
    cv2.circle(img,(int(W/2),int(H/2)),5,(0,0,255),8)
    try:
        processedFrame,dx,dy = process(img)
        if (i < 6):
            Dx.append(dx)
            Dy.append(dy)
            i=i+1
        else:
            # Refresh the running averages every 6 frames.
            DxAve = sum(Dx)/len(Dx)
            DyAve = sum(Dy)/len(Dy)
            del Dx[:]
            del Dy[:]
            i=0
        if (DyAve > 30) and (abs(DxAve) < 300):
            #check if the vanishing point and the next vanishing point aren't too far from each other
            if (((DxAve - Dxold)**2 + (DyAve - Dyold)**2) < 150**2) == True: ##distance 150 px max
                cv2.line(img,(int(W/2),int(H/2)),(int(W/2)+int(DxAve),int(H/2)+int(DyAve)),(0,0,255),7)
                #walking directions
                if abs(DxAve) < 80 and DyAve > 100 and abs(Dxold-DxAve) < 20:
                    state = 'Straight'
                    cv2.putText(img,state,(50,50), cv2.FONT_HERSHEY_PLAIN, 3,(0,0,0),2,cv2.LINE_AA)
                elif DxAve > 80 and DyAve > 100 and abs(Dxold-DxAve) < 20:
                    state = 'Right'
                    cv2.putText(img,state,(50,50), cv2.FONT_HERSHEY_PLAIN, 3,(0,0,255),2,cv2.LINE_AA)
                # NOTE(review): condition is DxAve < 80, which overlaps the
                # 'Straight' case; DxAve < -80 looks intended -- confirm.
                elif DxAve < 80 and DyAve > 100 and abs(Dxold-DxAve) < 20:
                    state = 'Left'
                    cv2.putText(img,state,(50,50), cv2.FONT_HERSHEY_PLAIN, 3,(0,0,255),2,cv2.LINE_AA)
            else:
                # Jump too large: keep drawing with the previous estimate.
                # NOTE(review): trailing comma omits the thickness argument,
                # so this line is drawn 1 px thick -- confirm intent.
                cv2.line(img,(int(W/2),int(H/2)),(int(W/2)+int(Dxold),int(H/2)+int(Dyold)),(0,0,255),)
                #walking directions
                if state == 'Straight':
                    cv2.putText(img,state,(50,50), cv2.FONT_HERSHEY_PLAIN, 3,(0,0,0),2,cv2.LINE_AA)
                else:
                    cv2.putText(img,state,(50,50), cv2.FONT_HERSHEY_PLAIN, 3,(0,0,255),2,cv2.LINE_AA)
            Dxold = DxAve
            Dyold = DyAve
    except:
        # NOTE(review): bare except hides all errors; processedFrame may be
        # unbound below if the very first frame fails.
        print('Failed to process frame')
    #show & save
    img = cv2.imshow('Processed',processedFrame)
    out.write(processedFrame)
    if cv2.waitKey(1) & 0xFF == ord('q') or cv2.waitKey(1) & 0xFF == ord('Q'):
        break
out.release()
cap.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
184783 | <reponame>yasuaki-344/streamlit-docker
"""プロットデータアクセスオブジェクトのサンプル."""
import numpy as np
class PlotDataRepository:
    """Example data-access object that supplies data for plotting."""

    def __init__(self):
        """Initialize a new instance of PlotDataRepository."""
        pass

    def generate_data(self) -> np.ndarray:
        """Return 20 random samples drawn uniformly from [0, 1).

        Returns:
            np.ndarray: a 1-D array of 20 floats.
        """
        # Fix: the return annotation was `np.array` (a factory function,
        # not a type); `np.ndarray` is the actual returned type.
        return np.random.random(20)
| StarcoderdataPython |
4967987 | <gh_stars>0
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import copy
import os
import subprocess
import time
import signal
from collections import OrderedDict, Mapping, Sequence
from .eval import SoS_eval, SoS_exec
from .monitor import ProcessMonitor
from .targets import (InMemorySignature, file_target, sos_step, dynamic,
sos_targets)
from .utils import StopInputGroup, env, pickleable, ProcessKilled
from .tasks import TaskFile, remove_task_files
from .step_executor import parse_shared_vars
from .executor_utils import __null_func__, get_traceback_msg, prepare_env, clear_output
def signal_handler(*args, **kwargs):
    '''SIGTERM handler: convert the signal into a ProcessKilled exception
    so that the normal cleanup path (task status update) can run.'''
    raise ProcessKilled()
def collect_task_result(task_id, sos_dict, skipped=False, signature=None):
    '''Assemble the result dictionary for a completed task.

    Collects variables requested through the task option ``shared`` from
    the executed task's namespace, resolves any still-undetermined output
    targets, and returns a dictionary with the return code, targets,
    timing/resource statistics, and (when given) the written signature.
    Raises ValueError when a requested shared variable is unavailable or
    the ``shared`` option is malformed.
    '''
    shared = {}
    if 'shared' in env.sos_dict['_runtime']:
        svars = env.sos_dict['_runtime']['shared']
        # ``shared`` may be a single name, a {name: expression} mapping, or
        # a sequence mixing both forms.
        if isinstance(svars, str):
            if svars not in env.sos_dict:
                raise ValueError(
                    f'Unavailable shared variable {svars} after the completion of task {task_id}'
                )
            if not pickleable(env.sos_dict[svars], svars):
                env.logger.warning(
                    f'{svars} of type {type(env.sos_dict[svars])} is not sharable'
                )
            else:
                shared[svars] = copy.deepcopy(env.sos_dict[svars])
        elif isinstance(svars, Mapping):
            for var, val in svars.items():
                # A differing value is an expression to evaluate and store
                # under the variable name before collecting it.
                if var != val:
                    env.sos_dict.set(var, SoS_eval(val))
                if var not in env.sos_dict:
                    raise ValueError(
                        f'Unavailable shared variable {var} after the completion of task {task_id}'
                    )
                if not pickleable(env.sos_dict[var], var):
                    env.logger.warning(
                        f'{var} of type {type(env.sos_dict[var])} is not sharable'
                    )
                else:
                    shared[var] = copy.deepcopy(env.sos_dict[var])
        elif isinstance(svars, Sequence):
            # if there are dictionaries in the sequence, e.g.
            # shared=['A', 'B', {'C':'D"}]
            for item in svars:
                if isinstance(item, str):
                    if item not in env.sos_dict:
                        raise ValueError(
                            f'Unavailable shared variable {item} after the completion of task {task_id}'
                        )
                    if not pickleable(env.sos_dict[item], item):
                        env.logger.warning(
                            f'{item} of type {type(env.sos_dict[item])} is not sharable'
                        )
                    else:
                        shared[item] = copy.deepcopy(env.sos_dict[item])
                elif isinstance(item, Mapping):
                    for var, val in item.items():
                        if var != val:
                            env.sos_dict.set(var, SoS_eval(val))
                        if var not in env.sos_dict:
                            raise ValueError(
                                f'Unavailable shared variable {var} after the completion of task {task_id}'
                            )
                        if not pickleable(env.sos_dict[var], var):
                            env.logger.warning(
                                f'{var} of type {type(env.sos_dict[var])} is not sharable'
                            )
                        else:
                            shared[var] = copy.deepcopy(env.sos_dict[var])
                else:
                    raise ValueError(
                        f'Option shared should be a string, a mapping of expression, or a list of string or mappings. {svars} provided'
                    )
        else:
            raise ValueError(
                f'Option shared should be a string, a mapping of expression, or a list of string or mappings. {svars} provided'
            )
        env.log_to_file(
            'TASK',
            f'task {task_id} (index={env.sos_dict["_index"]}) return shared variable {shared}'
        )
    # the difference between sos_dict and env.sos_dict is that sos_dict (the original version) can have remote() targets
    # which should not be reported.
    if env.sos_dict['_output'].undetermined():
        # re-process the output statement to determine output files
        args, _ = SoS_eval(
            f'__null_func__({env.sos_dict["_output"]._undetermined})',
            extra_dict={'__null_func__': __null_func__})
        # handle dynamic args
        env.sos_dict.set(
            '_output',
            sos_targets(
                [x.resolve() if isinstance(x, dynamic) else x for x in args]))
    return {
        'ret_code': 0,
        'task': task_id,
        'input': sos_dict['_input'],
        'output': sos_dict['_output'],
        'depends': sos_dict['_depends'],
        'shared': shared,
        'skipped': skipped,
        'start_time': sos_dict.get('start_time', ''),
        'peak_cpu': sos_dict.get('peak_cpu', 0),
        'peak_mem': sos_dict.get('peak_mem', 0),
        'end_time': time.time(),
        'signature': {
            task_id: signature.write()
        } if signature else {}
    }
def execute_task(task_id,
                 verbosity=None,
                 runmode='run',
                 sigmode=None,
                 monitor_interval=5,
                 resource_monitor_interval=60):
    '''Execute single or master task, return a dictionary

    Wraps _execute_task with SIGTERM handling and task-file bookkeeping:
    the TaskFile status is kept in sync (running/aborted/skipped/
    completed/failed) and the result, outputs and signature are recorded.
    Returns the task's integer return code.
    '''
    tf = TaskFile(task_id)
    # this will automatically create a pulse file
    tf.status = 'running'
    # write result file
    try:
        # Convert SIGTERM into ProcessKilled so the aborted status is set.
        signal.signal(signal.SIGTERM, signal_handler)
        res = _execute_task(task_id, verbosity, runmode, sigmode,
                            monitor_interval, resource_monitor_interval)
    except KeyboardInterrupt:
        tf.status = 'aborted'
        raise
    except ProcessKilled:
        tf.status = 'aborted'
        raise ProcessKilled('task interrupted')
    finally:
        # Restore the default handler regardless of how execution ended.
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
    if res['ret_code'] != 0 and 'exception' in res:
        with open(
                os.path.join(
                    os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'),
                'a') as err:
            err.write(f'Task {task_id} exits with code {res["ret_code"]}')
    if res.get('skipped', False):
        # a special mode for skipped to set running time to zero
        tf.status = 'skipped'
    else:
        tf.add_outputs()
        sig = res.get('signature', {})
        res.pop('signature', None)
        tf.add_result(res)
        if sig:
            tf.add_signature(sig)
        # **after** result file is created, remove other files
        #
        # NOTE: if the pulse is not removed. When another sos process checkes
        # the task is started very quickly so the task has satus 'pending',
        # the task might be considered already running.
        tf.status = 'completed' if res['ret_code'] == 0 else 'failed'
    return res['ret_code']
def _validate_task_signature(sig, saved_sig, task_id, is_subtask):
    '''Validate a saved task signature according to env.config["sig_mode"].

    Returns a truthy value when the task can be skipped (its signature
    matches, or a new signature was successfully built), and a falsy value
    when the task must be executed. In 'assert' mode a mismatch raises
    RuntimeError instead.
    '''
    substep_index = env.sos_dict['_index']
    sig_mode = env.config['sig_mode']

    def restore_from(matched):
        # re-populate step variables from a validated signature; in this
        # case an Undetermined output can get real output files back
        env.sos_dict.set('_input', sos_targets(matched['input']))
        env.sos_dict.set('_depends', sos_targets(matched['depends']))
        env.sos_dict.set('_output', sos_targets(matched['output']))
        env.sos_dict.update(matched['vars'])

    def announce(reason):
        # subtask messages are demoted to debug level
        log = env.logger.debug if is_subtask else env.logger.info
        log(f'Task ``{task_id}`` for substep ``{env.sos_dict["step_name"]}`` (index={substep_index}) is ``ignored`` {reason}'
           )

    if sig_mode == 'default':
        matched = sig.validate(saved_sig)
        if isinstance(matched, dict):
            restore_from(matched)
            announce('due to saved signature')
            return True
        # mismatch (a reason string was returned): fall through and return
        # None, exactly as the original if/elif chain did
        return None
    if sig_mode == 'assert':
        matched = sig.validate(saved_sig)
        if isinstance(matched, str):
            raise RuntimeError(f'Signature mismatch: {matched}')
        restore_from(matched)
        announce('with matching signature')
        sig.content = saved_sig
        return True
    if sig_mode == 'build':
        # the signature ends up being written twice in this mode
        if sig.write():
            announce('with signature constructed')
            return True
        return False
    if sig_mode == 'force':
        return False
    raise RuntimeError(f'Unrecognized signature mode {env.config["sig_mode"]}')
def _execute_sub_tasks(task_id, params, sig_content, verbosity, runmode,
                       sigmode, monitor_interval, resource_monitor_interval,
                       master_runtime):
    '''If this is a master task, execute as individual tasks.

    Every subtask in ``params.task_stack`` is executed through
    :func:`_execute_task` -- in parallel with a multiprocessing pool when
    ``params.num_workers > 1``, otherwise sequentially. Subtask stdout and
    stderr are merged into the master task's ``.out``/``.err`` files, and
    the per-subtask result dictionaries are aggregated into a single result
    dictionary for the master task (with per-subtask details under the
    ``subtasks`` key).
    '''
    # monitor resource usage for the master task as a whole
    m = ProcessMonitor(
        task_id,
        monitor_interval=monitor_interval,
        resource_monitor_interval=resource_monitor_interval,
        max_walltime=params.sos_dict['_runtime'].get('max_walltime', None),
        max_mem=params.sos_dict['_runtime'].get('max_mem', None),
        max_procs=params.sos_dict['_runtime'].get('max_procs', None),
        sos_dict=params.sos_dict)
    m.start()
    env.logger.info(f'{task_id} ``started``')

    master_out = os.path.join(
        os.path.expanduser('~'), '.sos', 'tasks', task_id + '.out')
    master_err = os.path.join(
        os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err')
    # if this is a master task, calling each sub task
    with open(master_out, 'wb') as out, open(master_err, 'wb') as err:

        def copy_out_and_err(result):
            # merge a finished subtask's .out/.err content into the master
            # task's files, then remove the subtask's own files. Note that
            # this closure writes to the enclosing `out`/`err` handles, so
            # it must only be called while the `with` block is active.
            tid = result['task']
            out.write(
                f'{tid}: {"completed" if result["ret_code"] == 0 else "failed"}\n'
                .encode())
            if 'output' in result:
                out.write(f'output: {result["output"]}\n'.encode())
            sub_out = os.path.join(
                os.path.expanduser('~'), '.sos', 'tasks', tid + '.out')
            if os.path.isfile(sub_out):
                with open(sub_out, 'rb') as sout:
                    out.write(sout.read())
                try:
                    os.remove(sub_out)
                except Exception as e:
                    env.logger.warning(f'Failed to remove {sub_out}: {e}')
            sub_err = os.path.join(
                os.path.expanduser('~'), '.sos', 'tasks', tid + '.err')
            if 'exception' in result:
                err.write(str(result['exception']).encode())
            err.write(
                f'{tid}: {"completed" if result["ret_code"] == 0 else "failed"}\n'
                .encode())
            if os.path.isfile(sub_err):
                with open(sub_err, 'rb') as serr:
                    err.write(serr.read())
                try:
                    os.remove(sub_err)
                except Exception as e:
                    env.logger.warning(f'Failed to remove {sub_err}: {e}')

            # remove other files as well
            try:
                remove_task_files(tid, ['.out', '.err'])
            except Exception as e:
                env.logger.debug(f'Failed to remove files {tid}: {e}')

        if params.num_workers > 1:
            # parallel execution through a process pool; each subtask is
            # passed as a (tid, definition, signature) tuple together with
            # the slice of master runtime relevant to it
            from multiprocessing.pool import Pool
            p = Pool(params.num_workers)
            results = []
            for tid, tdef in params.task_stack:
                if hasattr(params, 'common_dict'):
                    tdef.sos_dict.update(params.common_dict)
                results.append(
                    p.apply_async(
                        _execute_task, ((tid, tdef, {
                            tid: sig_content.get(tid, {})
                        }), verbosity, runmode, sigmode, None, None, {
                            x: master_runtime.get(x, {})
                            for x in ('_runtime', tid)
                        }),
                        callback=copy_out_and_err))
            # replace AsyncResult handles with actual result dictionaries;
            # r.get() re-raises any exception raised inside the worker
            for idx, r in enumerate(results):
                results[idx] = r.get()
            p.close()
            p.join()
            # we wait for all results to be ready to return or raise
            # but we only raise exception for one of the subtasks
            # for res in results:
            #     if 'exception' in res:
            #         failed = [x.get("task", "")
            #                   for x in results if "exception" in x]
            #         env.logger.error(
            #             f'{task_id} ``failed`` due to failure of subtask{"s" if len(failed) > 1 else ""} {", ".join(failed)}')
            #         return {'ret_code': 1, 'exception': res['exception'], 'task': task_id}
        else:
            # sequential execution of subtasks in this process
            results = []
            for tid, tdef in params.task_stack:
                if hasattr(params, 'common_dict'):
                    tdef.sos_dict.update(params.common_dict)
                # no monitor process for subtasks
                res = _execute_task(
                    (tid, tdef, {
                        tid: sig_content.get(tid, {})
                    }),
                    verbosity=verbosity,
                    runmode=runmode,
                    sigmode=sigmode,
                    monitor_interval=None,
                    resource_monitor_interval=None,
                    master_runtime={
                        x: master_runtime.get(x, {}) for x in ('_runtime', tid)
                    })
                try:
                    copy_out_and_err(res)
                except Exception as e:
                    env.logger.warning(
                        f'Failed to copy result of subtask {tid}: {e}')
                results.append(res)
            # for res in results:
            #     if 'exception' in res:
            #         failed = [x.get("task", "")
            #                   for x in results if "exception" in x]
            #         env.logger.error(
            #             f'{task_id} ``failed`` due to failure of subtask{"s" if len(failed) > 1 else ""} {", ".join(failed)}')
            #         return {'ret_code': 1, 'exception': res['exception'], 'task': task_id}
    #
    # now we collect result
    # ret_code accumulates: 0 means every subtask succeeded; each failed
    # subtask adds at least 1, so ret_code == len(results) means all failed
    all_res = {
        'ret_code': 0,
        'output': None,
        'subtasks': {},
        'shared': {},
        'skipped': 0,
        'signature': {}
    }
    for tid, x in zip(params.task_stack, results):
        all_res['subtasks'][tid[0]] = x

        if 'exception' in x:
            # only the last exception is kept on the master result
            all_res['exception'] = x['exception']
            all_res['ret_code'] += 1
            continue
        all_res['ret_code'] += x['ret_code']
        if all_res['output'] is None:
            all_res['output'] = copy.deepcopy(x['output'])
        else:
            try:
                all_res['output'].extend(x['output'], keep_groups=True)
            except Exception as e:
                env.logger.warning(
                    f"Failed to extend output {all_res['output']} with {x['output']}"
                )
        all_res['shared'].update(x['shared'])
        # does not care if one or all subtasks are executed or skipped.
        all_res['skipped'] += x.get('skipped', 0)
        if 'signature' in x:
            all_res['signature'].update(x['signature'])

    if all_res['ret_code'] != 0:
        if all_res['ret_code'] == len(results):
            env.logger.info(f'All {len(results)} tasks in {task_id} ``failed``')
        else:
            env.logger.info(
                f'{all_res["ret_code"]} of {len(results)} tasks in {task_id} ``failed``'
            )
        # if some failed, some skipped, not skipped
        if 'skipped' in all_res:
            all_res.pop('skipped')
    elif all_res['skipped']:
        if all_res['skipped'] == len(results):
            env.logger.info(
                f'All {len(results)} tasks in {task_id} ``ignored`` or skipped')
        else:
            # if only partial skip, we still save signature and result etc
            env.logger.info(
                f'{all_res["skipped"]} of {len(results)} tasks in {task_id} ``ignored`` or skipped'
            )
            all_res.pop('skipped')
    else:
        env.logger.info(f'All {len(results)} tasks in {task_id} ``completed``')
    return all_res
def _execute_task(task_id,
                  verbosity=None,
                  runmode='run',
                  sigmode=None,
                  monitor_interval=5,
                  resource_monitor_interval=60,
                  master_runtime=None):
    '''A function that execute specified task within a local dictionary
    (from SoS env.sos_dict). This function should be self-contained in that
    it can be handled by a task manager, be executed locally in a separate
    process or remotely on a different machine.

    ``task_id`` is either a task ID string (parameters, runtime and
    signatures are then loaded from the task file) or a tuple
    ``(task_id, params, sig_content)`` when called for a subtask of a
    master task. Returns a result dictionary with at least ``ret_code``
    and ``task`` keys; for a master task the result is the aggregate of
    all subtasks (see _execute_sub_tasks).
    '''
    # BUGFIX: master_runtime used to default to the mutable {} literal,
    # which is shared across calls; use a None sentinel instead.
    if master_runtime is None:
        master_runtime = {}
    # start a monitoring file, which would be killed after the job
    # is done (killed etc)
    if isinstance(task_id, str):
        params, master_runtime = TaskFile(task_id).get_params_and_runtime()
        sig_content = TaskFile(task_id).signature
        subtask = False
    else:
        # subtask: parameters and signatures are passed in directly
        subtask = True
        task_id, params, sig_content = task_id
        if 'TASK' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('TASK', f'Executing subtask {task_id}')
        # update local runtime with master runtime
        if '_runtime' in master_runtime:
            params.sos_dict['_runtime'].update(master_runtime['_runtime'])
        if task_id in master_runtime:
            params.sos_dict.update(master_runtime[task_id])

    # a master task carries a stack of subtasks and is executed as a group
    if hasattr(params, 'task_stack'):
        return _execute_sub_tasks(task_id, params, sig_content, verbosity,
                                  runmode, sigmode, monitor_interval,
                                  resource_monitor_interval, master_runtime)

    global_def, task, sos_dict = params.global_def, params.task, params.sos_dict

    # task output
    env.sos_dict.set(
        '__std_out__',
        os.path.join(
            os.path.expanduser('~'), '.sos', 'tasks', task_id + '.out'))
    env.sos_dict.set(
        '__std_err__',
        os.path.join(
            os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'))
    env.logfile = os.path.join(
        os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err')
    # clear the content of existing .out and .err file if exists, but do not create one if it does not exist
    if os.path.exists(env.sos_dict['__std_out__']):
        open(env.sos_dict['__std_out__'], 'w').close()
    if os.path.exists(env.sos_dict['__std_err__']):
        open(env.sos_dict['__std_err__'], 'w').close()

    if verbosity is not None:
        env.verbosity = verbosity

    prepare_env(global_def[0], global_def[1])

    if '_runtime' not in sos_dict:
        sos_dict['_runtime'] = {}

    # pulse thread: monitors walltime/memory/process limits for this task
    if monitor_interval is not None:
        m = ProcessMonitor(
            task_id,
            monitor_interval=monitor_interval,
            resource_monitor_interval=resource_monitor_interval,
            max_walltime=sos_dict['_runtime'].get('max_walltime', None),
            max_mem=sos_dict['_runtime'].get('max_mem', None),
            max_procs=sos_dict['_runtime'].get('max_procs', None),
            sos_dict=sos_dict)
        m.start()

    env.config['run_mode'] = runmode

    # dryrun never checks signatures; otherwise an explicit sigmode wins
    if runmode == 'dryrun':
        env.config['sig_mode'] = 'ignore'
    elif sigmode is not None:
        env.config['sig_mode'] = sigmode
    #
    (env.logger.debug if subtask else env.logger.info)(f'{task_id} ``started``')

    env.sos_dict.quick_update(sos_dict)

    for key in [
            'step_input', '_input', 'step_output', '_output', 'step_depends',
            '_depends'
    ]:
        if key in sos_dict and isinstance(sos_dict[key], sos_targets):
            # resolve remote() target
            env.sos_dict.set(
                key,
                sos_dict[key].remove_targets(type=sos_step).resolve_remote())

    # when no output is specified, we just treat the task as having no output (determined)
    env.sos_dict['_output']._undetermined = False
    sig = None if env.config['sig_mode'] == 'ignore' else InMemorySignature(
        env.sos_dict['_input'],
        env.sos_dict['_output'],
        env.sos_dict['_depends'],
        env.sos_dict['__signature_vars__'],
        shared_vars=parse_shared_vars(env.sos_dict['_runtime'].get(
            'shared', None)))

    if sig and _validate_task_signature(sig, sig_content.get(task_id, {}),
                                        task_id, subtask):
        #env.logger.info(f'{task_id} ``skipped``')
        return collect_task_result(
            task_id, sos_dict, skipped=True, signature=sig)

    # if we are to really execute the task, touch the task file so that sos status shows correct
    # execution duration.
    if not subtask:
        sos_dict['start_time'] = time.time()

    # BUGFIX: record the current directory *before* entering the try block.
    # Previously orig_dir was only assigned after the first workdir chdir
    # inside the try, so an exception raised while changing to workdir
    # caused a NameError in the finally clause. The reassignment below is
    # kept so the historical restore-to-workdir behavior is unchanged.
    orig_dir = os.getcwd()

    try:
        # go to 'workdir'
        if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
            if not os.path.isdir(
                    os.path.expanduser(sos_dict['_runtime']['workdir'])):
                try:
                    os.makedirs(
                        os.path.expanduser(sos_dict['_runtime']['workdir']))
                    os.chdir(
                        os.path.expanduser(sos_dict['_runtime']['workdir']))
                except Exception as e:
                    # sometimes it is not possible to go to a "workdir" because of
                    # file system differences, but this should be ok if a work_dir
                    # has been specified.
                    env.logger.debug(
                        f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
                    )
            else:
                os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
        #
        orig_dir = os.getcwd()

        # we will need to check existence of targets because the task might
        # be executed on a remote host where the targets are not available.
        for target in (sos_dict['_input'] if isinstance(sos_dict['_input'], list) else []) + \
                (sos_dict['_depends'] if isinstance(sos_dict['_depends'], list) else []):
            # if the file does not exist (although the signature exists)
            # request generation of files
            if isinstance(target, str):
                if not file_target(target).target_exists('target'):
                    # remove the signature and regenerate the file
                    raise RuntimeError(f'{target} not found')
            # the sos_step target should not be checked in tasks because tasks are
            # independently executable units.
            elif not isinstance(
                    target, sos_step) and not target.target_exists('target'):
                raise RuntimeError(f'{target} not found')

        # create directory. This usually has been done at the step level but the task can be executed
        # on a remote host where the directory does not yet exist.
        ofiles = env.sos_dict['_output']
        if ofiles.valid():
            for ofile in ofiles:
                parent_dir = ofile.parent
                if not parent_dir.is_dir():
                    parent_dir.mkdir(parents=True, exist_ok=True)

        # go to user specified workdir
        if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
            if not os.path.isdir(
                    os.path.expanduser(sos_dict['_runtime']['workdir'])):
                try:
                    os.makedirs(
                        os.path.expanduser(sos_dict['_runtime']['workdir']))
                except Exception as e:
                    raise RuntimeError(
                        f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
                    )
            os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
        # set environ ...
        # we join PATH because the task might be executed on a different machine
        if '_runtime' in sos_dict:
            if 'env' in sos_dict['_runtime']:
                for key, value in sos_dict['_runtime']['env'].items():
                    if 'PATH' in key and key in os.environ:
                        # merge the new components with the existing ones,
                        # preserving order and removing duplicates
                        new_path = OrderedDict()
                        for p in value.split(os.pathsep):
                            new_path[p] = 1
                        # BUGFIX: the old code called
                        # value.split(os.environ[key]), which split the new
                        # value using the *old value* as separator and thus
                        # dropped the existing search path instead of
                        # appending it.
                        for p in os.environ[key].split(os.pathsep):
                            new_path[p] = 1
                        os.environ[key] = os.pathsep.join(new_path.keys())
                    else:
                        os.environ[key] = value
            if 'prepend_path' in sos_dict['_runtime']:
                if isinstance(sos_dict['_runtime']['prepend_path'], str):
                    os.environ['PATH'] = sos_dict['_runtime']['prepend_path'] + \
                        os.pathsep + os.environ['PATH']
                # NOTE(review): this branch reads env.sos_dict while the
                # others use sos_dict; after quick_update these presumably
                # alias the same '_runtime' dict -- confirm
                elif isinstance(env.sos_dict['_runtime']['prepend_path'],
                                Sequence):
                    os.environ['PATH'] = os.pathsep.join(
                        sos_dict['_runtime']
                        ['prepend_path']) + os.pathsep + os.environ['PATH']
                else:
                    raise ValueError(
                        f'Unacceptable input for option prepend_path: {sos_dict["_runtime"]["prepend_path"]}'
                    )

        # step process
        SoS_exec(task)

        (env.logger.debug
         if subtask else env.logger.info)(f'{task_id} ``completed``')
    except StopInputGroup as e:
        # task ignored with stop_if exception
        if not e.keep_output:
            clear_output()
            env.sos_dict['_output'] = sos_targets([])
        if e.message:
            env.logger.info(e.message)
        return {
            'ret_code': 0,
            'task': task_id,
            'input': sos_targets([]),
            'output': env.sos_dict['_output'],
            'depends': sos_targets([]),
            'shared': {}
        }
    except KeyboardInterrupt:
        env.logger.error(f'{task_id} ``interrupted``')
        raise
    except subprocess.CalledProcessError as e:
        return {
            'ret_code': e.returncode,
            'task': task_id,
            'shared': {},
            'exception': RuntimeError(e.stderr)
        }
    except ProcessKilled:
        env.logger.error(f'{task_id} ``interrupted``')
        raise
    except Exception as e:
        # record the traceback in the task's .err file and report failure
        # through the result dictionary rather than propagating
        msg = get_traceback_msg(e)
        # env.logger.error(f'{task_id} ``failed``: {msg}')
        with open(
                os.path.join(
                    os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'),
                'a') as err:
            err.write(msg + '\n')
        return {
            'ret_code': 1,
            'exception': RuntimeError(msg),
            'task': task_id,
            'shared': {}
        }
    finally:
        os.chdir(orig_dir)

    return collect_task_result(task_id, sos_dict, signature=sig)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.