import warnings
class AppPlatformError(Exception):
"""
Raised by :meth:`Client.request()` for requests that:
- Return a non-200 HTTP response, or
- Connection refused/timeout or
- Response timeout or
- Malformed request
- Have a malformed/missing header in the response.
"""
def __init__(self, exc_message, status_code, error_code=None):
super(AppPlatformError, self).__init__(exc_message)
self.status_code = status_code
self.error_code = error_code
class ServerError(AppPlatformError):
"""
For 500-level responses from the server
"""
class ClientError(AppPlatformError):
"""
For 400-level responses from the server
"""
class InputNotUnderstoodError(Exception):
"""
Raised if a method is called in a way that cannot be understood
"""
class AllRetriesFailedError(Exception):
"""Raised when the retry manager does not successfully make a request"""
class InvalidModelCategoryError(Exception):
"""
Raised when method specific for model category was called from wrong model
"""
class AsyncTimeoutError(Exception):
"""
Raised when an asynchronous operation did not successfully get resolved
within a specified time limit
"""
class AsyncFailureError(Exception):
"""
Raised when querying an asynchronous status resulted in an exceptional
status code (not 200 and not 303)
"""
class ProjectAsyncFailureError(AsyncFailureError):
"""
When an AsyncFailureError occurs during project creation or finalizing the project
settings for modeling. This exception will have the attributes ``status_code``
indicating the unexpected status code from the server, and ``async_location`` indicating
which asynchronous status object was being polled when the failure happened.
"""
def __init__(self, exc_message, status_code, async_location):
super(ProjectAsyncFailureError, self).__init__(exc_message)
self.status_code = status_code
self.async_location = async_location
class AsyncProcessUnsuccessfulError(Exception):
"""
Raised when querying an asynchronous status showed that async process
was not successful
"""
class AsyncModelCreationError(Exception):
"""
Raised when querying an asynchronous status showed that model creation
was not successful
"""
class AsyncPredictionsGenerationError(Exception):
"""
Raised when querying an asynchronous status showed that predictions
generation was not successful
"""
class PendingJobFinished(Exception):
"""
Raised when the server responds with a 303 for the pending creation of a
resource.
"""
class JobNotFinished(Exception):
"""
Raised when execution was trying to get a finished resource from a pending
job, but the job is not finished
"""
class DuplicateFeaturesError(Exception):
"""
    Raised when trying to create a featurelist with duplicate features
"""
class DataRobotDeprecationWarning(DeprecationWarning):
"""
Raised when using deprecated functions or using functions in a deprecated way
"""
class IllegalFileName(Exception):
"""
Raised when trying to use a filename we can't handle.
"""
class JobAlreadyRequested(ClientError):
"""
    Raised when requesting a model that has already been requested.
"""
warnings.filterwarnings('default', category=DataRobotDeprecationWarning)
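# A minimal usage sketch showing how the ServerError/ClientError split is
# typically consumed; `make_request` below is a hypothetical stand-in for
# Client.request(), not part of this module.
def _demo_error_handling():
    def make_request():
        raise ClientError('Bad request', status_code=400, error_code='E400')
    try:
        make_request()
    except ServerError as exc:
        print('server error (5xx), worth retrying:', exc.status_code)
    except ClientError as exc:
        print('client error (4xx), fix the request:', exc.status_code)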
import setuptools
from glob import glob
setuptools.setup(
name="noteboard-extension",
version='0.1.0',
url="https://github.com/yuvipanda/noteboard",
author="Yuvi Panda",
description="Simple Jupyter extension to emit events about current notebooks to a noteboard server",
data_files=[
('share/jupyter/nbextensions/noteboard', glob('*.js'))
],
packages=setuptools.find_packages()
)
import os.path as osp
import numpy as np
class Dataset(object):
def __init__(self, ids, labels, is_train=True, name='default'):
self._ids = list(ids)
self._labels = labels
self.name = name
self.is_train = is_train
    def get_data(self, id):
        # `id` is the path to a .npy activity file; labels are keyed by path.
        activity = np.load(id)
        label = self._labels[id]
        return activity, label
    def get_data_ROI(self, id):
        activity = np.load(id)
        # Rescale pixel values from [0, 255] to [-1, 1].
        activity = activity / 255. * 2 - 1
        label = self._labels[id]
        return activity, label
@property
def ids(self):
return self._ids
def __len__(self):
return len(self.ids)
def __repr__(self):
return 'Dataset (%s, %d examples)' % (
self.name,
len(self)
)
def create_default_splits(path, is_train=True):
train_ids, train_labels = get_activity_path_and_label(osp.join(path, 'train'))
val_ids, val_labels = get_activity_path_and_label(osp.join(path, 'val'))
test_ids, test_labels = get_activity_path_and_label(osp.join(path, 'test'))
dataset_train = Dataset(train_ids, train_labels, name='train', is_train=True)
dataset_val = Dataset(val_ids, val_labels, name='val', is_train=False)
dataset_test = Dataset(test_ids, test_labels, name='test', is_train=False)
return dataset_train, dataset_val, dataset_test
def get_activity_path_and_label(path):
ids = []
labels = {}
with open(osp.join(path, 'label.txt')) as f:
lines = [line.strip() for line in f.readlines()]
        for line in lines:
            fields = line.split()  # "<relative .npy path> <label values...>"
            id = osp.join(path, fields[0])
            ids.append(id)
            labels[id] = np.array([float(n) for n in fields[1:]])
rs = np.random.RandomState(123)
rs.shuffle(ids)
return ids, labels
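# A minimal usage sketch, assuming a layout like <path>/train/label.txt whose
# lines read "<clip>.npy <label values...>" with the .npy files stored next to
# it (paths below are hypothetical):
# train_set, val_set, test_set = create_default_splits('/data/activities')
# activity, label = train_set.get_data(train_set.ids[0])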
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# Color spaces --> RGB, grayscale, HSV, L*a*b, etc.
img = cv.imread('photos/dog.jpg')
cv.imshow('Dog',img)
# plt.imshow(img)
# plt.show()
# Point to note --> matplotlib assumes RGB ordering, so a BGR image displays with red and blue swapped.
# BGR to grayscale -->
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow('Grayscale',gray)
#BGR to HSV -->
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV_FULL)  # the _FULL variant scales hue to 0-255 rather than 0-179
cv.imshow('HSV', hsv)
#BGR to L*A*B
lab =cv.cvtColor(img,cv.COLOR_BGR2LAB)
cv.imshow('Lab',lab)
#BGR to RGB
rgb = cv.cvtColor(img,cv.COLOR_BGR2RGB)
cv.imshow('RGB',rgb)
plt.imshow(rgb)
plt.show()
# HSV to BGR (convert the HSV image back, using the matching _FULL variant)
hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR_FULL)
cv.imshow('HSV -> BGR', hsv_bgr)
cv.waitKey(0)
from rest_framework import serializers
from users.models.organizations import Organization
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ('id', 'name', 'domain')
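# A minimal usage sketch (assumes a saved Organization instance; values are
# illustrative):
# OrganizationSerializer(org).data
# -> {'id': 1, 'name': 'Acme', 'domain': 'acme.com'}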
import sys
from data_wrapper.settings import MESSAGES
class DataWrapper:
def __init__(self, db=None, params=None, environment=None):
if db:
if db.lower() == 'mongodb':
from data_wrapper.mongodb_wrapper import MongodbDbWrapper
self.db = True
self.my_data = MongodbDbWrapper(params)
elif db.lower() == 'mysql':
from data_wrapper.mysql_wrapper import MysqlDbWrapper
self.db = True
self.my_data = MysqlDbWrapper(params, environment)
else:
print(MESSAGES["WRONG_DATABASE"])
sys.exit()
else:
self.db = False
self.my_data = {}
    def __setitem__(self, key, value):
        # Both backends (the in-memory dict and the DB wrappers) support item
        # assignment directly.
        self.my_data[key] = value
def __getitem__(self, item):
if not self.db and item not in self.my_data:
self.my_data[item] = []
return self.my_data[item]
def __delitem__(self, key):
self.my_data.__delitem__(key)
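# A minimal in-memory usage sketch (no db argument, so values live in a dict):
# store = DataWrapper()
# store['events'].append('login')   # missing keys default to []
# store['retries'] = 3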
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from telemetry.internal.util import binary_manager
class MinidumpSymbolizer(object):
def __init__(self, os_name, arch_name, dump_finder, build_dir):
"""Abstract class for handling all minidump symbolizing code.
Args:
os_name: The OS of the host (if running the test on a device), or the OS
of the test machine (if running the test locally).
arch_name: The arch name of the host (if running the test on a device), or
the OS of the test machine (if running the test locally).
dump_finder: The minidump_finder.MinidumpFinder instance that is being
used to find minidumps for the test.
build_dir: The directory containing Chromium build artifacts to generate
symbols from.
"""
self._os_name = os_name
self._arch_name = arch_name
self._dump_finder = dump_finder
self._build_dir = build_dir
def SymbolizeMinidump(self, minidump):
"""Gets the stack trace from the given minidump.
Args:
minidump: the path to the minidump on disk
Returns:
None if the stack could not be retrieved for some reason, otherwise a
string containing the stack trace.
"""
if self._os_name == 'win':
cdb = self._GetCdbPath()
if not cdb:
logging.warning('cdb.exe not found.')
return None
# Move to the thread which triggered the exception (".ecxr"). Then include
# a description of the exception (".lastevent"). Also include all the
# threads' stacks ("~*kb30") as well as the ostensibly crashed stack
# associated with the exception context record ("kb30"). Note that stack
# dumps, including that for the crashed thread, may not be as precise as
# the one starting from the exception context record.
# Specify kb instead of k in order to get four arguments listed, for
# easier diagnosis from stacks.
      output = subprocess.check_output([cdb, '-y', self._build_dir,
                                        '-c', '.ecxr;.lastevent;kb30;~*kb30;q',
                                        '-z', minidump])
# The output we care about starts with "Last event:" or possibly
# other things we haven't seen yet. If we can't find the start of the
# last event entry, include output from the beginning.
info_start = 0
info_start_match = re.search("Last event:", output, re.MULTILINE)
if info_start_match:
info_start = info_start_match.start()
info_end = output.find('quit:')
return output[info_start:info_end]
stackwalk = binary_manager.FetchPath(
'minidump_stackwalk', self._arch_name, self._os_name)
if not stackwalk:
logging.warning('minidump_stackwalk binary not found.')
return None
# We only want this logic on linux platforms that are still using breakpad.
# See crbug.com/667475
if not self._dump_finder.MinidumpObtainedFromCrashpad(minidump):
      with open(minidump, 'rb') as infile:
        minidump += '.stripped'
        with open(minidump, 'wb') as outfile:
          # Keep everything from the 'MDMP' magic onward; bytes literals keep
          # this working on binary data under Python 3 as well.
          outfile.write(b''.join(infile.read().partition(b'MDMP')[1:]))
symbols_dir = tempfile.mkdtemp()
try:
self._GenerateBreakpadSymbols(symbols_dir, minidump)
return subprocess.check_output([stackwalk, minidump, symbols_dir],
stderr=open(os.devnull, 'w'))
finally:
shutil.rmtree(symbols_dir)
def GetSymbolBinaries(self, minidump):
"""Returns a list of paths to binaries where symbols may be located.
Args:
minidump: The path to the minidump being symbolized.
"""
raise NotImplementedError()
def GetBreakpadPlatformOverride(self):
"""Returns the platform to be passed to generate_breakpad_symbols."""
return None
def _GenerateBreakpadSymbols(self, symbols_dir, minidump):
"""Generates Breakpad symbols for use with stackwalking tools.
Args:
symbols_dir: The directory where symbols will be written to.
minidump: The path to the minidump being symbolized.
"""
logging.info('Dumping Breakpad symbols.')
generate_breakpad_symbols_command = binary_manager.FetchPath(
'generate_breakpad_symbols', self._arch_name, self._os_name)
if not generate_breakpad_symbols_command:
logging.warning('generate_breakpad_symbols binary not found')
return
for binary_path in self.GetSymbolBinaries(minidump):
cmd = [
sys.executable,
generate_breakpad_symbols_command,
'--binary=%s' % binary_path,
'--symbols-dir=%s' % symbols_dir,
'--build-dir=%s' % self._build_dir,
]
if self.GetBreakpadPlatformOverride():
cmd.append('--platform=%s' % self.GetBreakpadPlatformOverride())
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logging.error(e.output)
logging.warning('Failed to execute "%s"', ' '.join(cmd))
return
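# A minimal concrete-subclass sketch (hypothetical binary path; the real
# platform-specific subclasses live elsewhere in telemetry):
# class LinuxMinidumpSymbolizerSketch(MinidumpSymbolizer):
#   def GetSymbolBinaries(self, minidump):
#     return [os.path.join(self._build_dir, 'chrome')]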
# ------------------------------------------------------------------------------
# Portions of this code are from
# OpenPCDet (https://github.com/open-mmlab/OpenPCDet)
# Licensed under the Apache License.
# ------------------------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from det3d.core.bbox import box_torch_ops
from .target_assigner.proposal_target_layer import ProposalTargetLayer
def limit_period(val, offset=0.5, period=np.pi):
return val - torch.floor(val / period + offset) * period
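# Maps an angle into [-offset * period, (1 - offset) * period); with the
# defaults this wraps a heading into [-pi/2, pi/2).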
class RoIHeadTemplate(nn.Module):
def __init__(self, num_class, model_cfg):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
self.forward_ret_dict = None
def make_fc_layers(self, input_channels, output_channels, fc_list):
fc_layers = []
pre_channel = input_channels
        for k in range(len(fc_list)):
fc_layers.extend([
nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
nn.BatchNorm1d(fc_list[k]),
nn.ReLU()
])
pre_channel = fc_list[k]
if self.model_cfg.DP_RATIO >= 0 and k == 0:
fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
fc_layers = nn.Sequential(*fc_layers)
return fc_layers
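    # e.g. make_fc_layers(256, 1, fc_list=[256, 256]) builds two
    # Conv1d+BN+ReLU blocks (256->256) followed by Conv1d(256->1), with an
    # optional Dropout after the first block when DP_RATIO >= 0.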
def assign_targets(self, batch_dict):
batch_size = batch_dict['batch_size']
with torch.no_grad():
targets_dict = self.proposal_target_layer.forward(batch_dict)
rois = targets_dict['rois'] # (B, N, 7 + C)
gt_of_rois = targets_dict['gt_of_rois'] # (B, N, 7 + C + 1)
targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()
roi_ry = limit_period(rois[:, :, 6], offset=0.5, period=np.pi*2)
gt_of_rois[:, :, :6] = gt_of_rois[:, :, :6] - rois[:, :, :6]
gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
gt_of_rois = box_torch_ops.rotate_points_along_z(
points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
).view(batch_size, -1, gt_of_rois.shape[-1])
if rois.shape[-1] == 9:
# rotate velocity
gt_of_rois[:, :, 7:-1] = gt_of_rois[:, :, 7:-1] - rois[:, :, 7:]
"""
roi_vel = gt_of_rois[:, :, 7:-1]
roi_vel = torch.cat([roi_vel, torch.zeros([roi_vel.shape[0], roi_vel.shape[1], 1]).to(roi_vel)], dim=-1)
gt_of_rois[:, :, 7:-1] = box_torch_ops.rotate_points_along_z(
points=roi_vel.view(-1, 1, 3), angle=-roi_ry.view(-1)
).view(batch_size, -1, 3)[..., :2]
"""
# flip orientation if rois have opposite orientation
heading_label = gt_of_rois[:, :, 6] % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
flag = heading_label > np.pi
heading_label[flag] = heading_label[flag] - np.pi * 2 # (-pi/2, pi/2)
heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
gt_of_rois[:, :, 6] = heading_label
targets_dict['gt_of_rois'] = gt_of_rois
return targets_dict
def get_box_reg_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
code_size = forward_ret_dict['rcnn_reg'].shape[-1]
reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C)
rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
fg_mask = (reg_valid_mask > 0)
fg_sum = fg_mask.long().sum().item()
tb_dict = {}
if loss_cfgs.REG_LOSS == 'L1':
reg_targets = gt_boxes3d_ct.view(rcnn_batch_size, -1)
rcnn_loss_reg = F.l1_loss(
rcnn_reg.view(rcnn_batch_size, -1),
reg_targets,
reduction='none'
) # [B, M, 7]
rcnn_loss_reg = rcnn_loss_reg * rcnn_loss_reg.new_tensor(\
loss_cfgs.LOSS_WEIGHTS['code_weights'])
rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.detach()
else:
raise NotImplementedError
return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.detach()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
"""
Args:
batch_size:
rois: (B, N, 7)
cls_preds: (BN, num_class)
box_preds: (BN, code_size)
Returns:
"""
code_size = box_preds.shape[-1]
# batch_cls_preds: (B, N, num_class or 1)
batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
batch_box_preds = box_preds.view(batch_size, -1, code_size)
roi_ry = rois[:, :, 6].view(-1)
roi_xyz = rois[:, :, 0:3].view(-1, 3)
local_rois = rois.clone().detach()
local_rois[:, :, 0:3] = 0
batch_box_preds = (batch_box_preds + local_rois).view(-1, code_size)
batch_box_preds = box_torch_ops.rotate_points_along_z(
batch_box_preds.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
batch_box_preds[:, 0:3] += roi_xyz
batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
return batch_cls_preds, batch_box_preds
def printBinary(n: int) -> None:
    if n > 1:
        printBinary(n // 2)
    print(n % 2, end="")
def main() -> None:
N = int(input("Input an integer:\n"))
printBinary(N)
return None
main()
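# e.g. entering 10 prints "1010": printBinary recurses 10 -> 5 -> 2 -> 1 and
# prints each remainder on the way back out.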
# Authors: David Alexander, Lance Hepler
from __future__ import absolute_import, division, print_function
from GenomicConsensus.arrow.utils import allSingleBaseMutations
from GenomicConsensus.variants import Variant
from GenomicConsensus.quiver.diploid import variantsFromAlignment
import numpy as np
import ConsensusCore2 as cc
# IUPAC reference:
# http://www.bioinformatics.org/sms/iupac.html
_packIupac = { ("A", "G") : "R" ,
("G", "A") : "R" ,
("C", "T") : "Y" ,
("T", "C") : "Y" ,
("G", "C") : "S" ,
("C", "G") : "S" ,
("A", "T") : "W" ,
("T", "A") : "W" ,
("G", "T") : "K" ,
("T", "G") : "K" ,
("A", "C") : "M" ,
("C", "A") : "M" }
_unpackIupac = { "R" : ("A", "G") ,
"Y" : ("C", "T") ,
"S" : ("G", "C") ,
"W" : ("A", "T") ,
"K" : ("G", "T") ,
"M" : ("A", "C") }
def packIUPAC(bases):
return _packIupac[bases]
def unpackIUPAC(iupacCode):
return _unpackIupac[iupacCode]
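# e.g. packIUPAC(("A", "G")) == "R" and unpackIUPAC("R") == ("A", "G")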
def isHeterozygote(base):
return (base in _unpackIupac)
def packMuts(cssBase, mut1, mut2):
# Turn two muts (with same Start, End, LengthDiff) into a single mutation to
# IUPAC. The no-op mutation is coded as None.
#
# Example1: (_, Subs A, Subs T) -> Subs W
# Example2: (_, Ins A, Ins T) -> Ins W
# Example3: (A, None, Subs T) -> Subs W
#
nonNullMut = mut1 or mut2
start = nonNullMut.Start()
mutType = nonNullMut.Type()
newBase1 = mut1.Bases() if mut1 else cssBase
newBase2 = mut2.Bases() if mut2 else cssBase
newBasePacked = packIUPAC((newBase1, newBase2))
return cc.Mutation(mutType, start, newBasePacked)
def scoresForPosition(ai, pos):
muts = allSingleBaseMutations(str(ai), positions=[pos])
noMutScore = [0] * ai.NumReads()
mutScores_ = [ ai.ReadLLs(mut)
for mut in muts ]
mutScores = np.column_stack([noMutScore] + mutScores_).astype(np.float32)
return mutScores
def variantsFromConsensus(refWindow, refSequenceInWindow, cssSequenceInWindow,
cssQvInWindow=None, siteCoverage=None, aligner="affine",
ai=None):
"""
Compare the consensus and the reference in this window, returning
a list of variants.
Uses the integrator to identify heterozygous variants.
"""
assert (cssQvInWindow is None) == (siteCoverage is None) # Both or none
refId, refStart, refEnd = refWindow
if ai is not None:
#
# Hunting diploid variants:
# 1. find confident heterozygous sites;
# 2. build a "diploid consensus" using IUPAC encoding
# for het sites; mark cssQv accordingly
# 3. align diploid consensus to reference
# 4. extract and decorate variants
#
assert str(ai) == cssSequenceInWindow
iupacMutations = [] # List of (Mutation, confidence)
        for pos in range(0, ai.Length()):
ds = cc.IsSiteHeterozygous(scoresForPosition(ai, pos), 40)
if ds:
muts = [None] + list(allSingleBaseMutations(cssSequenceInWindow, positions=[pos]))
mut0 = muts[ds.Allele0]
mut1 = muts[ds.Allele1]
cssBase = cssSequenceInWindow[pos]
packedMut = packMuts(cssBase, mut0, mut1)
iupacMutations.append((packedMut, 40))
# Create diploidCss by applying mutations, meanwhile updating the
# confidence vector accordingly.
diploidCss = cc.ApplyMutations([pair[0] for pair in iupacMutations],
cssSequenceInWindow)
diploidQv = list(cssQvInWindow) if cssQvInWindow is not None else None
runningLengthDiff = 0
for (mut, conf) in iupacMutations:
start = mut.Start() + runningLengthDiff
end = mut.End() + runningLengthDiff
diploidQv[start:end] = [conf]
assert len(diploidCss) == len(diploidQv)
cssSequenceInWindow = diploidCss
cssQvInWindow = diploidQv
vars = variantsFromAlignment(refWindow,
refSequenceInWindow, cssSequenceInWindow,
cssQvInWindow, siteCoverage)
return vars
"""Tests for the /sessions/.../commands routes."""
import pytest
from datetime import datetime
from decoy import Decoy, matchers
from fastapi import FastAPI
from fastapi.testclient import TestClient
from httpx import AsyncClient
from typing import Callable, Awaitable
from tests.helpers import verify_response
from opentrons.protocol_engine import (
CommandStatus,
commands as pe_commands,
errors as pe_errors,
)
from robot_server.service.json_api import ResponseModel
from robot_server.sessions.session_models import BasicSession, SessionCommandSummary
from robot_server.sessions.engine_store import EngineStore
from robot_server.sessions.router.base_router import get_session as real_get_session
from robot_server.sessions.router.commands_router import (
commands_router,
CommandNotFound,
)
@pytest.fixture
def get_session(decoy: Decoy) -> Callable[..., Awaitable[ResponseModel]]:
"""Get a mock version of the get_session route handler."""
return decoy.mock(func=real_get_session)
@pytest.fixture(autouse=True)
def setup_app(
get_session: Callable[..., Awaitable[ResponseModel]],
app: FastAPI,
) -> None:
"""Setup the FastAPI app with commands routes and dependencies."""
app.dependency_overrides[real_get_session] = get_session
app.include_router(commands_router)
async def test_get_session_commands(
decoy: Decoy,
get_session: Callable[..., Awaitable[ResponseModel]],
async_client: AsyncClient,
) -> None:
"""It should return a list of all commands in a session."""
command_summary = SessionCommandSummary(
id="command-id",
commandType="moveToWell",
status=CommandStatus.RUNNING,
)
session_response = BasicSession(
id="session-id",
createdAt=datetime(year=2021, month=1, day=1),
actions=[],
commands=[command_summary],
)
decoy.when(
await get_session(
sessionId="session-id",
session_view=matchers.Anything(),
session_store=matchers.Anything(),
engine_store=matchers.Anything(),
),
).then_return(
ResponseModel(data=session_response) # type: ignore[arg-type]
)
response = await async_client.get("/sessions/session-id/commands")
verify_response(response, expected_status=200, expected_data=[command_summary])
def test_get_session_command_by_id(
decoy: Decoy,
engine_store: EngineStore,
client: TestClient,
) -> None:
"""It should return full details about a command by ID."""
command = pe_commands.MoveToWell(
id="command-id",
status=CommandStatus.RUNNING,
createdAt=datetime(year=2022, month=2, day=2),
data=pe_commands.MoveToWellData(pipetteId="a", labwareId="b", wellName="c"),
)
decoy.when(engine_store.engine.state_view.commands.get("command-id")).then_return(
command
)
response = client.get("/sessions/session-id/commands/command-id")
verify_response(response, expected_status=200, expected_data=command)
def test_get_session_command_missing_command(
decoy: Decoy,
engine_store: EngineStore,
client: TestClient,
) -> None:
"""It should 404 if you attempt to get a non-existent command."""
key_error = pe_errors.CommandDoesNotExistError("oh no")
decoy.when(engine_store.engine.state_view.commands.get("command-id")).then_raise(
key_error
)
response = client.get("/sessions/session-id/commands/command-id")
verify_response(
response,
expected_status=404,
expected_errors=CommandNotFound(detail=str(key_error)),
)
import discord
from discord.ext import commands
import steam
from steam import WebAPI, SteamID
import keys
##TODO: convert to psql or something
import pickle
import asyncio
import os
import re
## Used to track the worst players in MM
class PlayerTracker(commands.Cog):
## Init
def __init__(self, bot):
self.bot = bot
self.fileLock = asyncio.Lock()
self.filePath = "{0}/trackerDB.pickle".format(os.getcwd())
self.loadDatabase()
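        ## captures the numeric account id from DotaBuff/OpenDota profile URLs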
self.urlRegex = r"\.com/players/(\d+)"
self.api = WebAPI(keys.STEAM_WEBAPI)
## Adds name and comments to the tracker
@commands.command(help="Add a player to the MM Tracker")
async def add(self, ctx, name : str, *, description : str):
print("adding {0} as {1}".format(name, description))
self.database.setdefault(name.lower(), {"name" : name, "description" : []})
self.database[name.lower()]["description"].append(description)
await self.saveDatabase()
await ctx.message.add_reaction('✅')
## searches the tracker for player matches based on query
## this is a rudimentary case insensitive substring search
@commands.command(help="Search the MM Tracker for players")
async def search(self, ctx, *, name : str):
nameL = name.lower()
out = []
async with self.fileLock:
for k in self.database:
if nameL in k:
out.append(self.database[k]["name"])
await ctx.send("\n".join(out))
## display a player and their comments
@commands.command(help="Display information about a specific player in the MM Tracker")
async def display(self, ctx, *, name : str):
async with self.fileLock:
if name.lower() in self.database:
user = self.database[name.lower()]
desc = " **-** {0}".format("\n **-** ".join(user["description"]))
embed = discord.Embed()
embed.color = discord.Colour.purple()
embed.title = user["name"]
embed.add_field(name="Comments", value=desc)
embed.set_footer(text="Added {0} times".format(len(user["description"])))
if("id" in user and not user["id"] is None):
await self.steamEmbed(embed, user["id"])
await ctx.send(embed=embed)
else:
await ctx.send("`{0}` not found".format(name))
async def steamEmbed(self, embed, steamId):
profile = await self.getProfile(steamId)
embed.set_thumbnail(url=profile["avatarfull"])
embed.url = profile["profileurl"]
embed.description = embed.title
embed.title = profile["personaname"]
##75419738
## associate an actual steam profile
@commands.command(help="Associate a steam profile with a given MM Tracker profile.\n <identifier> can be a Steam ID 32/64, Steam Profile url, or DotaBuff/Opendota link.")
async def addProfile(self, ctx, name : str, identifier : str):
if name.lower() in self.database:
user = self.database[name.lower()]
identifier = self.resolveIdentifier(identifier)
try:
self.api.ISteamUser.GetPlayerSummaries(steamids=identifier)
            except Exception:
await ctx.send("Error retrieving Steam profile.")
return
user["id"] = SteamID(identifier).as_64
await self.saveDatabase()
await ctx.message.add_reaction('✅')
else:
await ctx.send("`{0}` not found".format(name))
def resolveIdentifier(self, identifier):
m = re.search(self.urlRegex, identifier)
if(m):
return(int(m.group(1)))
ident = steam.steamid.steam64_from_url(identifier)
if(ident):
return(ident)
try:
int(identifier)
return(identifier)
        except (TypeError, ValueError):
            pass
return
async def getProfile(self, steamId):
return(self.api.ISteamUser.GetPlayerSummaries(steamids=SteamID(steamId).as_64)["response"]["players"][0])
## Load the tracker database (TODO: convert to psql)
def loadDatabase(self):
self.database = {}
if(os.path.isfile(self.filePath)):
with open(self.filePath, "rb") as f:
self.database = pickle.load(f)
## Save the tracker database (TODO: convert to psql)
async def saveDatabase(self):
async with self.fileLock:
with open(self.filePath, "wb") as f:
pickle.dump(self.database, f)
def setup(bot):
bot.add_cog(PlayerTracker(bot))
__all__ = ['ioc_api', 'ioc_common', 'ioc_et', 'xmlutils']
import modin.pandas as pd
import swifter  # Do not remove - this modifies bindings for modin
import sys, os
import datetime
import csv
import random
import h5py
import numpy as np
import ipaddress
def write_single_graph(f, graph_id, x, edge_index, y, attrs=None, **kwargs):
'''
store into hdf5 file
'''
f.create_dataset(f'{graph_id}/x', data=x, dtype = 'float32')
f.create_dataset(f'{graph_id}/edge_index', data=edge_index, dtype = 'int64')
f.create_dataset(f'{graph_id}/y', data=y, dtype = 'uint8')
for key in kwargs:
f.create_dataset(f'{graph_id}/{key}', data=kwargs[key])
if attrs is not None:
for key in attrs:
f[f'{graph_id}'].attrs[key] = attrs[key]
return None
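# Resulting HDF5 layout (sketch): /<graph_id>/x, /<graph_id>/edge_index,
# /<graph_id>/y, one extra dataset per kwarg, and the attrs on the group itself.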
def ip2int(ip):
'''
convert x.x.x.x into a number
'''
try:
ip = ip.split(',')[0]
ip = ipaddress.ip_address(ip)
ip = int(ip)
return ip
    except Exception:
        # unparseable address: fall back to a random 32-bit value
        return random.randint(0, 1 << 32)
def search_dict(IP, IP_dict):
'''
use a dictionary to renumber the IPs into 0,1,2,...
'''
if IP not in IP_dict:
IP_dict[IP] = len(IP_dict)
return IP_dict[IP]
def prepare_background_(f, start_time, stop_time, NPARTS=30):
#read data
    df = pd.read_csv(f, sep='@')  # for a quick test run, add nrows=10000
df.columns = ["time", "srcIP", "dstIP"]
# contains per-minute logs
#filter time
df['time'] = df['time'].swifter.set_npartitions(NPARTS).apply(lambda x: datetime.datetime.strptime(x[:21], "%b %d, %Y %H:%M:%S"))
if start_time is not None:
start_time_formated = datetime.datetime.strptime(start_time, "%Y%m%d%H%M%S")
df = df[ df.time >= start_time_formated]
if stop_time is not None:
stop_time_formated = datetime.datetime.strptime(stop_time, "%Y%m%d%H%M%S")
df = df[ df.time < stop_time_formated]
#transform time and IP address into formal type
df["srcIP"] = df["srcIP"].swifter.set_npartitions(NPARTS).apply(ip2int)
df["dstIP"] = df["dstIP"].swifter.set_npartitions(NPARTS).apply(ip2int)
#aggregate nodes according to /20, build dictionary
df['srcIP'] = df['srcIP'].swifter.set_npartitions(NPARTS).apply(lambda x: x >> 12)
df['dstIP'] = df['dstIP'].swifter.set_npartitions(NPARTS).apply(lambda x: x >> 12)
# Drop time column and get rid of duplicates
# Convert to pandas to drop (faster)
df = df._to_pandas()
df = df.drop(columns=['time'])
df = df.drop_duplicates()
# shared dictionary, using across threads will mess it up
#renumber into 0, 1, 2, ..
IP_dict = {}
df["srcIP"] = df["srcIP"].apply(lambda x : search_dict(x, IP_dict))
df["dstIP"] = df["dstIP"].apply(lambda x : search_dict(x, IP_dict))
#write into h5py files
num_nodes = len(IP_dict)
num_edges = df.shape[0]
edge_index = np.array(df[["srcIP", "dstIP"]]).T
return edge_index, num_nodes, num_edges
def prepare_background(f, dst_dir, dst_name, graph_id, start_time, stop_time):
'''
Transform txt files into standard hdf5 format
arg = [txt_file_name, subgroup of graphs]
'''
edge_index, num_nodes, num_edges = prepare_background_(f, start_time, stop_time)
f_h5py = h5py.File(os.path.join(dst_dir,dst_name), 'a')
write_single_graph(f_h5py,
graph_id = graph_id,
x = np.ones([num_nodes, 1]),
edge_index = edge_index,
y = np.zeros(num_nodes),
attrs={'num_nodes': num_nodes, 'num_edges': num_edges, 'num_evils':0})
f_h5py.close()
if __name__ == '__main__':
# prepare_background('equinix-nyc.dirA.20181220-131256.UTC.anon.pcap', '.', 'tmp.hdf5', 0, '20181220081256', '20181220081257')
prepare_background('/p/adversarialml/as9rw/datasets/raw_botnet/temp.tmp',
'/p/adversarialml/as9rw/datasets/raw_botnet', 'tmp.hdf5', 0, None, None)
# Class to generate batches of image data to be fed to model
# inclusive of both original data and augmented data
# https://gist.github.com/devxpy/a73744bab1b77a79bcad553cbe589493
# example
# train_gen = PersonDataGenerator(
# train_df,
# batch_size=32,
# aug_list=[
# ImageDataGenerator(rotation_range=45),
# ImageDataGenerator(horizontal_flip=True),
# ImageDataGenerator(vertical_flip=True),
# ],
# incl_orig=True, # Whether to include original images
# )
from __future__ import division
import os
import os.path as path
import numpy as np
import keras
import cv2
from keras.preprocessing.image import ImageDataGenerator, img_to_array
class PersonDataGenerator(keras.utils.Sequence):
    def __init__(self, df, batch_size=32, shuffle=True, aug_list=None, incl_orig=True):
        """ Ground truth data batch generator """
        self.df = df
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.on_epoch_end()
        # Avoid a shared mutable default for the augmenter list.
        self.aug_list = aug_list if aug_list is not None else []
        self.incl_orig = incl_orig
        self.orig_len = int(np.floor(self.df.shape[0] / self.batch_size))
# Label columns per attribute
self._gender_cols_ = [col for col in df.columns if col.startswith("gender")]
self._imagequality_cols_ = [col for col in df.columns if col.startswith("imagequality")]
self._age_cols_ = [col for col in df.columns if col.startswith("age")]
self._weight_cols_ = [col for col in df.columns if col.startswith("weight")]
self._carryingbag_cols_ = [col for col in df.columns if col.startswith("carryingbag")]
self._footwear_cols_ = [col for col in df.columns if col.startswith("footwear")]
self._emotion_cols_ = [col for col in df.columns if col.startswith("emotion")]
self._bodypose_cols_ = [col for col in df.columns if col.startswith("bodypose")]
def __len__(self):
"""
Number of batches in the Sequence(i.e per epoch).
"""
if self.incl_orig:
delta = 1
else:
delta = 0
return self.orig_len * (len(self.aug_list) + delta)
def __getitem__(self, index):
"""
Gets batches of images - generates sets of images
based on augementation strategies, can include
original images as well - Original images will be
rescaled while generating batches
fetch batches of image data and targets
"""
        if not self.incl_orig:
            # No original block: shift the index past it so every batch
            # comes from an augmenter.
            index += self.orig_len
        if index > self.orig_len - 1:
            aug = self.aug_list[index // self.orig_len - 1]
            index %= self.orig_len
        else:
            aug = None
batch_slice = slice(index * self.batch_size, (index + 1) * self.batch_size)
items = self.df.iloc[batch_slice]
images = np.stack([cv2.imread(item["image_path"]) for _, item in items.iterrows()])
if aug is not None:
images = aug.flow(images, shuffle=False).next()
target = {
"gender_output" : items[self._gender_cols_].values,
"image_quality_output" : items[self._imagequality_cols_].values,
"age_output" : items[self._age_cols_].values,
"weight_output" : items[self._weight_cols_].values,
"bag_output" : items[self._carryingbag_cols_].values,
"pose_output" : items[self._bodypose_cols_].values,
"footwear_output" : items[self._footwear_cols_].values,
"emotion_output" : items[self._emotion_cols_].values,
}
return images, target
def on_epoch_end(self):
"""
Shuffles/sample the df and thereby
updates indexes after each epoch
Method called at the end of every epoch.
"""
        if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
from ._abstract import AbstractSearcher
from ._result import RecipeLink, SearchResult
import urllib.parse
from typing import List
class NyTimes(AbstractSearcher):
def __init__(self):
AbstractSearcher.__init__(self)
@classmethod
def host(cls):
return "https://cooking.nytimes.com"
def build_url(self, keyword, index):
query = urllib.parse.quote_plus(keyword)
return f'https://cooking.nytimes.com/search?q={query}&page={index}'
def parse_results(self, soup) -> List[RecipeLink]:
# Simple HTML lookups.
recipes = soup.find_all('article', class_='recipe-card')
results : List[RecipeLink] = []
for recipe in recipes:
title_block = recipe.find('div', class_='card-info-wrapper').find('a', class_='card-link')
link = self.parse_link(title_block.get('href'))
title = title_block.find('h3').string
results.append(RecipeLink(title.strip(), link, self.host()))
return results
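# A minimal usage sketch (parse_results needs a BeautifulSoup tree of a real
# results page; build_url is pure):
# NyTimes().build_url('chicken soup', 1)
# -> 'https://cooking.nytimes.com/search?q=chicken+soup&page=1'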
import json
import requests
import time
import hmac
import hashlib
from requests.exceptions import HTTPError
SDK_VERSION = '1.0.0'
CLOCK_DRIFT = 300
class HTTPClient(object):
def request(self, method, url, headers, data=None, auth=None):
raise NotImplementedError('subclass must implement request')
class RequestsClient(HTTPClient):
def request(self, method, url, headers, data=None, auth=None):
return requests.request(method, url, headers=headers, data=data, auth=auth)
class UnexpectedResponse(Exception):
def __init__(self, status, reason, message):
self.status = status
self.reason = reason
self.message = message
@staticmethod
def from_response(data):
return UnexpectedResponse(data.get('status'), data.get('reason'), data.get('message'))
class HostedTransactionResponse(object):
def __init__(self, tokens, hosted_url):
self.tokens = tokens
self.hosted_url = hosted_url
class Tokens(object):
def __init__(self, refresh_token, access_token, client_token, expiry, transaction_id, response):
self.access_token = access_token
self.client_token = client_token
self.refresh_token = refresh_token
self.expiry = expiry
self.transaction_id = transaction_id
self.response = response
def refresh(self, access_token, client_token, expiry, transaction_id):
self.access_token = access_token
self.client_token = client_token
self.expiry = expiry
self.transaction_id = transaction_id
def needs_refresh(self):
return self.access_token is None or self.expiry is None or self.expiry < time.time()
@staticmethod
def from_refresh(refresh_token):
return Tokens(refresh_token, None, None, None, None, None)
class Client(object):
def __init__(self, api_secret=None, **kwargs):
self.api_secret = api_secret
self.api_host = kwargs.get('api_host', 'https://api.berbix.com')
self.http_client = kwargs.get('http_client', RequestsClient())
if self.api_secret is None:
self.api_secret = kwargs.get('client_secret')
if self.api_secret is None:
raise ValueError(
'api_secret must be provided when instantiating the Berbix client')
def __fetch_tokens(self, path, payload):
try:
headers = {
'Content-Type': 'application/json',
'User-Agent': 'BerbixPython/' + SDK_VERSION,
}
result = self.http_client.request(
method='POST',
url='{}{}'.format(self.api_host, path),
headers=headers,
data=json.dumps(payload),
auth=(self.api_secret, ''))
if result.status_code < 200 or result.status_code >= 300:
raise UnexpectedResponse.from_response(
json.loads(result.content))
data = json.loads(result.content)
return Tokens(
data.get('refresh_token'),
data.get('access_token'),
data.get('client_token'),
data.get('expires_in') + time.time(),
data.get('transaction_id'),
data)
        except HTTPError:
            raise
def create_transaction(self, **kwargs):
payload = {}
if 'email' in kwargs:
payload['email'] = kwargs['email']
if 'phone' in kwargs:
payload['phone'] = kwargs['phone']
if 'customer_uid' in kwargs:
payload['customer_uid'] = str(kwargs['customer_uid'])
else:
raise ValueError(
'customer_uid must be provided when creating a transaction')
if 'template_key' in kwargs:
payload['template_key'] = kwargs['template_key']
else:
raise ValueError(
'template_key must be provided when creating a transaction')
if 'hosted_options' in kwargs:
payload['hosted_options'] = kwargs['hosted_options']
return self.__fetch_tokens('/v0/transactions', payload)
def create_hosted_transaction(self, **kwargs):
if 'hosted_options' not in kwargs:
kwargs['hosted_options'] = {}
tokens = self.__fetch_tokens('/v0/transactions', kwargs)
return HostedTransactionResponse(tokens, tokens.response['hosted_url'])
def refresh_tokens(self, tokens):
return self.__fetch_tokens('/v0/tokens', {
'refresh_token': tokens.refresh_token,
'grant_type': 'refresh_token',
})
def refresh_if_necessary(self, tokens):
if tokens.needs_refresh():
refreshed = self.refresh_tokens(tokens)
tokens.refresh(refreshed.access_token, refreshed.client_token,
refreshed.expiry, refreshed.transaction_id)
def __token_auth_request(self, method, tokens, path, payload=None):
self.refresh_if_necessary(tokens)
try:
headers = {
'Authorization': 'Bearer {0}'.format(tokens.access_token),
'User-Agent': 'BerbixPython/' + SDK_VERSION,
}
data = None
            if payload is not None:
data = json.dumps(payload)
headers["Content-Type"] = "application/json"
result = self.http_client.request(
method=method,
url='{}{}'.format(self.api_host, path),
headers=headers,
data=data)
if result.status_code < 200 or result.status_code >= 300:
raise UnexpectedResponse.from_response(
json.loads(result.content))
elif result.status_code == 204:
return
return json.loads(result.content)
        except HTTPError:
            raise
def fetch_transaction(self, tokens):
return self.__token_auth_request('GET', tokens, '/v0/transactions')
def delete_transaction(self, tokens):
return self.__token_auth_request('DELETE', tokens, '/v0/transactions')
def update_transaction(self, tokens, **kwargs):
payload = {}
if 'action' in kwargs:
payload['action'] = kwargs['action']
if 'note' in kwargs:
payload['note'] = kwargs['note']
return self.__token_auth_request('PATCH', tokens, '/v0/transactions', payload)
def override_transaction(self, tokens, **kwargs):
payload = {}
if 'response_payload' in kwargs:
payload['response_payload'] = kwargs['response_payload']
if 'flags' in kwargs:
payload['flags'] = kwargs['flags']
if 'override_fields' in kwargs:
payload['override_fields'] = kwargs['override_fields']
return self.__token_auth_request('PATCH', tokens, '/v0/transactions/override', payload)
def validate_signature(self, secret, body, header):
parts = header.split(',')
# Version (parts[0]) is currently unused
timestamp = parts[1]
signature = parts[2]
if int(timestamp) < time.time() - CLOCK_DRIFT:
return False
message = '{},{},{}'.format(timestamp, secret, body).encode('ascii')
digest = hmac.new(
str.encode(secret),
msg=message,
digestmod=hashlib.sha256
).hexdigest()
return digest == signature
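# A minimal usage sketch (hypothetical secret, customer id, and template key):
# client = Client(api_secret='...')
# tokens = client.create_transaction(customer_uid='user-123',
#                                    template_key='tpl_abc')
# details = client.fetch_transaction(tokens)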
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
if subprocess.Popen(['./problem']).wait() != 0:
    print("Wow, you've crushed this")
    try:
        # open() raises FileNotFoundError when the flag file is absent.
        with open('flag') as flagfile:
            print(flagfile.read())
    except FileNotFoundError:
        print("Flag is missing, tell admin")
"""
Sudo2 is for Loomgild.py
"""
from time import sleep
# Command Functions
def help():
print("Hello!")
print("Welcome to Loomgild, a script that imitates a command line.")
print("This is one of the few commands that you can use.")
print("We will now load the commands you can use..")
print("\n")
sleep(2.00)
print("Sys commands: exit")
print("Core commands: none")
print("Utility commands: help")
print("Misc commands: none")
def output():
prompt1 = input("Please provide the input to output: ")
print(prompt1)
def output_h():
print("Description: A command that outputs the user's input.")
print("Usage: output")
print("Tags: input, interactive")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to hold temporary routines created during the beamtime.
Everything added here is star (*) imported into the IPython shell after the
``SplitAndDelay`` object has successfully instantiated. Therefore, it is
recommended to run the specific unit-test to quickly ensure your inserted code
is syntactically correct. More specifically, it will test if this script is
importable. Of course this will not guarantee that the code works as intended,
but it will pick up on any 'easy' mistakes, like a mismatched parenthesis. To
run the test, in the top level directory, first source the snd environment:
source snd_env.sh
Then run the pytest script with the following command:
python run_tests.py hxrsnd/tests/test_scripts.py
The script will run (at least) one test and if your code was written correctly,
it will pass.
"""
# Imports from the Python standard library go here
import logging
# Imports from the third-party modules go here
import numpy as np
from ophyd import Component as Cmp
from ophyd import Device, EpicsSignal
from ophyd.sim import hw
from ophyd.status import wait as status_wait
# Imports from the HXRSnD module go here
import snd_devices
# Imports from other SLAC modules go here
# Default logger
logger = logging.getLogger(__name__)
###############################################################################
# Good Design Practices #
###############################################################################
# # Replace all print() statements with logger.info() statements # #
###############################################################################
# The Main reason for this is the IPython shell will log everything you log in
# log files IFF you use the logger methods, while also printing to the console.
# Even better, is if you include various logger levels. To use the logger,
# simply make the following substitution:
# print("text") --> logger.info("text")
# It is that simple, that the message will now be archived in the info level
# (HXRSnD/logs/info.log) and debug level (HXRSnD/logs/debug.log) log files.
# # Leave Comments # #
###############################################################################
# This seems like it may not be that important, but the purpose of this file is
# to temporarily hold scripts developed during beamtime to then be migrated by
# us (PCDS) into the module. By leaving comments, you make it easier for
# everyone to understand what the code is doing.
###############################################################################
# Insert Code Below #
###############################################################################
hw = hw() # Fake hardware for testing
fake_motor = hw.motor
class NotepadScanStatus(Device):
istep = Cmp(EpicsSignal, ":ISTEP")
isscan = Cmp(EpicsSignal, ":ISSCAN")
nshots = Cmp(EpicsSignal, ":NSHOTS")
nsteps = Cmp(EpicsSignal, ":NSTEPS")
var0 = Cmp(EpicsSignal, ":SCANVAR00")
var1 = Cmp(EpicsSignal, ":SCANVAR01")
var2 = Cmp(EpicsSignal, ":SCANVAR02")
var0_max = Cmp(EpicsSignal, ":MAX00")
var1_max = Cmp(EpicsSignal, ":MAX01")
var2_max = Cmp(EpicsSignal, ":MAX02")
var0_min = Cmp(EpicsSignal, ":MIN00")
var1_min = Cmp(EpicsSignal, ":MIN01")
var2_min = Cmp(EpicsSignal, ":MIN02")
def clean_fields(self):
for sig_name in self.signal_names:
sig = getattr(self, sig_name)
val = sig.value
if isinstance(val, (int, float)):
sig.put(0)
elif isinstance(val, str):
sig.put('')
notepad_scan_status = NotepadScanStatus('XCS:SCAN', name='xcs_scan_status')
def ascan(motor, start, stop, num, events_per_point=360, record=False,
controls=None, **kwargs):
"""
Quick re-implementation of old python for the transition
"""
daq = snd_devices.daq
events = events_per_point
status = notepad_scan_status
status.clean_fields()
if controls is None:
controls = {}
start_pos = motor.position
def get_controls(motor, extra_controls):
out_arr = {motor.name: motor}
out_arr.update(extra_controls)
return out_arr
try:
scan_controls = get_controls(motor, controls)
daq.configure(record=record, controls=scan_controls)
status.isscan.put(1)
status.nshots.put(events_per_point)
status.nsteps.put(num)
status.var0.put(motor.name)
status.var0_max.put(max((start, stop)))
status.var0_min.put(min((start, stop)))
for i, step in enumerate(np.linspace(start, stop, num)):
logger.info('Beginning step {}'.format(step))
try:
mstat = motor.set(step, verify_move=False, **kwargs)
except TypeError:
mstat = motor.set(step, **kwargs)
status.istep.put(i)
status_wait(mstat)
scan_controls = get_controls(motor, controls)
daq.begin(events=events, controls=scan_controls)
logger.info('Waiting for {} events ...'.format(events))
daq.wait()
finally:
logger.info('DONE!')
status.clean_fields()
daq.end_run()
daq.disconnect()
try:
motor.set(start_pos, verify_move=False, **kwargs)
except TypeError:
motor.set(start_pos, **kwargs)
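# A minimal usage sketch with the fake hardware above (the daq and the EPICS
# status PVs are live devices, so this is illustrative only):
# ascan(fake_motor, 0, 1, num=5, events_per_point=120)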
from random import randint
while True:
print("----------\n[j] para jogar o dado\n[e] para fechar")
res = str(input().replace(" ", "").lower())
if res == "j":
print(f"\nvalor do dado: {randint(1, 6)}")
if res == "e":
break
print("----------\n")
import contextlib
import logging
import os
from pathlib import Path
import shutil
from subprocess import CalledProcessError, check_output
import sys
from tempfile import NamedTemporaryFile
from typing import cast
from spython.main import Client
from lm_zoo import errors
from lm_zoo.backends import ContainerBackend
from lm_zoo.constants import STATUS_CODES
from lm_zoo.models import Model, SingularityModel
L = logging.getLogger(__name__)
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
# https://stackoverflow.com/a/34333710/176075
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
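# e.g. (variable names are arbitrary examples):
# with modified_environ('PYTHONPATH', LMZOO_DEBUG='1'):
#     ...  # PYTHONPATH removed, LMZOO_DEBUG set; both restored on exit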
def is_cuda_available():
"""
Hacky method to check whether CUDA is available for use on this host.
"""
if shutil.which("nvidia-smi") is None:
return False
try:
output = check_output(["nvidia-smi", "-L"])
has_gpus = bool(output.strip())
return has_gpus
except CalledProcessError:
return False
class SingularityBackend(ContainerBackend):
@classmethod
def is_compatible(cls, model):
return len(set(model.platforms) & {"singularity", "shub", "library"}) > 0
def image_exists(self, model):
# TODO library, shub
result = Client.inspect(model.reference)
if result.get("return_code", 0) != 0:
return False
return True
def pull_image(self, model, progress_stream=sys.stderr):
if len(set(model.platforms) & {"shub", "library"}) == 0:
if "singularity" in model.platforms:
# It's a local image. Just check that it exists, and raise if
# not.
if not self.image_exists(model):
raise ValueError("Could not find local Singularity image at %s" % (model.reference,))
else:
raise ValueError("Only know how to pull from shub:// and library://"
" . This Singularity model does not come from "
"either repository.")
return Client.pull(image="%s://%s" % (model.repository, model.reference))
def run_command(self, model: Model, command_str,
mounts=None, environment=None,
stdin=None, stdout=sys.stdout, stderr=sys.stderr,
raise_errors=True):
model = cast(SingularityModel, model)
if mounts is None:
mounts = []
if environment is None:
environment = {}
# Support custom checkpoint loading
if model.checkpoint is not None:
host_checkpoint_path = Path(model.checkpoint).absolute()
# Mount given checkpoint read-only within the guest
guest_checkpoint_path = "/opt/lmzoo_checkpoint"
mounts.append((host_checkpoint_path, guest_checkpoint_path, "ro"))
# Update relevant environment variable
environment["LMZOO_CHECKPOINT_PATH"] = guest_checkpoint_path
binds = ["%s:%s:%s" % (host, guest, mode)
for host, guest, mode in mounts]
# TODO make configurable
nv = is_cuda_available()
command = command_str.split(" ")
if stdin is not None:
stdin_f = NamedTemporaryFile("w")
stdin_f.write(stdin.read())
stdin_f.flush()
binds.append("%s:/host_stdin:ro" % stdin_f.name)
command = ["sh", "-c", 'cat /host_stdin | %s' % " ".join(command)]
# TODO no separate stderr support :( manually reroute stderr for now
command.append("2>/dev/null")
# Prepare environment variables for export
environment = {"SINGULARITYENV_%s" % key: value
for key, value in environment.items()}
try:
with modified_environ(**environment):
exec_options = []
# Maximally isolate container from host -- this resolves some
# parallel execution issues we've observed in the past.
exec_options.append("--containall")
result = Client.execute(image=model.reference, command=command,
nv=nv, bind=binds, stream=True,
options=exec_options)
for line in result:
stdout.write(line)
except CalledProcessError as e:
if raise_errors:
if e.returncode == STATUS_CODES["unsupported_feature"]:
feature = command_str.split(" ")[0]
raise errors.UnsupportedFeatureError(feature=feature,
model=str(model))
else:
raise
if stdin is not None:
stdin_f.close()
return result
# Import the forms library:
from django import forms
# Import the list of possible options for the select:
from .pqrsf import PQRSF_CHOICES
# Define the structure of the form
class ContactFrom(forms.Form):
    """Form fields"""
    email = forms.EmailField(label="Email address", widget=forms.EmailInput(attrs={'class': 'form-control'}), required=True)
    tipom = forms.ChoiceField(choices=PQRSF_CHOICES, label="Type of service requested", initial='', widget=forms.Select(attrs={'class': 'form-control'}), required=True)
    nombre = forms.CharField(label="Name", required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
    msj = forms.CharField(label="Message", widget=forms.Textarea(attrs={'class': 'form-control', 'rows': '3'}), required=True)
def test_load(session, inline):
inline("PASS")
TAG_MAP = {
('landuse', 'forest'): {"TYPE": "forest", "DRAW_TYPE": "plane"},
('natural', 'wood'): {"TYPE": "forest", "SUBTYPE": "natural", "DRAW_TYPE": "plane"}
}
def find_type(tags):
    # Match each (key, value) pair of the tag dict against the known tag map.
    pairs = list(tags.items())
    return [TAG_MAP[pair] for pair in pairs if pair in TAG_MAP]
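# e.g. find_type({'landuse': 'forest', 'name': 'Oakwood'})
# -> [{'TYPE': 'forest', 'DRAW_TYPE': 'plane'}]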
from twilio.twiml.voice_response import VoiceResponse, Dial
def generate_wait():
twiml_response = VoiceResponse()
wait_message = (
'Thank you for calling. Please wait in line for a few seconds.'
' An agent will be with you shortly.'
)
wait_music = 'http://com.twilio.music.classical.s3.amazonaws.com/BusyStrings.mp3'
twiml_response.say(wait_message)
twiml_response.play(wait_music)
return str(twiml_response)
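# For reference, generate_wait() renders TwiML along these lines (values come
# from the strings above):
# <Response>
#   <Say>Thank you for calling. Please wait in line ...</Say>
#   <Play>http://com.twilio.music.classical.s3.amazonaws.com/BusyStrings.mp3</Play>
# </Response>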
def generate_connect_conference(call_sid, wait_url, start_on_enter, end_on_exit):
twiml_response = VoiceResponse()
dial = Dial()
dial.conference(
call_sid,
start_conference_on_enter=start_on_enter,
end_conference_on_exit=end_on_exit,
wait_url=wait_url,
)
    twiml_response.append(dial)
    return str(twiml_response)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from helper import find_csv
grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3)
ax1 = plt.subplot(grid[0,0])
ax2= plt.subplot(grid[0,1])
ax3= plt.subplot(grid[1,:])
csv_files = list(find_csv())  # scan once so legends match the plotted order
for i in csv_files:
df = pd.read_csv(i,header=None)
df_forward = df[:int(len(df)/2)]
forward_peak = df_forward[0].iloc[df_forward[1].idxmin()]
df_backward = df[int(len(df)/2):]
df_backward = df_backward.reset_index(drop=True) #Use drop to discard the old index
backward_peak = df_backward[0].iloc[df_backward[1].idxmax()]
#ax1.axvline(x=forward_peak,alpha=0.7)
#ax2.axvline(x=backward_peak,alpha=0.7)
df_forward.plot(x=0,y=1, ax=ax1)
df_backward.plot(x=0,y=1,ax=ax2)
df.plot(x=0,y=1,ax=ax3)
ax1.legend(csv_files, loc=0)
ax2.legend(csv_files, loc=0)
ax3.legend(csv_files, loc=0)
ax1.set_title('Forward scan')
ax2.set_title('Reverse Scan')
ax3.set_title('CV')
plt.show()
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SetMaterialRgbParamNode(Node, ArmLogicTreeNode):
'''Set material rgb param node'''
bl_idname = 'LNSetMaterialRgbParamNode'
bl_label = 'Set Material RGB Param'
bl_icon = 'GAME'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('NodeSocketShader', 'Material')
self.inputs.new('NodeSocketString', 'Node')
self.inputs.new('NodeSocketColor', 'Color')
self.outputs.new('ArmNodeSocketAction', 'Out')
add_node(SetMaterialRgbParamNode, category='Action')
#!/usr/bin/env python
# encoding: utf-8
from smisk.mvc import *
from smisk.serialization import data
import datetime, time
# Importing the serializers causes them to be registered
import my_xml_serializer
import my_text_serializer
# Some demo data
DEMO_STRUCT = dict(
string = "Doodah",
items = ["A", "B", 12, 32.1, [1, 2, 3]],
float = 0.1,
integer = 728,
dict = dict(
str = "<hello & hi there!>",
unicode = u'M\xe4ssig, Ma\xdf',
true_value = True,
false_value = False,
),
data = data("<binary gunk>"),
more_data = data("<lots of binary gunk>" * 10),
date = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# Our controller tree
class root(Controller):
def __call__(self, *args, **params):
'''Return some data
'''
return DEMO_STRUCT
def echo(self, *va, **kw):
'''Returns the structure received
'''
if not kw and va:
kw['arguments'] = va
return kw
if __name__ == '__main__':
from smisk.config import config
config.loads('"logging": {"levels":{"":DEBUG}}')
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Stp - Stock Patterns
Usage: stp_mgr
stp_mgr insider
"""
from docopt import docopt
import stp
print(stp.__file__)
from stp import feed
from stp.feed.insidertrading import data
import sys
def insider():
records = data.get_records()
for record in records:
print(record)
def main():
args = docopt(__doc__)
if args["insider"]:
insider()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# --------------------------------------------------------
# CRPN
# Written by Linjie Deng
# --------------------------------------------------------
import yaml
import caffe
import numpy as np
from fast_rcnn.config import cfg
from fast_rcnn.nms_wrapper import nms
from quad.quad_convert import whctrs, mkanchors, quad_2_aabb, obb_2_quad, dual_roi
from quad.quad_2_obb import quad_2_obb
DEBUG = False
class Corner(object):
# Corner property
def __init__(self, name):
self.name = name
# position
self.pos = None
# probability
self.prb = None
# class of link direction
self.cls = None
class ProposalLayer(caffe.Layer):
# Corner-based Region Proposal Network
# Input: prob map of each corner
# Output: quadrilateral region proposals
def setup(self, bottom, top):
# top: (ind, x1, y1, x2, y2, x3, y3, x4, y4)
layer_params = yaml.load(self.param_str)
self._feat_stride = layer_params['feat_stride']
num_rois = 2 if cfg.DUAL_ROI else 1
top[0].reshape(num_rois, 9)
if len(top) > 1:
top[1].reshape(num_rois, 5)
def forward(self, bottom, top):
# params
cfg_key = self.phase # either 'TRAIN' or 'TEST'
if cfg_key == 0:
cfg_ = cfg.TRAIN
else:
cfg_ = cfg.TEST
# corner params
pt_thres = cfg_.PT_THRESH
pt_max_num = cfg.PT_MAX_NUM
pt_nms_range = cfg.PT_NMS_RANGE
pt_nms_thres = cfg.PT_NMS_THRESH
# proposal params
ld_interval = cfg.LD_INTERVAL
ld_um_thres = cfg.LD_UM_THRESH
# rpn params
# min_size = cfg_.RPN_MIN_SIZE
nms_thresh = cfg_.RPN_NMS_THRESH
pre_nms_topN = cfg_.RPN_PRE_NMS_TOP_N
post_nms_topN = cfg_.RPN_POST_NMS_TOP_N
im_info = bottom[0].data[0, :]
score_tl = bottom[1].data[0, :].transpose((1, 2, 0))
score_tr = bottom[2].data[0, :].transpose((1, 2, 0))
score_br = bottom[3].data[0, :].transpose((1, 2, 0))
score_bl = bottom[4].data[0, :].transpose((1, 2, 0))
scores = np.concatenate([score_tl[:, :, :, np.newaxis],
score_tr[:, :, :, np.newaxis],
score_br[:, :, :, np.newaxis],
score_bl[:, :, :, np.newaxis]], axis=3)
map_info = scores.shape[:2]
# 1. sample corner candidates from prob maps
tl, tr, br, bl = _corner_sampling(scores, pt_thres, pt_max_num, pt_nms_range, pt_nms_thres)
# 2. assemble corner candidates into proposals
proposals = _proposal_sampling(tl, tr, br, bl, map_info, ld_interval, ld_um_thres)
# 3. filter
proposals = filter_quads(proposals)
scores = proposals[:, 8]
proposals = proposals[:, :8]
        # 4. rescale quads into raw image space
        proposals = proposals * self._feat_stride
        # 5. quadrilateral non-max suppression
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
keep = nms(np.hstack((proposals, scores[:, np.newaxis])).astype(np.float32, copy=False), nms_thresh)
proposals = proposals[keep, :]
scores = scores[keep]
if post_nms_topN > 0:
proposals = proposals[:post_nms_topN, :]
scores = scores[:post_nms_topN]
if proposals.shape[0] == 0:
# add whole image to avoid error
print 'NO PROPOSALS!'
proposals = np.array([[0, 0, im_info[1], 0, im_info[1], im_info[0], 0, im_info[0]]])
scores = np.array([0.0])
# output
# top[0]: quads(x1, y1, x2, y2, x3, y3, x4, y4)
# top[1]: rois(xmin, ymin, xmax, ymax, theta)
# top[2]: scores
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
top[0].reshape(*blob.shape)
top[0].data[...] = blob
if len(top) > 1:
if cfg.DUAL_ROI:
rois = quad_2_obb(np.array(proposals, dtype=np.float32))
rois = dual_roi(rois)
else:
rois = quad_2_obb(np.array(proposals, dtype=np.float32))
batch_inds = np.zeros((rois.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, rois.astype(np.float32, copy=False)))
top[1].reshape(*blob.shape)
top[1].data[...] = blob
if len(top) > 2:
scores = np.vstack((scores, scores)).transpose()
top[2].reshape(*scores.shape)
top[2].data[...] = scores
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _map_2_corner(pred_map, thresh, max_num, nms_range, nms_thres):
pos_map = 1 - pred_map[:, :, 0]
pts_cls = np.argmax(pred_map[:, :, 1:], 2) + 1
ctr_y, ctr_x = np.where(pos_map >= thresh)
ctr_pts = np.vstack((ctr_x, ctr_y)).transpose()
ws = np.ones(ctr_x.shape) * nms_range
hs = np.ones(ctr_y.shape) * nms_range
anchors = np.hstack((mkanchors(ws, hs, ctr_x, ctr_y), get_value(ctr_pts, pos_map)))
keep = nms(anchors, nms_thres)
if max_num > 0:
keep = keep[:max_num]
pos = ctr_pts[keep, :]
prb = pos_map
cls = pts_cls
return pos, prb, cls
def _corner_sampling(maps, thresh, max_num, nms_range, nms_thres):
tl = Corner('top_left')
tl.pos, tl.prb, tl.cls = _map_2_corner(maps[:, :, :, 0], thresh, max_num, nms_range, nms_thres)
tr = Corner('top_right')
tr.pos, tr.prb, tr.cls = _map_2_corner(maps[:, :, :, 1], thresh, max_num, nms_range, nms_thres)
br = Corner('bot_right')
br.pos, br.prb, br.cls = _map_2_corner(maps[:, :, :, 2], thresh, max_num, nms_range, nms_thres)
bl = Corner('bot_left')
bl.pos, bl.prb, bl.cls = _map_2_corner(maps[:, :, :, 3], thresh, max_num, nms_range, nms_thres)
return tl, tr, br, bl
def _gen_diags(a, b, theta_invl=15, max_diff=1):
max_label = round(360.0 / theta_invl)
idx_a = np.arange(0, a.pos.shape[0])
idx_b = np.arange(0, b.pos.shape[0])
idx_a, idx_b = np.meshgrid(idx_a, idx_b)
idx_a = idx_a.ravel()
idx_b = idx_b.ravel()
diag_pos = np.hstack((a.pos[idx_a, :], b.pos[idx_b, :]))
#
keep = np.where((diag_pos[:, 0] != diag_pos[:, 2]) | (diag_pos[:, 1] != diag_pos[:, 3]))[0]
diag_pos = diag_pos[keep, :]
prac_label = compute_link(diag_pos[:, 0:2], diag_pos[:, 2:4], theta_invl)
pred_label = get_value(diag_pos[:, 0:2], a.cls)
diff_label_a = diff_link(prac_label, pred_label, max_label)
#
prac_label = np.mod(prac_label + max_label / 2, max_label)
pred_label = get_value(diag_pos[:, 2:4], b.cls)
diff_label_b = diff_link(prac_label, pred_label, max_label)
keep = np.where((diff_label_a <= max_diff) & (diff_label_b <= max_diff))[0]
diag_pos = diag_pos[keep, :]
diag_prb = np.hstack((get_value(diag_pos[:, 0:2], a.prb), get_value(diag_pos[:, 2:4], b.prb)))
return diag_pos, diag_prb
def _gen_trias(diag_pos, diag_prb, c, theta_invl=15, max_diff=1):
    max_label = round(360.0 / theta_invl)
idx_a = np.arange(0, diag_pos.shape[0])
idx_b = np.arange(0, c.pos.shape[0])
idx_a, idx_b = np.meshgrid(idx_a, idx_b)
idx_a = idx_a.ravel()
idx_b = idx_b.ravel()
tria_pos = np.hstack((diag_pos[idx_a, :], c.pos[idx_b, :]))
tria_prb = np.hstack((diag_prb[idx_a, :], get_value(c.pos[idx_b, :], c.prb)))
#
areas = compute_tria_area(tria_pos[:, 0:2], tria_pos[:, 2:4], tria_pos[:, 4:6])
keep = np.where(areas != 0)[0]
tria_pos = tria_pos[keep, :]
tria_prb = tria_prb[keep, :]
ws, hs, ctr_x, ctr_y = whctrs(tria_pos[:, 0:4])
prac_theta = compute_theta(tria_pos[:, 4:6], np.vstack((ctr_x, ctr_y)).transpose())
prac_label = np.floor(prac_theta / theta_invl) + 1
pred_label = get_value(tria_pos[:, 4:6], c.cls)
diff_label = diff_link(prac_label, pred_label, max_label)
keep = np.where(diff_label <= max_diff)[0]
tria_pos = tria_pos[keep, :]
tria_prb = tria_prb[keep, :]
prac_theta = prac_theta[keep]
#
prac_theta = np.mod(prac_theta + 180.0, 360.0) / 180.0 * np.pi
len_diag = np.sqrt(np.sum(np.square(tria_pos[:, 0:2] - tria_pos[:, 2:4]), axis=1)) / 2.
dist_x = len_diag * np.cos(prac_theta[:, 0])
dist_y = len_diag * np.sin(prac_theta[:, 0])
ws, hs, ctr_x, ctr_y = whctrs(tria_pos[:, 0:4])
tria_pos[:, 4:6] = np.vstack((ctr_x + dist_x, ctr_y - dist_y)).astype(np.int32, copy=False).transpose()
return tria_pos, tria_prb
def _get_last_one(tria, d):
map_shape = d.prb.shape[:2]
ws, hs, ctr_x, ctr_y = whctrs(tria[:, 0:4])
pos = np.vstack((2 * ctr_x - tria[:, 4], 2 * ctr_y - tria[:, 5])).transpose()
pos[:, 0] = np.maximum(np.minimum(pos[:, 0], map_shape[1] - 1), 0)
pos[:, 1] = np.maximum(np.minimum(pos[:, 1], map_shape[0] - 1), 0)
pos = np.array(pos, dtype=np.int32)
prb = get_value(pos, d.prb)
return pos, prb
def _clip_trias(tria_pos, tria_prb, c, map_info):
tria_pos[:, 4] = np.maximum(np.minimum(tria_pos[:, 4], map_info[1] - 1), 0)
tria_pos[:, 5] = np.maximum(np.minimum(tria_pos[:, 5], map_info[0] - 1), 0)
tria_prb[:, 2:] = get_value(tria_pos[:, 4:6], c.prb)
return tria_pos, tria_prb
def _proposal_sampling(tl, tr, br, bl, map_info, theta_invl=15, max_diff=1):
# DIAG: [top_left, bot_right]
diag_pos, diag_prb = _gen_diags(tl, br, theta_invl, max_diff)
# TRIA: [DIAG, top_right]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, tr, theta_invl, max_diff)
# QUAD: [TRIA, bot_left]
temp_pos, temp_prb = _get_last_one(tria_pos, bl)
# refine top_right
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, tr, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quads = np.hstack((tria_pos[:, 0:2], tria_pos[:, 4:6], tria_pos[:, 2:4], temp_pos))
quads = np.hstack((quads, score[:, np.newaxis]))
# TRIA: [DIAG, bot_left]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, bl, theta_invl, max_diff)
# QUAD: [TRIA, top_right]
temp_pos, temp_prb = _get_last_one(tria_pos, tr)
# refine bot_left
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, bl, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 0:2], temp_pos, tria_pos[:, 2:4], tria_pos[:, 4:6]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
# DIAG: [bot_left, top_right]
diag_pos, diag_prb = _gen_diags(bl, tr, theta_invl, max_diff)
# TRIA: [DIAG, top_left]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, tl, theta_invl, max_diff)
# QUAD: [TRIA, bot_right]
temp_pos, temp_prb = _get_last_one(tria_pos, br)
# refine top_left
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, tl, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 4:6], tria_pos[:, 2:4], temp_pos, tria_pos[:, 0:2]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
    # TRIA: [DIAG, bot_right]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, br, theta_invl, max_diff)
# QUAD: [TRIA, top_left]
temp_pos, temp_prb = _get_last_one(tria_pos, tl)
    # refine bot_right
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, br, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 0:2], temp_pos, tria_pos[:, 2:4], tria_pos[:, 4:6]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
return quads
def get_value(pts, maps):
vals = maps[pts[:, 1], pts[:, 0]]
return vals[:, np.newaxis]
def compute_score(scores):
score = scores[:, 0] * scores[:, 1] * scores[:, 2] * scores[:, 3]
return score
def compute_theta(p1, p2):
dx = p2[:, 0] - p1[:, 0]
dy = p2[:, 1] - p1[:, 1]
val = dx / np.sqrt(dx * dx + dy * dy)
val = np.maximum(np.minimum(val, 1), -1)
theta = np.arccos(val) / np.pi * 180
idx = np.where(dy > 0)[0]
theta[idx] = 360 - theta[idx]
return theta[:, np.newaxis]
def compute_link(p1, p2, interval):
theta = compute_theta(p1, p2)
label = np.floor(theta / interval) + 1
return label
def diff_link(t1, t2, max_orient):
dt = np.abs(t2 - t1)
dt = np.minimum(dt, max_orient - dt)
return dt
def compute_tria_area(p1, p2, p3):
area = (p2[:, 0] - p1[:, 0]) * (p3[:, 1] - p1[:, 1]) - \
(p2[:, 1] - p1[:, 1]) * (p3[:, 0] - p1[:, 0])
return area
def filter_quads(quads):
area_1 = compute_tria_area(quads[:, 0:2], quads[:, 2:4], quads[:, 4:6])
area_2 = compute_tria_area(quads[:, 0:2], quads[:, 2:4], quads[:, 6:8])
area_3 = compute_tria_area(quads[:, 0:2], quads[:, 4:6], quads[:, 6:8])
area_4 = compute_tria_area(quads[:, 2:4], quads[:, 4:6], quads[:, 6:8])
areas = area_1 * area_2 * area_3 * area_4
keep = np.where(areas != 0)[0]
quads = quads[keep, :]
return quads
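# Sanity-check sketch (synthetic quads, not part of the original layer): a quad
# with three collinear corners yields a zero triangle area and is dropped by
# filter_quads, while a proper square is kept.
if DEBUG:
    _quads = np.array([[0, 0, 2, 0, 2, 2, 0, 2],   # proper square: kept
                       [0, 0, 1, 0, 2, 0, 0, 2]],  # collinear corners: dropped
                      dtype=np.float32)
    print filter_quads(_quads)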
|
nilq/baby-python
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from yandex.cloud.access import access_pb2 as yandex_dot_cloud_dot_access_dot_access__pb2
from yandex.cloud.containerregistry.v1 import registry_pb2 as yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2
from yandex.cloud.containerregistry.v1 import registry_service_pb2 as yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class RegistryServiceStub(object):
"""A set of methods for managing Registry resources.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Get',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.GetRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2.Registry.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/List',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Create',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.CreateRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Update',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.UpdateRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Delete',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.DeleteRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/ListAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.FromString,
)
self.SetAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/SetAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/UpdateAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class RegistryServiceServicer(object):
"""A set of methods for managing Registry resources.
"""
def Get(self, request, context):
"""Returns the specified Registry resource.
To get the list of available Registry resources, make a [List] request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Retrieves the list of Registry resources in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates a registry in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListAccessBindings(self, request, context):
"""access
Lists access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetAccessBindings(self, request, context):
"""Sets access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateAccessBindings(self, request, context):
"""Updates access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RegistryServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.GetRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2.Registry.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesRequest.FromString,
response_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.CreateRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.UpdateRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.DeleteRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'ListAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.ListAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.SerializeToString,
),
'SetAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.SetAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.UpdateAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.containerregistry.v1.RegistryService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
nilq/baby-python
|
python
|
#-----------------------------------------------------------------------
#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: D.M.Pelt@cwi.nl
#Website: http://dmpelt.github.io/msdnet/
#License: MIT
#
#This file is part of MSDNet, a Python implementation of the
#Mixed-Scale Dense Convolutional Neural Network.
#-----------------------------------------------------------------------
"""
Module for defining and processing validation sets.
"""
from . import store
from . import operations
from . import loss
import abc
import numpy as np
class Validation(abc.ABC):
"""Base class for processing a validation set."""
@abc.abstractmethod
def validate(self, n):
"""Compute validation metrics.
:param n: :class:`.network.Network` to validate with
:return: True if validation metric is lower than best validation error encountered, False otherwise.
"""
pass
    @abc.abstractmethod
    def to_dict(self):
        """Return a dictionary containing all validation parameters.
        :return: all validation parameters
        """
        pass
    @abc.abstractmethod
    def load_dict(self, dct):
        """Set all validation parameters from a dictionary.
        :param dct: dictionary with all parameters
        """
        pass
@classmethod
@abc.abstractmethod
def from_dict(cls, dct):
"""Initialize Validation object from dictionary.
:param dct: dictionary with all parameters
"""
pass
@classmethod
def from_file(cls, fn):
"""Initialize Validation object from file.
:param fn: filename
"""
dct = store.get_dict(fn, 'validation')
return cls.from_dict(dct)
def to_file(self, fn):
"""Save all Validation object parameters to file.
:param fn: filename
"""
store.store_dict(fn, 'validation', self.to_dict())
class LossValidation(Validation):
"""Validation object that computes simple difference metrics.
:param data: list of :class:`.data.DataPoint` objects to validate with.
:param keep: (optional) whether to keep the best, worst, and typical result in memory.
"""
def __init__(self, data, loss=None, keep=True):
self.d = data
self.keep = keep
self.best = np.Inf
self.loss = loss
def errorfunc(self, output, target, msk):
"""Error function used for validation.
:param output: network output image.
:param target: target image.
        :param msk: mask image to indicate where to compute error function for.
:return: error function value.
"""
lv = self.loss.lossvalue(output, target, msk)
if msk is None:
npix = target.size
else:
npix = target.shape[0]*(msk>0).sum()
return lv/npix
def getbest(self):
"""Return the input, target, and network output for best result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[0]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[0])
else:
out.append(self.n.forward(d.input))
return out
def getworst(self):
"""Return the input, target, and network output for worst result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[1]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[1])
else:
out.append(self.n.forward(d.input))
return out
def getmedian(self):
"""Return the input, target, and network output for median result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[2]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[2])
else:
out.append(self.n.forward(d.input))
return out
def validate(self, n):
self.n = n
errs = np.zeros(len(self.d))
if self.keep:
self.outputs = [0,0,0]
low = np.Inf
high = -np.Inf
self.idx = [0,0,0]
for i,d in enumerate(self.d):
out = self.n.forward(d.input)
err = self.errorfunc(out, d.target, d.mask)
errs[i] = err
if err<low:
low = err
self.idx[0] = i
if self.keep:
self.outputs[0] = out
if err>high:
high = err
self.idx[1] = i
if self.keep:
self.outputs[1] = out
median = np.argsort(errs)[errs.shape[0]//2]
self.idx[2] = median
if self.keep:
if median==self.idx[0]:
self.outputs[2] = self.outputs[0]
elif median==self.idx[1]:
self.outputs[2] = self.outputs[1]
else:
self.outputs[2] = self.n.forward(self.d[median].input)
error = errs.mean()
self.curerr = error
if error<self.best:
self.best = error
return True
return False
def to_dict(self):
dct = {}
dct['best'] = self.best
dct['keep'] = self.keep
return dct
def load_dict(self, dct):
self.best = dct['best']
self.keep = dct['keep']
@classmethod
def from_dict(cls, dct):
v = cls(None, None)
v.load_dict(dct)
return v
# For backwards compatibility, uses L2 norm
class MSEValidation(LossValidation):
def __init__(self, data, keep=True):
super().__init__(data, loss=loss.L2Loss(), keep=keep)
|
nilq/baby-python
|
python
|
import numpy as np
from utils.misc import arr2grid
from planner.astar import AStar
from planner.dijkstra import Dijkstra
from planner.bestfirst import BestFirst
from planner.breadthfirst import BreadthFirst
from planner.bi_astar import BiAStar
from planner.bi_dijkstra import BiDijkstra
from planner.bi_bestfirst import BiBestFirst
# e.g. [[1, 1, 1],
#       [1, 0, 1],
#       [1, 0, 1]]
img = np.array([[1,1,1],[1,0,1],[1,0,1]])
# convert array to networkx graph
grid = arr2grid(img, diagonal=True)
source = (2,0)
target = (2,2)
#target = [(1,1),(2,2),(2,1)]
rp = BiAStar()
route = rp.multi_plan([((2,0), (2,2)), ((2,0), (0,0))], graph=grid)
#route = rp.plan(source, target, grid)
print(route)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2017
'''
Formatter base classes.
'''
import os
import io
from lxml import etree
class Formatter:
'''
Base class for all formatters.
'''
ext = None
binary = False # text or binary format?
def __init__(self, config, fmt_name):
self.config = config
self.fmt_name = fmt_name
def export(self, content):
'''
Write this content to disk.
'''
open_params = self._get_open_params(content)
try:
f = open(**open_params)
except FileNotFoundError:
# An intermediate directory didn't exist.
# Create it and try again.
# (Use exist_ok because of race conditions -- another
# worker might have created it in the meantime.)
os.makedirs(os.path.dirname(open_params['file']), exist_ok=True)
f = open(**open_params)
with f:
self.write(f, content)
def write(self, stream, content):
'''
Write this content to an open file.
'''
raise NotImplementedError()
def dump(self, content):
'''
Serialise the content to str or bytes.
'''
raise NotImplementedError()
def _get_open_params(self, content):
path = self.config.get_out_path(content.id_, content.basename,
self.fmt_name, self.ext)
if self.binary:
return dict(file=path, mode='wb')
else:
return dict(file=path, mode='w', encoding='utf8')
class MemoryFormatter(Formatter):
'''
Abstract formatter with a primary dump method.
Subclasses must override dump(), on which write() is based.
'''
def write(self, stream, content):
stream.write(self.dump(content))
class StreamFormatter(Formatter):
'''
Abstract formatter with a primary write method.
Subclasses must override write(), on which dump() is based.
'''
def dump(self, content):
if self.binary:
buffer = io.BytesIO()
else:
buffer = io.StringIO()
self.write(buffer, content)
return buffer.getvalue()
class XMLMemoryFormatter(MemoryFormatter):
'''
Formatter for XML-based output.
Subclasses must define a method _dump() which returns
an lxml.etree.Element node.
'''
ext = 'xml'
binary = True
def dump(self, content):
node = self._dump(content)
return self._tostring(node)
def _dump(self, content):
raise NotImplementedError()
@staticmethod
def _tostring(node, **kwargs):
kwargs.setdefault('encoding', "UTF-8")
kwargs.setdefault('xml_declaration', True)
kwargs.setdefault('pretty_print', True)
return etree.tostring(node, **kwargs)
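# Minimal sketch of a concrete formatter (hypothetical, not part of the original
# module): a plain-text formatter only needs dump(); write() is inherited from
# MemoryFormatter. `content.text` is an assumed attribute of the content object.
class PlainTextFormatter(MemoryFormatter):
    ext = 'txt'
    def dump(self, content):
        return content.text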
|
nilq/baby-python
|
python
|
# First destination
# even: 1 -> 2
# odd: 1 -> 3
# Second destination
# even: 1 -> 3
# odd: 1 -> 2
# Third destination
# The smallest disk is stacked onto the second one (everything but the largest regroups).
# Fourth
# The third-largest disk moves to the empty peg.
# Fifth
# The smallest disk is stacked onto the second + third disks (everything but the largest regroups).
import sys
n = int(sys.stdin.readline())
# parameters: number of disks, source peg, target peg, spare peg
def hanoi(total, start, destination, other):
    # base case - move the last remaining disk and stop.
    if total == 1:
        print(start, '->', destination)
        return
    hanoi(total - 1, start, other, destination)
    print(start, '->', destination)
    hanoi(total - 1, other, destination, start)
hanoi(n, 1, 3, 2)
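# Worked example: with n == 2 the program prints
# 1 -> 2
# 1 -> 3
# 2 -> 3
# i.e. 2**n - 1 == 3 moves in total.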
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from .common import InfoExtractor
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/(?:[^/#?]+)?-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TESTS = [{
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '78fc1901148284c69af12640e01c6310',
'info_dict': {
'id': '2189178',
'ext': 'mp4',
'title': 'Zeichentrick 1',
'age_limit': 18,
}
}, {
'url': 'http://www.youjizz.com/videos/-2189178.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# YouJizz's HTML5 player has invalid HTML
webpage = webpage.replace('"controls', '" controls')
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
info_dict.update({
'id': video_id,
'title': video_title,
'age_limit': age_limit,
})
return info_dict
|
nilq/baby-python
|
python
|
# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
nilq/baby-python
|
python
|
import sys
import os
def jeff():
print('quick attack the enemy press a to attack b to block c for super ')
bob=10
alice=60
turn1=0
turn2=2
spr=5
mod1=0
mod2=0
speed=0
    print('bob health is ', bob)
    print('alice health is ', alice)
while(alice>0):
print(spr,'spr is')
print('bob health is ',bob)
print('alice health is ',alice)
a4=input("a to attack b to block c for super h for gaining health ")
if a4=='a':
print('bob attacks alice')
alice-=5
turn1+=1
spr+=1
mod1=turn1%2
print('bob health is ',bob)
print('alice health is ',alice)
if mod1 > 0:
print("Alice counter attacks")
bob-=1
spr+=1
speed-=1
print('bob health is ',bob)
print('alice health is ',alice)
else:
print("successful attack")
print('bob health is ',bob)
print('alice health is ',alice)
elif bob < 0:
print('''game over Bob died
press 1 to continue anything else to
quit''')
a5= input("1 or quit")
if a5 == '1' :
jeff()
else:
sys.exit(0)
#break
elif a4=='b':
print('Bob blocks')
            if speed == 0:
                print('Block unsuccessful')
bob-=2
print('bob health is ',bob)
print('alice health is ',alice)
speed+=1
turn2+=1
spr+=1
mod2=turn1%2
print('bob health is ',bob)
print('alice health is ',alice)
if mod2>0:
print('Bob counterattacks')
alice-=10
spr+=1
print('bob health is ',bob)
print('alice health is ',alice)
elif(a4=='c'):
            if spr != 0:  # spr is a number, so compare it unquoted
print('super attack')
alice-=15
spr=0
speed=0
print(spr,'spr is')
else:
print("No super attack charge up")
print("alice attacks")
bob-=1
speed+=1
turn1+=1
turn2+=1
else:
print("please select a proper option")
turn1+=1
mod1=turn1%2
if mod1 >0 :
print('recovered 2 HP')
bob+=2
mod1=turn2
turn1=turn2
turn2=mod1
print('bob health is ',bob)
print('alice health is ',alice)
jeff()
a3=input("QUICK DRAW YOU gun and shoot 1 for left and 2 for right")
if a3 == '1':
print("you died")
elif a3 == '2':
print("You killed rebel leader and escaped the facility")
else:
print("a swordsman appeared and killed the guard and took your character out")
a = """
A rebellion is rising
i neeed your help
Then you followed him
"""
print(a)
print("game over")
|
nilq/baby-python
|
python
|
wild = "https://image.ibb.co/dPStdz/wild.png"
wild_plus_four = "https://image.ibb.co/jKctdz/wild_4.png"
red = {
'0': 'https://image.ibb.co/gnmtB8/red_0.png',
'1': 'https://image.ibb.co/hvRFPT/red_1.png',
'2': 'https://image.ibb.co/f9xN4T/red_2.png',
'3': 'https://image.ibb.co/hDB4Jo/red_3.png',
'4': 'https://image.ibb.co/m5RFPT/red_4.png',
'5': 'https://image.ibb.co/bSVLr8/red_5.png',
'6': 'https://image.ibb.co/dkRFPT/red_6.png',
'7': 'https://image.ibb.co/grPfr8/red_7.png',
'8': 'https://image.ibb.co/jxM4Jo/red_8.png',
'9': 'https://image.ibb.co/j6vydo/red_9.png',
'skip': 'https://image.ibb.co/cHBDTo/red_skip.png',
'reverse': 'https://image.ibb.co/mdoGg8/red_reverse.png',
'+2': 'https://image.ibb.co/hORDTo/red_2.png',
}
green = {
'0': 'https://image.ibb.co/gXUS4T/green_0.png',
'1': 'https://image.ibb.co/iDZGyo/green_1.png',
'2': 'https://image.ibb.co/f1RUJo/green_2.png',
'3': 'https://image.ibb.co/h6fBW8/green_3.png',
'4': 'https://image.ibb.co/mE8byo/green_4.png',
'5': 'https://image.ibb.co/fgOn4T/green_5.png',
'6': 'https://image.ibb.co/iAU5r8/green_7.png',
'7': 'https://image.ibb.co/naituT/green_7.png',
'8': 'https://image.ibb.co/fyMJB8/green_8.png',
'9': 'https://image.ibb.co/iqjido/green_9.png',
'skip': 'https://image.ibb.co/btj6g8/green_skip.png',
'reverse': 'https://image.ibb.co/jDeTuT/green_reverse.png',
'+2': 'https://image.ibb.co/b0gmg8/green_2.png',
}
blue = {
'0': 'https://image.ibb.co/im3vPT/blue_0.png',
'1': 'https://image.ibb.co/k4Aydo/blue_1.png',
'2': 'https://image.ibb.co/efuPJo/blue_2.png',
'3': 'https://image.ibb.co/fyqLr8/blue_3.png',
'4': 'https://image.ibb.co/kbOJdo/blue_4.png',
'5': 'https://image.ibb.co/mHnuJo/blue_5.png',
'6': 'https://image.ibb.co/bW81yo/blue_6.png',
'7': 'https://image.ibb.co/dLwodo/blue_7.png',
'8': 'https://image.ibb.co/nqyejT/blue_8.png',
'9': 'https://image.ibb.co/kRrMyo/blue_9.png',
'skip': 'https://image.ibb.co/buExM8/blue_skip.png',
'reverse': 'https://image.ibb.co/cuQ418/blue_reverse.png',
'+2': 'https://image.ibb.co/nx2TTo/blue_2.png',
}
yellow = {
'0': 'https://image.ibb.co/kf5ZjT/yellow_0.png',
'1': 'https://image.ibb.co/d6o9Jo/yellow_1.png',
'2': 'https://image.ibb.co/ghf0PT/yellow_2.png',
'3': 'https://image.ibb.co/eHZido/yellow_3.png',
'4': 'https://image.ibb.co/d9tLPT/yellow_4.png',
'5': 'https://image.ibb.co/b6CEjT/yellow_5.png',
'6': 'https://image.ibb.co/bzFZjT/yellow_6.png',
'7': 'https://image.ibb.co/eD0ZjT/yellow_7.png',
'8': 'https://image.ibb.co/mfa0PT/yellow_8.png',
'9': 'https://image.ibb.co/h4JLPT/yellow_9.png',
'skip': 'https://image.ibb.co/btpmET/yellow_skip.png',
'reverse': 'https://image.ibb.co/kbt2oo/yellow_reverse.png',
'+2': 'https://image.ibb.co/dvVtuT/yellow_2.png',
}
|
nilq/baby-python
|
python
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Copyright 2020 Tecnativa - Pedro M. Baeza
from odoo import api, fields, models
class SaleOrder(models.Model):
_inherit = "sale.order"
type_id = fields.Many2one(
comodel_name="sale.order.type",
string="Type",
compute="_compute_sale_type_id",
store=True,
readonly=True,
states={
"draft": [("readonly", False)],
"sent": [("readonly", False)],
},
default=lambda so: so._default_type_id(),
ondelete="restrict",
copy=True,
)
@api.model
def _default_type_id(self):
return self.env["sale.order.type"].search([], limit=1)
@api.depends("partner_id", "company_id")
def _compute_sale_type_id(self):
for record in self:
if not record.partner_id:
record.type_id = self.env["sale.order.type"].search(
[("company_id", "in", [self.env.company.id, False])], limit=1
)
else:
sale_type = (
record.partner_id.with_company(record.company_id).sale_type
or record.partner_id.commercial_partner_id.with_company(
record.company_id
).sale_type
)
if sale_type:
record.type_id = sale_type
@api.onchange("type_id")
def onchange_type_id(self):
# TODO: To be changed to computed stored readonly=False if possible in v14?
vals = {}
for order in self:
order_type = order.type_id
# Order values
vals = {}
if order_type.payment_term_id:
vals.update({"payment_term_id": order_type.payment_term_id})
if order_type.pricelist_id:
vals.update({"pricelist_id": order_type.pricelist_id})
if vals:
order.update(vals)
@api.model
def create(self, vals):
if vals.get("name", "/") == "/" and vals.get("type_id"):
sale_type = self.env["sale.order.type"].browse(vals["type_id"])
if sale_type.sequence_id:
vals["name"] = sale_type.sequence_id.next_by_id()
return super(SaleOrder, self).create(vals)
def _prepare_invoice(self):
res = super(SaleOrder, self)._prepare_invoice()
if self.type_id.journal_id:
res["journal_id"] = self.type_id.journal_id.id
if self.type_id:
res["sale_type_id"] = self.type_id.id
return res
|
nilq/baby-python
|
python
|
import importlib
from functools import partial
from multiprocessing import Process, Queue
from flask import Flask, request
app = Flask("serverfull")
bees = ["a", "b"] # TODO: get this from somewhere
workers = {}
def bee_loop(handler, inq, outq):
    # serve requests from the queue until the worker process is terminated
    while True:
        request = inq.get()
        print("Got request")
        outq.put(handler(request))
def generic_handler(bee_path):
_, inq, outq = workers[bee_path]
print(f"Putting {request.args}")
inq.put(request.args)
return outq.get()
for bee in bees:
bee_path = f"bees.{bee}"
print(f"Importing {bee_path}")
bee_mod = importlib.import_module(bee_path)
bee_mod = importlib.reload(bee_mod) # TODO: be smarter, but who cares
print(f"/bees/{bee} => {bee_mod.handler}")
inq = Queue()
outq = Queue()
proc = Process(target=bee_loop, args=(bee_mod.handler, inq, outq))
proc.start()
workers[bee_path] = [proc, inq, outq]
    app.add_url_rule(f"/bees/{bee}", f"bee.{bee}", partial(generic_handler, bee_path))
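# Assumption: in the original setup the app is presumably started with
# `flask run`; an explicit entry point for direct execution would be:
if __name__ == "__main__":
    app.run()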
|
nilq/baby-python
|
python
|
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
facedetection = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
gry = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face = facedetection.detectMultiScale(gry,1.3,5)
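    # detectMultiScale args above: scaleFactor=1.3 (image pyramid step) and
    # minNeighbors=5 (higher -> fewer, more confident detections)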
for (x,y,w,h) in face:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0), 5)
cv2.imshow('frame',frame)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
# Copyright 2017 VMware Inc. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import copy
from congress import exception
from congress.library_service import library_service
from congress.tests import base
class TestLibraryService(base.SqlTestCase):
def setUp(self):
super(TestLibraryService, self).setUp()
self.library = library_service.LibraryService('lib-test')
self.library.delete_all_policies() # clear pre-loaded library policies
self.policy1 = {'name': 'policy1', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]}
self.policy2 = {'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]}
self.policy1_meta = copy.deepcopy(self.policy1)
self.policy2_meta = copy.deepcopy(self.policy2)
del self.policy1_meta['rules']
del self.policy2_meta['rules']
def test_create_policy_no_name(self):
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, {'rules': []})
def test_create_policy_no_rules(self):
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, {'name': 'policy1'})
def test_create_policy_other_schema_violations(self):
# name too long (255 limit)
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name':
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'11111111111111111111111111111'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
# comment too long (255 limit)
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'rule': 'p(x) :- q(x)',
'comment':
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'11111111111111111111111111111',
'name': 'testname'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
# rule item missing 'rule' property
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'comment': 'test comment',
'name': 'testname'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
def test_create_policy_bad_name(self):
self.assertRaises(exception.PolicyException,
self.library.create_policy,
{'name': 'disallowed-hyphen', 'rules': []})
def test_create_policy_default(self):
res = self.library.create_policy({'name': 'policy1', 'rules': []})
self.assertEqual(res, {'id': res['id'], 'abbreviation': 'polic',
'kind': 'nonrecursive', 'name': 'policy1',
'description': '', 'rules': []})
def test_create_policy(self):
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
self.assertEqual(policy_obj, self.policy1)
def test_create_policy_duplicate(self):
self.library.create_policy({'name': 'policy1', 'rules': []})
self.assertRaises(KeyError, self.library.create_policy,
{'name': 'policy1', 'rules': []})
res = self.library.get_policies()
self.assertEqual(len(res), 1)
def test_get_policy_empty(self):
res = self.library.get_policies()
self.assertEqual(res, [])
self.assertRaises(KeyError, self.library.get_policy,
'nosuchpolicy')
self.assertRaises(KeyError, self.library.get_policy_by_name,
'nosuchpolicy')
def test_create_get_policy(self):
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
self.policy1_meta['id'] = policy_obj['id']
res = self.library.get_policies()
self.assertEqual(res, [self.policy1])
res = self.library.get_policy(policy_obj['id'])
self.assertEqual(res, self.policy1)
res = self.library.get_policy_by_name(policy_obj['name'])
self.assertEqual(res, self.policy1)
res = self.library.get_policies(include_rules=True)
self.assertEqual(res, [self.policy1])
res = self.library.get_policy(policy_obj['id'], include_rules=False)
self.assertEqual(res, self.policy1_meta)
res = self.library.get_policy_by_name(policy_obj['name'],
include_rules=False)
self.assertEqual(res, self.policy1_meta)
self.assertRaises(KeyError, self.library.get_policy, 'no_such_policy')
self.assertRaises(KeyError, self.library.get_policy_by_name,
'no_such_policy')
def test_delete_policy(self):
self.assertRaises(KeyError, self.library.delete_policy,
'policy1')
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
policy_obj = self.library.create_policy(self.policy2)
self.policy2['id'] = policy_obj['id']
res = self.library.get_policies()
self.assertEqual(len(res), 2)
self.assertTrue(all(p in res
for p in [self.policy1, self.policy2]))
self.assertRaises(KeyError, self.library.delete_policy,
'no_such_policy')
res = self.library.delete_policy(self.policy1['id'])
self.assertEqual(res, self.policy1)
res = self.library.get_policies()
self.assertEqual(len(res), 1)
self.assertEqual(res[0], self.policy2)
res = self.library.delete_policy(self.policy2['id'])
self.assertEqual(res, self.policy2)
res = self.library.get_policies()
self.assertEqual(len(res), 0)
def test_delete_policies(self):
self.library.delete_all_policies()
res = self.library.get_policies()
self.assertEqual(len(res), 0)
self.library.create_policy(
{'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
self.library.create_policy(
{'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
self.library.delete_all_policies()
res = self.library.get_policies()
self.assertEqual(len(res), 0)
def test_replace_policy(self):
policy1 = self.library.create_policy(
{'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
policy2 = self.library.create_policy(
{'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
replacement_policy = {
"name": "new_name",
"description": "new test policy2 description",
"kind": "nonrecursive",
"abbreviation": "newab",
"rules": [{"rule": "r(x) :- c(x)", "comment": "test comment",
"name": "test name"}]
}
# update non-existent item
self.assertRaises(KeyError,
self.library.replace_policy, 'no_such_id',
replacement_policy)
# update existing item
self.library.replace_policy(policy2['id'], replacement_policy)
replacement_policy_w_id = copy.deepcopy(replacement_policy)
replacement_policy_w_id['id'] = policy2['id']
ret = self.library.get_policies()
self.assertEqual(len(ret), 2)
self.assertTrue(all(p in ret
for p in [policy1,
replacement_policy_w_id]))
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
"""
Automatized configuration and execution of Inspect peptide identification for
a list of spectrum files and a list of reference proteomes. Specifications of
posttranslational modifications can either be directly passed by the user or
assigned to the dataset by its filename (if dataset group is already known).
@author: Anke Penzlin, June 2013
"""
import re
import os
import sys
import optparse
#from InspectParser_FDRcut import parseInspect
from simulation_based_similarity import prepDB, run_inspect
def runInspect_config(spectra,
DBs,
spec_path,
db_path="/data/NG4/anke/proteome/",
inspect_dir = "/home/franziska/bin/Inspect/",
conf = "/data/NG4/anke/Inspect/config_Inspect_py.txt",
user_mods=""):
"""
run Inspect for each pair of spectrum dataset and proteome database using
modifications according to the dataset in the configuration file.
"""
rngDB = range(len(DBs)) # 3 for example
rngSpc = range(len(spectra)) # 2 for example
simMat = [ [0 for i in rngDB] for j in rngSpc ] # initializing output: [[0, 0, 0], [0, 0, 0]]
for i in rngSpc:
specs = spec_path+spectra[i]+".mgf"
for j in rngDB:
db_j = db_path+DBs[j]+"_decoy.trie"
# create trie if necessary (.trie and .index created simultaneously)
if not os.path.exists(db_j):
# a prepare decoyDB input for Inspect (def)
go_on = prepDB(db_path+DBs[j]+"_decoy.fasta", path=inspect_dir) # Convert a protein database into concatenated format.
if not go_on: return
inspect_out = specs[:-4] +"_"+DBs[j]+"_InspectOut.txt" # -4 to remove file extension: .mgf
# prepare configfile for InspecT
conf_out = open(conf,'w')
conf_out.write("spectra,"+specs+"\n")
conf_out.write("instrument,FT-Hybrid\n")
conf_out.write("protease,Trypsin\n")
conf_out.write("DB,"+db_j+"\n")
if not user_mods == "":
conf_out.write(user_mods)
elif re.search("Lacto_131",spectra[i]):
conf_out.write("mod,46.0916,C,fix\n")
conf_out.write("mod,15.994915,M\n")
conf_out.write("# iTraq\n")
conf_out.write("mod,144.1544,K,fix\n")
conf_out.write("mod,144.1544,*,nterminal\n")
print "modifications according to acc. nr. 13105-13162"
sys.stdout.flush()
elif re.search("Shigelladys",spectra[i]):
conf_out.write("mod,46.0916,C,fix\n")
conf_out.write("mod,15.994915,M\n")
print "modifications according to http://www.biomedcentral.com/1471-2180/11/147#sec2"
sys.stdout.flush()
else:
conf_out.write("# Protecting group on cysteine\n")
conf_out.write("mod,57.021464,C,fix\n")
if re.search("Bacicer_113",spectra[i]):
conf_out.write("mod,15.994915,M\n")
print "modifications according to acc. nr. 11339-11362"
sys.stdout.flush()
elif re.search("Bacisub_175",spectra[i]):
conf_out.write("mod,15.994915,M\n")
conf_out.write("mod,119.1423,C\n")
conf_out.write("mod,396.37,C\n")
print "modifications according to acc. nr. 17516-17659"
sys.stdout.flush()
elif re.search("Ecoli_12",spectra[i]):
conf_out.write("mod,32,M,opt\n")
print "modifications according to acc. nr. 12189-12199"
sys.stdout.flush()
elif re.search("Strepyo_1923",spectra[i]):
conf_out.write("mod,15.994915,M\n")
conf_out.write("mod,79.9799,STY\n")
print "modifications according to acc. nr. 19230/19231"
sys.stdout.flush()
elif re.search("CPXV_",spectra[i]):
conf_out.write("mod,15.994915,M\n")#oxidation
conf_out.write("mod,42.010565,*,nterminal\n")#acetylation
print "modifications according to standard configuration (for pox)"
elif re.search("MSSim",spectra[i]):
conf_out.write("mod,0.984016,NQ\n")
conf_out.write("mod,15.994915,M\n")
print "modifications according to (simulation) standard configuration"
sys.stdout.flush()
else:
# conf_out.write("mod,15.994915,M\n")#oxidation
# conf_out.write("mod,42.010565,*,nterminal\n")#acetylation
#conf_out.write("mod,0.984016,NQ\n")
print "modifications according to (unspecified) standard configuration"
sys.stdout.flush()
conf_out.write("mods,2\n")
if re.search("Shigelladys",spectra[i]):
conf_out.write("PMTolerance,1.4\n")
conf_out.write("IonTolerance,0.5\n")
conf_out.write("MultiCharge,3\n")
else:
conf_out.write("ParentPPM,10\n")
conf_out.write("IonTolerance,0.8\n")
conf_out.close()
# run Inspect: match spectra against database
if re.search( "Ecoli_12", spectra[i] ):
AA_file = inspect_dir + "AminoAcidMasses_15N.txt"
if os.path.exists(AA_file):
run_inspect(conf, inspect_out, inspect_dir, "-a "+AA_file)
print "amino acid masses according to 15N (because of special e.coli data set)."
sys.stdout.flush()
else:
run_inspect(conf, inspect_out, inspect_dir)
print "WARNING: file containing amino acid masses according to 15N not found!\nDatabase search using usual file disregarding special e.coli data set)."
sys.stdout.flush()
else:
run_inspect(conf, inspect_out, inspect_dir)
# # evaluate results from Inspect to calculate an FDR-matrix
# simMat[i][j] = parseInspect(inspect_out)[2]
for line in simMat:
print line
if __name__=="__main__":
usage = """%prog SPECTRA DB_LIST -s SPEC_DIR -d DB_DIR
run InsPecT (for multiple spectrum datasets and references),
using known modification options (assigned by filename),
and calculate FDR-corrected identification counts from InsPecT output.
SPECTRA: ','-separated spectrum-filenames (mgf-format) without file extension
DB_LIST: ','-separated proteome-filenames (fasta-format) without file extension
    Use the easy mode (--easy) to get a quick understanding of this function.
"""
# configure the parser
optparser = optparse.OptionParser(usage=usage)
optparser.add_option('-s', '--specdir', type='string', dest='spec_dir', default="/data/NG4/anke/spectra/", help='directory of specFiles (absolute path!). [default: %default]')
optparser.add_option('-d', '--dbdir', type='string', dest='db_dir', default="/data/NG4/anke/proteome/", help='directory of proteinDBs (absolute path!). [default: %default]')
optparser.add_option('-c', '--configfile', type='string', dest='config', default="/data/NG4/anke/Inspect/config_Inspect_py.txt", help='a txt-file for Inspect configuration, will be written. [default: %default]')
optparser.add_option('-m', '--mods', type='string', dest='mods', default="", help='a string containing all modifications in question, modification choice by filename if "". [default: %default]')
optparser.add_option('-i', '--inspect_dir', type='string', dest='ins_dir', default="/home/franziska/bin/Inspect", help='directory of Inspect.exe. [default: %default]')
    optparser.add_option('-e', '--easy', type='string', dest='easy', default=None, help='Beginner mode: set to true or True to run this script with bundled example settings.')
# parse options and arguments
options, args = optparser.parse_args()
if not options.easy:
if len(args) == 2:
spectra = args[0].split(',')
db_list = args[1].split(',')
else:
optparser.print_help()
sys.exit(1)
'''db_path = options.db_dir
spec_path = options.spec_dir
configfile = options.config # Inspect configuration file
mods = options.mods
inspect_dir = options.ins_dir'''
runInspect_config(spectra=spectra, DBs=db_list, spec_path=options.spec_dir, db_path=options.db_dir, inspect_dir=options.ins_dir, conf=options.config, user_mods=options.mods)
    # Easy mode
    elif options.easy.lower() == 'true':
runInspect_config(
spectra=['example'],
DBs=['species1', 'species2'],
spec_path='../data/spectra/',
db_path='../data/reference/',
inspect_dir='./inspect/',
conf='./config_files/config_Inspect_py.txt',
user_mods='')
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class TipoElectrodomestico(models.Model):
nombre = models.CharField(max_length=200)
foto = models.ImageField(null =True, blank=True)
def __str__(self):
        # Identify an object by its name
return self.nombre
def numProductos(self):
pass
class Producto(models.Model):
nombre = models.CharField(max_length=200)
tipo = models.ForeignKey(TipoElectrodomestico, on_delete=models.CASCADE)
precio = models.IntegerField()
descripcion = models.TextField()
foto = models.ImageField(blank = True, null=True)
calificacion = models.FloatField(default=0)
marca = models.CharField(max_length=20, default="")
ref = models.CharField(max_length=100, default="")
@property #=> convierte un método en un atributo
def tipoEl(self):
#infoTipo = {"nombre": "Televisores", "id":2, "foto":None}
from Productos.serializers import TipoSerial
return TipoSerial(self.tipo).data
def __str__(self):
return self.nombre
    @property
    def calcularCalificacion(self):
        comentarios = self.comentario_set.all()
        if not comentarios:
            return 0  # no comments yet; avoid dividing by zero
        calificacion = 0
        for comentario in comentarios:
            calificacion += comentario.calificacion
        return calificacion / len(comentarios)
class Comentario(models.Model):
usuario = models.CharField(max_length=100)
producto = models.ForeignKey(Producto, on_delete=models.CASCADE)
calificacion = models.FloatField()
fecha = models.DateField(auto_now_add=True) #16/09/2021
#DateTimeField() 16/09/2021 - 3:13:40 p.m.
#TimeField()
contenido = models.TextField()
def __str__(self):
return self.usuario + " - " + self.producto.nombre
|
nilq/baby-python
|
python
|
# Copyright 2021 Lucas Fidon and Suprosanna Shit
"""
Data loader for a single case.
This is typically used for inference.
"""
import torch
from monai.data import Dataset, DataLoader
def single_case_dataloader(inference_transform, input_path_dict):
"""
:param inference_transform
:param input_path_dict: dict; keys=image_keys, values=paths
:return:
"""
data_dicts = [input_path_dict]
ds = Dataset(
data=data_dicts,
transform=inference_transform,
)
loader = DataLoader(
ds,
batch_size=1, # image-level batch to the sliding window method, not the window-level batch
num_workers=0, # you can set it to a value higher than 0 to activate parallel preprocessing; for me it leads to an error...
pin_memory=torch.cuda.is_available(),
)
return loader
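# Hedged usage sketch (the transform and file path below are placeholders, not
# taken from the original code base):
if __name__ == '__main__':
    from monai.transforms import Compose, LoadImaged
    loader = single_case_dataloader(
        inference_transform=Compose([LoadImaged(keys=['image'])]),
        input_path_dict={'image': '/path/to/case.nii.gz'},  # hypothetical path
    )
    batch = next(iter(loader))
    print(batch['image'].shape)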
|
nilq/baby-python
|
python
|
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import threading
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class WriteToCallback(beam.PTransform):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
def expand(self, pcoll):
return pcoll | beam.io.iobase.Write(_CallbackSink(self._callback, self._lock))
class _CallbackSink(beam.io.iobase.Sink):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
def initialize_write(self):
pass
def open_writer(self, init_result, uid):
return _CallbackWriter(self._callback, self._lock)
def finalize_write(self, init_result, writer_results):
pass
class _CallbackWriter(beam.io.iobase.Writer):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
self._working_data = []
def write(self, record):
self._working_data.append(record)
def close(self):
with self._lock:
self._callback(self._working_data)
def make_dump_to_list(visible_list):
def dump(internal_list):
logging.info("Dumping %s" % internal_list)
visible_list.extend(internal_list)
return dump
input = [1, 2, 3]
visible_list = []
lock = threading.Lock()
p = beam.Pipeline(options=PipelineOptions())
data = p | 'CreateInput' >> beam.Create(input)
data | 'DumpToList' >> WriteToCallback(
make_dump_to_list(visible_list), lock)
result = p.run()
result.wait_until_finish()
logging.info("Pipeline finished.")
logging.info("Input: %s", input)
with lock:
logging.info("Visible output: %s", visible_list)
assert input == visible_list
|
nilq/baby-python
|
python
|
from sys import path
path.append('/home/joerojas/Desarrollo/Curso-Basico-Python/101_misModulos/modules')
import modulo2
zeroes = [0 for i in range(5)]
ones = [1 for i in range(5)]
print(modulo2.suma(zeroes))
print(modulo2.producto(ones))
|
nilq/baby-python
|
python
|
'''
Classes from the 'LinkPresentation' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
LPMultipleMetadataPresentationTransformer = _Class('LPMultipleMetadataPresentationTransformer')
LPLinkHTMLTextGenerator = _Class('LPLinkHTMLTextGenerator')
LPLinkMetadataStoreTransformer = _Class('LPLinkMetadataStoreTransformer')
LPTestingOverrides = _Class('LPTestingOverrides')
LPResources = _Class('LPResources')
LPAnimatedImageTranscoder = _Class('LPAnimatedImageTranscoder')
LPLinkMetadataObserver = _Class('LPLinkMetadataObserver')
LPPresentationSpecializations = _Class('LPPresentationSpecializations')
LPYouTubeURLComponents = _Class('LPYouTubeURLComponents')
LPStatistics = _Class('LPStatistics')
LPLinkMetadataPresentationTransformer = _Class('LPLinkMetadataPresentationTransformer')
LPYouTubePlayerScriptMessageHandler = _Class('LPYouTubePlayerScriptMessageHandler')
LPiTunesMediaURLComponents = _Class('LPiTunesMediaURLComponents')
LPAudio = _Class('LPAudio')
LPAudioProperties = _Class('LPAudioProperties')
LPLinkViewComponents = _Class('LPLinkViewComponents')
LPMessagesPayload = _Class('LPMessagesPayload')
RichLinkAttachmentSubstituter = _Class('RichLinkAttachmentSubstituter')
LPTheme = _Class('LPTheme')
LPThemeParametersObserver = _Class('LPThemeParametersObserver')
LPTapToLoadViewStyle = _Class('LPTapToLoadViewStyle')
LPCaptionBarStyle = _Class('LPCaptionBarStyle')
LPMusicPlayButtonStyle = _Class('LPMusicPlayButtonStyle')
LPVideoViewStyle = _Class('LPVideoViewStyle')
LPVideoPlayButtonStyle = _Class('LPVideoPlayButtonStyle')
LPGlyphStyle = _Class('LPGlyphStyle')
LPImageViewStyle = _Class('LPImageViewStyle')
LPButtonStyle = _Class('LPButtonStyle')
LPShadowStyle = _Class('LPShadowStyle')
LPCaptionBarAccessoryStyle = _Class('LPCaptionBarAccessoryStyle')
LPVerticalTextStackViewStyle = _Class('LPVerticalTextStackViewStyle')
LPTextRowStyle = _Class('LPTextRowStyle')
LPTextViewStyle = _Class('LPTextViewStyle')
LPPadding = _Class('LPPadding')
LPSize = _Class('LPSize')
LPPointUnit = _Class('LPPointUnit')
LPStreamingAudioPlayer = _Class('LPStreamingAudioPlayer')
LPVideo = _Class('LPVideo')
LPVideoAttachmentSubstitute = _Class('LPVideoAttachmentSubstitute')
LPVideoProperties = _Class('LPVideoProperties')
LPEventTimeline = _Class('LPEventTimeline')
LPEvent = _Class('LPEvent')
LPiTunesMediaStorefrontMappings = _Class('LPiTunesMediaStorefrontMappings')
LPAppLinkPresentationProperties = _Class('LPAppLinkPresentationProperties')
LPMetadataProviderSpecializationContext = _Class('LPMetadataProviderSpecializationContext')
LPWebLinkPresentationProperties = _Class('LPWebLinkPresentationProperties')
LPCardHeadingPresentationProperties = _Class('LPCardHeadingPresentationProperties')
LPFullScreenVideoController = _Class('LPFullScreenVideoController')
LPMetadataProvider = _Class('LPMetadataProvider')
LPMIMETypeRegistry = _Class('LPMIMETypeRegistry')
LPiTunesMediaMovieBundleUnresolvedMetadata = _Class('LPiTunesMediaMovieBundleUnresolvedMetadata')
LPiTunesMediaMovieUnresolvedMetadata = _Class('LPiTunesMediaMovieUnresolvedMetadata')
LPiTunesMediaTVShowUnresolvedMetadata = _Class('LPiTunesMediaTVShowUnresolvedMetadata')
LPiTunesMediaTVSeasonUnresolvedMetadata = _Class('LPiTunesMediaTVSeasonUnresolvedMetadata')
LPiTunesMediaTVEpisodeUnresolvedMetadata = _Class('LPiTunesMediaTVEpisodeUnresolvedMetadata')
LPiTunesMediaPodcastUnresolvedMetadata = _Class('LPiTunesMediaPodcastUnresolvedMetadata')
LPiTunesMediaPodcastEpisodeUnresolvedMetadata = _Class('LPiTunesMediaPodcastEpisodeUnresolvedMetadata')
LPiTunesMediaAudioBookUnresolvedMetadata = _Class('LPiTunesMediaAudioBookUnresolvedMetadata')
LPiTunesMediaBookUnresolvedMetadata = _Class('LPiTunesMediaBookUnresolvedMetadata')
LPiTunesMediaSoftwareUnresolvedMetadata = _Class('LPiTunesMediaSoftwareUnresolvedMetadata')
LPiTunesMediaRadioUnresolvedMetadata = _Class('LPiTunesMediaRadioUnresolvedMetadata')
LPiTunesMediaPlaylistUnresolvedMetadata = _Class('LPiTunesMediaPlaylistUnresolvedMetadata')
LPiTunesMediaArtistUnresolvedMetadata = _Class('LPiTunesMediaArtistUnresolvedMetadata')
LPiTunesMediaMusicVideoUnresolvedMetadata = _Class('LPiTunesMediaMusicVideoUnresolvedMetadata')
LPiTunesMediaAlbumUnresolvedMetadata = _Class('LPiTunesMediaAlbumUnresolvedMetadata')
LPiTunesMediaSongUnresolvedMetadata = _Class('LPiTunesMediaSongUnresolvedMetadata')
LPiTunesMediaAsset = _Class('LPiTunesMediaAsset')
LPMediaPlaybackManager = _Class('LPMediaPlaybackManager')
LPiTunesMediaOffer = _Class('LPiTunesMediaOffer')
LPiTunesMediaLookupItemArtwork = _Class('LPiTunesMediaLookupItemArtwork')
LPiTunesStoreInformation = _Class('LPiTunesStoreInformation')
LPSettings = _Class('LPSettings')
LPSharingMetadataWrapper = _Class('LPSharingMetadataWrapper')
LPImagePresentationProperties = _Class('LPImagePresentationProperties')
LPCaptionBarPresentationProperties = _Class('LPCaptionBarPresentationProperties')
LPCaptionRowPresentationProperties = _Class('LPCaptionRowPresentationProperties')
LPCaptionPresentationProperties = _Class('LPCaptionPresentationProperties')
LPCaptionButtonPresentationProperties = _Class('LPCaptionButtonPresentationProperties')
LPVideoViewConfiguration = _Class('LPVideoViewConfiguration')
LPApplicationCompatibilityQuirks = _Class('LPApplicationCompatibilityQuirks')
LPURLSuffixChecker = _Class('LPURLSuffixChecker')
LPFetcherGroup = _Class('LPFetcherGroup')
LPFetcherGroupTask = _Class('LPFetcherGroupTask')
LPFetcherConfiguration = _Class('LPFetcherConfiguration')
LPLinkMetadataStatusTransformer = _Class('LPLinkMetadataStatusTransformer')
LPAssociatedApplicationMetadata = _Class('LPAssociatedApplicationMetadata')
LPSpecializationMetadata = _Class('LPSpecializationMetadata')
LPSummarizedLinkMetadata = _Class('LPSummarizedLinkMetadata')
LPAppStoreStoryMetadata = _Class('LPAppStoreStoryMetadata')
LPWalletPassMetadata = _Class('LPWalletPassMetadata')
LPBusinessChatMetadata = _Class('LPBusinessChatMetadata')
LPSharingStatusMetadata = _Class('LPSharingStatusMetadata')
LPApplePhotosStatusMetadata = _Class('LPApplePhotosStatusMetadata')
LPApplePhotosMomentMetadata = _Class('LPApplePhotosMomentMetadata')
LPAppleTVMetadata = _Class('LPAppleTVMetadata')
LPAppleNewsMetadata = _Class('LPAppleNewsMetadata')
LPFileMetadata = _Class('LPFileMetadata')
LPMapCollectionPublisherMetadata = _Class('LPMapCollectionPublisherMetadata')
LPMapCollectionMetadata = _Class('LPMapCollectionMetadata')
LPMapMetadata = _Class('LPMapMetadata')
LPiCloudFamilyInvitationMetadata = _Class('LPiCloudFamilyInvitationMetadata')
LPGameCenterInvitationMetadata = _Class('LPGameCenterInvitationMetadata')
LPiCloudSharingMetadata = _Class('LPiCloudSharingMetadata')
LPiTunesMediaMovieBundleMetadata = _Class('LPiTunesMediaMovieBundleMetadata')
LPiTunesMediaMovieMetadata = _Class('LPiTunesMediaMovieMetadata')
LPAppleMusicTVShowMetadata = _Class('LPAppleMusicTVShowMetadata')
LPiTunesMediaTVSeasonMetadata = _Class('LPiTunesMediaTVSeasonMetadata')
LPiTunesMediaTVEpisodeMetadata = _Class('LPiTunesMediaTVEpisodeMetadata')
LPiTunesMediaPodcastMetadata = _Class('LPiTunesMediaPodcastMetadata')
LPiTunesMediaPodcastEpisodeMetadata = _Class('LPiTunesMediaPodcastEpisodeMetadata')
LPiTunesMediaAudioBookMetadata = _Class('LPiTunesMediaAudioBookMetadata')
LPiTunesMediaBookMetadata = _Class('LPiTunesMediaBookMetadata')
LPiTunesMediaSoftwareMetadata = _Class('LPiTunesMediaSoftwareMetadata')
LPiTunesMediaRadioMetadata = _Class('LPiTunesMediaRadioMetadata')
LPiTunesMediaPlaylistMetadata = _Class('LPiTunesMediaPlaylistMetadata')
LPiTunesUserProfileMetadata = _Class('LPiTunesUserProfileMetadata')
LPiTunesMediaArtistMetadata = _Class('LPiTunesMediaArtistMetadata')
LPiTunesMediaMusicVideoMetadata = _Class('LPiTunesMediaMusicVideoMetadata')
LPiTunesMediaAlbumMetadata = _Class('LPiTunesMediaAlbumMetadata')
LPiTunesMediaSongMetadata = _Class('LPiTunesMediaSongMetadata')
LPAudioMetadata = _Class('LPAudioMetadata')
LPVideoMetadata = _Class('LPVideoMetadata')
LPArtworkMetadata = _Class('LPArtworkMetadata')
LPImageMetadata = _Class('LPImageMetadata')
LPIconMetadata = _Class('LPIconMetadata')
LPLinkMetadata = _Class('LPLinkMetadata')
LPPlaceholderLinkMetadata = _Class('LPPlaceholderLinkMetadata')
LPLinkHTMLGenerator = _Class('LPLinkHTMLGenerator')
LPApplicationIdentification = _Class('LPApplicationIdentification')
LPImageRemoteURLRepresentation = _Class('LPImageRemoteURLRepresentation')
LPImage = _Class('LPImage')
LPImageAttachmentSubstitute = _Class('LPImageAttachmentSubstitute')
LPImageProperties = _Class('LPImageProperties')
LPMetadataProviderSpecialization = _Class('LPMetadataProviderSpecialization')
LPAppleMapsMetadataProviderSpecialization = _Class('LPAppleMapsMetadataProviderSpecialization')
LPFileMetadataProviderSpecialization = _Class('LPFileMetadataProviderSpecialization')
LPiCloudSharingMetadataProviderSpecialization = _Class('LPiCloudSharingMetadataProviderSpecialization')
LPAppStoreStoryMetadataProviderSpecialization = _Class('LPAppStoreStoryMetadataProviderSpecialization')
LPAppleTVMetadataProviderSpecialization = _Class('LPAppleTVMetadataProviderSpecialization')
LPApplePhotosMetadataProviderSpecialization = _Class('LPApplePhotosMetadataProviderSpecialization')
LPiTunesMediaMetadataProviderSpecialization = _Class('LPiTunesMediaMetadataProviderSpecialization')
LPAppleNewsMetadataProviderSpecialization = _Class('LPAppleNewsMetadataProviderSpecialization')
LPRedditMetadataProviderSpecialization = _Class('LPRedditMetadataProviderSpecialization')
LPStreamingMediaMetadataProviderSpecialization = _Class('LPStreamingMediaMetadataProviderSpecialization')
LPInlineMediaPlaybackInformation = _Class('LPInlineMediaPlaybackInformation')
LPLinkMetadataPreviewTransformer = _Class('LPLinkMetadataPreviewTransformer')
LPFetcherResponse = _Class('LPFetcherResponse')
LPFetcherClipMetadataResponse = _Class('LPFetcherClipMetadataResponse')
LPFetcherErrorResponse = _Class('LPFetcherErrorResponse')
LPFetcherURLResponse = _Class('LPFetcherURLResponse')
LPFetcherStringResponse = _Class('LPFetcherStringResponse')
LPFetcherJSONResponse = _Class('LPFetcherJSONResponse')
LPFetcherImageResponse = _Class('LPFetcherImageResponse')
LPFetcherAccessibilityEnabledImageResponse = _Class('LPFetcherAccessibilityEnabledImageResponse')
LPFetcherAudioResponse = _Class('LPFetcherAudioResponse')
LPFetcherAccessibilityEnabledAudioResponse = _Class('LPFetcherAccessibilityEnabledAudioResponse')
LPFetcherVideoResponse = _Class('LPFetcherVideoResponse')
LPFetcherAccessibilityEnabledVideoResponse = _Class('LPFetcherAccessibilityEnabledVideoResponse')
LPFetcher = _Class('LPFetcher')
LPAssociatedApplicationMetadataFetcher = _Class('LPAssociatedApplicationMetadataFetcher')
LPURLFetcher = _Class('LPURLFetcher')
LPMediaAssetFetcher = _Class('LPMediaAssetFetcher')
LPCSSResolver = _Class('LPCSSResolver')
LPCSSVariable = _Class('LPCSSVariable')
LPHTMLComponent = _Class('LPHTMLComponent')
LPEmailCompatibleHTMLCaptionBarRowComponent = _Class('LPEmailCompatibleHTMLCaptionBarRowComponent')
LPHTMLTextComponent = _Class('LPHTMLTextComponent')
LPEmailCompatibleHTMLVerticalTextStackComponent = _Class('LPEmailCompatibleHTMLVerticalTextStackComponent')
LPEmailCompatibleHTMLQuoteComponent = _Class('LPEmailCompatibleHTMLQuoteComponent')
LPHTMLImageComponent = _Class('LPHTMLImageComponent')
LPHTMLTapToLoadComponent = _Class('LPHTMLTapToLoadComponent')
LPEmailCompatibleHTMLCaptionBarItemComponent = _Class('LPEmailCompatibleHTMLCaptionBarItemComponent')
LPEmailCompatibleHTMLTextComponent = _Class('LPEmailCompatibleHTMLTextComponent')
LPEmailCompatibleHTMLLinkComponent = _Class('LPEmailCompatibleHTMLLinkComponent')
LPHTMLVideoComponent = _Class('LPHTMLVideoComponent')
LPHTMLLinkComponent = _Class('LPHTMLLinkComponent')
LPHTMLImageContainerComponent = _Class('LPHTMLImageContainerComponent')
LPEmailCompatibleHTMLTableComponent = _Class('LPEmailCompatibleHTMLTableComponent')
LPHTMLCaptionBarAccessoryComponent = _Class('LPHTMLCaptionBarAccessoryComponent')
LPHTMLMultipleImageComponent = _Class('LPHTMLMultipleImageComponent')
LPHTMLIconComponent = _Class('LPHTMLIconComponent')
LPHTMLHorizontalCaptionPairComponent = _Class('LPHTMLHorizontalCaptionPairComponent')
LPHTMLCaptionBarComponent = _Class('LPHTMLCaptionBarComponent')
LPHTMLGlyphComponent = _Class('LPHTMLGlyphComponent')
LPEmailCompatibleHTMLInnerLinkComponent = _Class('LPEmailCompatibleHTMLInnerLinkComponent')
LPHTMLVerticalTextStackComponent = _Class('LPHTMLVerticalTextStackComponent')
LPEmailCompatibleHTMLImageComponent = _Class('LPEmailCompatibleHTMLImageComponent')
LPEmailCompatibleHTMLIconComponent = _Class('LPEmailCompatibleHTMLIconComponent')
LPHTMLQuoteComponent = _Class('LPHTMLQuoteComponent')
LPEmailCompatibleHTMLCaptionBarComponent = _Class('LPEmailCompatibleHTMLCaptionBarComponent')
LPActionDisablingCALayerDelegate = _Class('LPActionDisablingCALayerDelegate')
LPiTunesMediaLookupTask = _Class('LPiTunesMediaLookupTask')
LPHighlightGestureRecognizer = _Class('LPHighlightGestureRecognizer')
LPYouTubePlayerView = _Class('LPYouTubePlayerView')
LPLinkView = _Class('LPLinkView')
LPPlayButtonShapeView = _Class('LPPlayButtonShapeView')
LPAnimationMaskView = _Class('LPAnimationMaskView')
LPFlippedView = _Class('LPFlippedView')
LPComponentView = _Class('LPComponentView')
LPTapToLoadView = _Class('LPTapToLoadView')
LPCaptionBarButtonView = _Class('LPCaptionBarButtonView')
LPDomainNameIndicator = _Class('LPDomainNameIndicator')
LPCaptionBarView = _Class('LPCaptionBarView')
LPPlayButtonView = _Class('LPPlayButtonView')
LPCaptionBarAccessoryView = _Class('LPCaptionBarAccessoryView')
LPTextView = _Class('LPTextView')
LPImageView = _Class('LPImageView')
LPImageStackView = _Class('LPImageStackView')
LPVerticalTextStackView = _Class('LPVerticalTextStackView')
LPIndeterminateProgressSpinnerView = _Class('LPIndeterminateProgressSpinnerView')
LPMultipleImageView = _Class('LPMultipleImageView')
LPHorizontalCaptionPairView = _Class('LPHorizontalCaptionPairView')
LPVideoView = _Class('LPVideoView')
LPYouTubeVideoView = _Class('LPYouTubeVideoView')
LPStreamingVideoView = _Class('LPStreamingVideoView')
LPPlayButtonControl = _Class('LPPlayButtonControl')
LPPlaceholderPlayButtonControl = _Class('LPPlaceholderPlayButtonControl')
LPStreamingAudioPlayButtonControl = _Class('LPStreamingAudioPlayButtonControl')
LPiTunesPlayButtonControl = _Class('LPiTunesPlayButtonControl')
LPAVPlayerViewController = _Class('LPAVPlayerViewController')
|
nilq/baby-python
|
python
|
### Worker threads and payloads for sending web requests
from enum import Enum
from threading import Thread
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
# The Worker class is reused for both stages of execution, so it needs to be able to differentiate itself.
class WorkerType(Enum):
CSS_SCRAPER = 1
CSS_GETTER = 2
class HTTPWorker(Thread):
"""Worker thread to send GET requests to all websites, gathering their styling"""
def __init__(self, request_queue, type):
Thread.__init__(self)
# Request queue to store links
self.queue = request_queue
# Different worker types have different payloads to execute on their data
self.type = type
if type == WorkerType.CSS_SCRAPER:
self.payload = scrape_style
elif type == WorkerType.CSS_GETTER:
self.payload = scrape_urls
else:
# If somehow something else is passed in, throw an error
raise TypeError("Invalid Worker Type")
self.results = {}
# Initialising HTTP session to get websites, with a real-looking useragent and 3 maximum retries to keep it quick
self.session = requests.Session()
self.session.mount("https://", HTTPAdapter(max_retries=3))
self.session.mount("https://", HTTPAdapter(max_retries=3))
self.session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36 Edg/100.0.1185.50"
def run(self):
# Each worker thread remains alive until there are no links left
while not self.queue.empty():
content = self.queue.get()
try:
# Slightly different handling is required for different types due to datatypes
# Could probably be fixed
if self.type == WorkerType.CSS_SCRAPER:
response = self.payload(self.session,content)
self.results[content] = response
elif self.type == WorkerType.CSS_GETTER:
response = self.payload(self.session, content)
self.results[content[0]] = response
except:
continue
finally:
# Mark the website as complete even if an error occurs, so other threads do not try to get it repeatedly
self.queue.task_done()
def join(self,timeout=None):
# Gracefully exit the thread by closing the HTTP session; this would probably happen automatically anyway
self.session.close()
Thread.join(self, timeout)
### PAYLOADS FUNCTIONS ###
def scrape_style(session, url):
# Returns tuple of external css links and code from <style> tags
print("Getting "+url)
html = session.get(url,timeout=3).text
# Using beautifulsoup to parse html and extra style/link tags
soup = bs(html,'html.parser')
css_files = []
css_tags = []
# Grab all in-html styling
#!TODO Implement some inline styling, not sure if many websites use this
for css in soup.find_all("style"):
css_tags.append(css.text)
for css in soup.find_all("link"):
if css.attrs.get("href"):
css_url=urljoin(url, css.attrs.get("href"))
if "css" in css_url.lower():
css_files.append(css_url)
# Return both results in a tuple, the function that receives the results can deal with that
return (css_files,css_tags)
def scrape_urls(session,data):
# Simply request all external stylesheets and add contents to one long string for processing later
print("Getting external styles for "+data[0])
res = ""
for url in data[1]:
res += session.get(url, timeout=3).text + "\n"
return res
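# A minimal usage sketch (assumption: a queue.Queue pre-filled with URLs):
# from queue import Queue
# q = Queue()
# for url in ["https://example.com", "https://example.org"]:
#     q.put(url)
# workers = [HTTPWorker(q, WorkerType.CSS_SCRAPER) for _ in range(4)]
# for w in workers:
#     w.start()
# q.join()  # blocks until every queued URL has been processed
# styles = {k: v for w in workers for k, v in w.results.items()}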
|
nilq/baby-python
|
python
|
from flask import Flask, render_template, send_from_directory
from doctor_api.app import doctor_api_bp
from patient_api.app import patient_api_bp
app = Flask(__name__)
app.config.from_pyfile('flaskapp.cfg')
app.register_blueprint(doctor_api_bp, url_prefix='/doctor-api')
app.register_blueprint(patient_api_bp, url_prefix='/patient-api')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/<path:resource>')
def serveStaticResource(resource):
return send_from_directory('static/', resource)
@app.route("/test")
def test():
return "<strong>It's Alive!</strong>"
if __name__ == '__main__':
app.run(app.config['IP'], app.config['PORT'], debug=True)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# trump-net (c) Ian Dennis Miller
from flask_security import ConfirmRegisterForm
from flask_wtf.recaptcha import RecaptchaField
class ExtendedRegisterForm(ConfirmRegisterForm):
recaptcha = RecaptchaField()
def validate(self):
rv = ConfirmRegisterForm.validate(self)
if not rv:
return False
return True
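# A minimal wiring sketch (assumptions: an existing `app` and `user_datastore`;
# RECAPTCHA_PUBLIC_KEY / RECAPTCHA_PRIVATE_KEY must be set in the Flask config
# for the RecaptchaField to work):
# from flask_security import Security
# security = Security(app, user_datastore, confirm_register_form=ExtendedRegisterForm)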
|
nilq/baby-python
|
python
|
from crestdsl.model import * # bad practice, but used for the evaluation of commands
from .simulator import Simulator
import logging
logger = logging.getLogger(__name__)
import io
try:
import colored
from colored import stylize
color_enabled = True
except ImportError:
color_enabled = False
except io.UnsupportedOperation:
color_enabled = False
logger.error("There is an error in the 'colored' package. They use 'fileno'. I guess we have to wait for a fix.")
import random
import sys
class InteractiveSimulator(Simulator):
"""
This simulator stops every time two or more transitions are enabled
in the same entity at the same time and prompts the user for what to do.
Next to choosing a transition, users can perform various actions
(e.g. inspect variables, plot the system or stop the simulation).
"""
def select_transition_to_trigger(self, entity):
""" Override the (random) transition selection procedure. This one asks the user for input."""
transitions_from_current_state = [t for t in get_transitions(entity) if t.source is entity.current]
enabled_transitions = [t for t in transitions_from_current_state if self._get_transition_guard_value(t)]
if len(enabled_transitions) == 1:
return enabled_transitions[0]
elif len(enabled_transitions) > 1:
if color_enabled:
return self.prompt_transition_selection(entity, enabled_transitions)
else:
return self.prompt_transition_selection_no_colored(entity, enabled_transitions)
else:
return None
def prompt_transition_selection_no_colored(self, entity, enabled_transitions):
pad = 1 if len(enabled_transitions) <= 10 else 2
transitions_texts = [str(idx).rjust(pad) + f" ... {trans._name} (transition to '{trans.target._name}')" for idx, trans in enumerate(enabled_transitions)]
transitions_list = "\n".join(transitions_texts)
longtext = f"""
Non-Determinism detected
There are multiple enabled transitions in entity: {str(entity)}
(Current time: {self.global_time} -- Current automaton state: {entity.current._name})
Choose one of the following transitions by entering the according number:
{transitions_list}
Other commands:
r ... choose a transition randomly
p ... plot the system
pe ... plot the entity in which non-determinism occurs
q! ... to exit the script (not recommended in Jupyter mode)
Any other input will be interpreted.
This means you can use it to e.g. inspect ports values.
The entity {str(entity)} is bound to the variable 'entity'.
Example: entity.my_port.value will print the value of port my_port.
"""
print(longtext)
while True:
prompt = "Your choice: "
userinput = input(prompt).strip() # read input
if userinput == "p":
self.plot()
elif userinput == "pe":
self.plot(entity=entity)
elif userinput == "r":
return random.choice(enabled_transitions)
elif userinput == "q!":
sys.exit()
elif userinput in [str(idx) for idx in range(len(enabled_transitions))]:
choice = int(userinput)
return enabled_transitions[choice] # <<<<< This is the exit of the function, otherwise we're trapped !!
else:
try:
print(eval(userinput))
except:
text = f"I don't understand the input: " + \
userinput + \
f" (Please try again!)"
print(text)
def prompt_transition_selection(self, entity, enabled_transitions):
pad = 1 if len(enabled_transitions) <= 10 else 2
transitions_texts = [stylize(idx, colored.attr("bold")).rjust(pad) + f" ... {trans._name} (transition to '{trans.target._name}')" for idx, trans in enumerate(enabled_transitions)]
transitions_list = "\n".join(transitions_texts)
longtext = f"""
{stylize(' Non-Determinism detected ', colored.fg('black') + colored.bg('dark_orange') + colored.attr('bold'))}
There are multiple enabled transitions in entity: {stylize(' '+str(entity)+' ', colored.fg('black') + colored.bg('yellow_1') + colored.attr('bold'))}
(Current time: {stylize(self.global_time, colored.attr("bold"))} -- Current automaton state: {stylize(entity.current._name, colored.attr("bold"))})
{stylize('Choose one of the following transitions by entering the according number:', colored.attr('underlined'))}
{transitions_list}
{stylize('Other commands:', colored.attr('underlined'))}
{stylize('r', colored.attr("bold"))} ... choose a transition randomly
{stylize('p', colored.attr("bold"))} ... plot the system
{stylize('pe', colored.attr("bold"))} ... plot the entity in which non-determinism occurs
{stylize('q!', colored.attr("bold"))} ... to exit the script (not recommended in Jupyter mode)
{stylize('Any other input will be interpreted.', colored.attr('underlined'))}
This means you can use it to e.g. inspect ports values.
The entity {stylize(str(entity), colored.attr('bold'))} is bound to the variable {stylize('entity', colored.attr('bold'))}.
{stylize('Example:', colored.attr('underlined'))} entity.my_port.value will print the value of port my_port.
"""
print(longtext)
while True:
prompt = "Your choice: "
userinput = input(prompt).strip() # read input
if userinput == "p":
self.plot()
elif userinput == "pe":
self.plot(entity=entity)
elif userinput == "r":
return random.choice(enabled_transitions)
elif userinput == "q!":
sys.exit()
elif userinput in [str(idx) for idx in range(len(enabled_transitions))]:
choice = int(userinput)
return enabled_transitions[choice] # <<<<< This is the exit of the function, otherwise we're trapped !!
else:
try:
print(eval(userinput))
except:
text = stylize(f"I don't understand the input: ", colored.fg("red") + colored.attr("bold")) + \
userinput + \
stylize(f" (Please try again!)", colored.fg("red") + colored.attr("bold"))
print(text)
|
nilq/baby-python
|
python
|
#
# Constant Price Market Making Simulator
#
# simulate different liquidity provision and trading strategies
#
from typing import Tuple
import csv
import numpy as np
import pandas as pd
from numpy.random import binomial, default_rng
# TODO: switch to decimal type and control quantization. numeric errors will kill us quickly
class CPMM(object):
def __init__(self, fee_fraction = 0, fee_to_liquidity_fraction = 0) -> None:
# assert(fee_fraction >= fee_to_liquidity_fraction)
# amount of initial liquidity provided
self.initial_liquidity = 0
# total amount of liquidity
self.liquidity = 0
# total amount of collateral token
self.lp_token = 0
# yes tokens in the pool
self.lp_yes = 0
# no tokens in the pool
self.lp_no = 0
# outstanding tokens held by LP
self.outstanding_yes = 0
self.outstanding_no = 0
self.fee_pool = 0
self.history = []
self.fee_fraction = fee_fraction
self.fee_to_liquidity_fraction = fee_to_liquidity_fraction  # how much of the fee is reinvested into liquidity provision
def create_event(self, initial_liquidity, initial_yes_to_no = 1) -> Tuple[int, float]:
assert(initial_yes_to_no > 0)
self.initial_liquidity = initial_liquidity
rv = self._add_liquidity(initial_liquidity, initial_yes_to_no)
n_p = self.lp_yes / self.lp_no
# print(f"invariant P {initial_yes_to_no} {n_p}")
assert(abs(initial_yes_to_no - n_p) < 0.000001)
return rv
def add_liquidity(self, amount) -> Tuple[int, float]:
assert(self.lp_token > 0)
# yes to no must be invariant when liquidity is added
p = self.lp_yes / self.lp_no
rv = self._add_liquidity(amount, p)
n_p = self.lp_yes / self.lp_no
# assert invariant, we use float and disregard rounding so must be within e ~ 0
# print(f"invariant P {p} {n_p}")
assert(abs(p - n_p) < 0.000001)
return rv
def _add_liquidity(self, amount, yes_to_no) -> Tuple[int, float]:
# print("adding liquidity:", amount)
self.liquidity += amount
self.lp_token += amount
# get token type from the ratio
type = 1 if yes_to_no >= 1 else 0
if type:
# more into YES bucket, NO is returned
old_lp_no = self.lp_no
self.lp_no = (amount + self.lp_yes) / yes_to_no
self.lp_yes += amount
tokens_return = amount + old_lp_no - self.lp_no
self.outstanding_no += tokens_return
else:
# more into NO bucket, YES is returned
old_lp_yes = self.lp_yes
self.lp_yes = (amount + self.lp_no) * yes_to_no
self.lp_no += amount
tokens_return = amount + old_lp_yes - self.lp_yes
self.outstanding_yes += tokens_return
entry = ["add", "liquidity", amount, 0, yes_to_no, 0, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0 ,0]
self._add_history(entry)
# should return amount of outcome token
return (type, amount)
# def remove_liquidity(amount):
def buy_token(self, type, original_amount) -> Tuple[int, float]: #yes=1 | no = 0
# take fee before any operation and store in fee_pool
fee = original_amount * self.fee_fraction
amount = original_amount - fee
self.fee_pool += fee
# adding fee_to_liquidity fraction to liquidity fee pool
# note: liquidity is provided before buy such that added liquidity is available for current transaction
if (self.fee_to_liquidity_fraction > 0):
reinvest_fee = fee * self.fee_to_liquidity_fraction
self.add_liquidity(reinvest_fee)
# keep invariant
k = (self.lp_yes * self.lp_no)
# add liquidity
self.lp_token += amount
if type:
tokens_return, x = self.calc_buy(type, amount)
buy_price_yes = amount / tokens_return
# calc slippage
slippage_yes = self.calc_slippage(type, amount)
assert (slippage_yes > 0), f"slippage_yes {slippage_yes} <= 0"
# remove returned tokens from the pool, keep all no tokens
self.lp_yes += x
self.lp_no += amount
entry = ["buy", "yes", original_amount, fee, buy_price_yes, slippage_yes, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
else:
tokens_return, x = self.calc_buy(type, amount)
buy_price_no = amount / tokens_return
slippage_no = self.calc_slippage(type, amount)
assert (slippage_no > 0), f"slippage_no {slippage_no} <= 0"
# remove returned tokens from the pool, keep all yes tokens
self.lp_no += x
self.lp_yes += amount
entry = ["buy", "no", original_amount, fee, buy_price_no, slippage_no, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
# assert invariant, we use float and disregard rounding so must be within e ~ 0
inv_div = abs(k - (self.lp_yes * self.lp_no))
# use variable epsilon - float numbers suck due to scaling
inv_eps = min(self.lp_no, self.lp_yes) / 100000000
if inv_div > inv_eps :
print(f"invariant K {k} {self.lp_yes * self.lp_no} == {inv_div}, lp_yes {self.lp_yes} lp_no {self.lp_no} eps {inv_eps}")
assert(inv_div < inv_eps)
impermanent_loss = self.calc_impermanent_loss()
assert(impermanent_loss >= 0)
# outstanding yes/no token may be converted at event outcome to reward or immediately traded
outstanding_token = self.calc_outstanding_token()
# impermanent loss at last position in history entry
entry[-2] = impermanent_loss
entry[-1] = outstanding_token[1]
self._add_history(entry)
return (type, tokens_return)
def calc_withdrawable_liquidity(self) -> float:
# collateral taken from the pool and tokens returned when adding liquidity
return min(self.lp_yes + self.outstanding_yes, self.lp_no + self.outstanding_no)
def calc_payout(self) -> float:
# how big is reward after all liquidity is removed
return self.lp_token - self.calc_withdrawable_liquidity()
def calc_outstanding_token(self) -> Tuple[int, float]:
# outcome tokens going to LP on top of removed liquidity
withdraw_token = self.calc_withdrawable_liquidity()
total_yes = self.lp_yes + self.outstanding_yes
total_no = self.lp_no + self.outstanding_no
if total_yes > total_no:
outstanding_token = (1, total_yes - withdraw_token)
else:
outstanding_token = (0, total_no - withdraw_token)
return outstanding_token
def calc_impermanent_loss(self) -> float:
withdraw_token = self.calc_withdrawable_liquidity()
return self.liquidity - withdraw_token
def calc_buy(self, type, amount) -> Tuple[float, float]:
k = (self.lp_yes * self.lp_no)
if type:
x = k / (self.lp_no + amount) - self.lp_yes
else:
x = k / (self.lp_yes + amount) - self.lp_no
# (tokens returned to the user, amm pool delta)
return amount - x, x
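# Worked example (a sketch, assuming lp_yes = lp_no = 1000 and a YES buy of
# amount = 100): k = 1_000_000; x = k / 1100 - 1000 ≈ -90.909, so the buyer
# receives amount - x ≈ 190.909 YES tokens, and after buy_token() updates the
# pool (lp_yes ≈ 909.091, lp_no = 1100) the product is again k = 1_000_000.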
def calc_marginal_price(self, type) -> float:
pool_total = (self.lp_no + self.lp_yes)
return (self.lp_no if type else self.lp_yes) / pool_total
def calc_slippage(self, type, amount) -> float:
tokens_return, _ = self.calc_buy(type, amount)
buy_price = amount / tokens_return
marginal_price = self.calc_marginal_price(type)
return (buy_price - marginal_price) / buy_price
@staticmethod
def calc_british_odds(returned_tokens, amount) -> float:
# british odds https://www.investopedia.com/articles/investing/042115/betting-basics-fractional-decimal-american-moneyline-odds.asp
# shows the reward on top of stake as a decimal fraction to the stake
# (TODO: we could use Fraction class of python for nice odds representation)
# may be negative due to cpmm inefficiencies
return (returned_tokens - amount) / amount
# def sell_token(type, amount):
# def get_buy_price_yes():
# def get_sell_price_yes():
_csv_headers = [
"activity", "type", "amount", "fee", "token_buy_sell_price",
"slippage", "returned tokens", "lp_yes", "lp_no", "lp_token",
"liquidity", "fee_pool", "impermanent_loss", "loss_outstanding_tokens"
]
@property
def history_as_dataframe(self) -> pd.DataFrame:
return pd.DataFrame(data=self.history, columns=CPMM._csv_headers)
def save_history(self, name) -> None:
df = self.history_as_dataframe
with open(name, "wt") as f:
df.to_csv(f, index=False, quoting=csv.QUOTE_NONNUMERIC)
def _add_history(self, entry) -> None:
# check entry size
assert(len(entry) == len(CPMM._csv_headers))
self.history.append(entry)
def run_experiment(name, cpmm: CPMM, n, prior_dist, betting_dist):
# TODO: must have realistic model for betting behavior, for example
# total bets volume cannot cross % of liquidity
# individual bet cannot have slippage > 1% etc.
bet_outcomes = prior_dist(n)
bet_amounts = betting_dist(n)
print(f"{name}: bet outcomes N/Y {np.bincount(bet_outcomes)}")
for b, amount in zip(bet_outcomes, bet_amounts):
cpmm.buy_token(b, amount)
# print(cpmm.history)
cpmm.save_history(f"{name}.csv")
def main():
rng = default_rng()
# experiment 1
# 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors prior 50:50, bets integer uniform range [1, 100]
cpmm = CPMM()
cpmm.create_event(1000)
run_experiment(
"experiment1",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 2
# 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors prior 70:30, bets integer uniform range [1, 100]
cpmm = CPMM()
cpmm.create_event(1000)
run_experiment(
"experiment2",
cpmm,
1000,
lambda size: rng.binomial(1, 0.7, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 3
# 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors prior 70:30, bets integer uniform range [1, 100]
# fee 2% taken and not added to liquidity pool
cpmm = CPMM(fee_fraction=0.02)
cpmm.create_event(1000)
run_experiment(
"experiment3",
cpmm,
1000,
lambda size: rng.binomial(1, 0.7, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 4
# 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors prior 50:50, bets integer uniform range [1, 100]
# fee 2% taken and 50% added to liquidity pool
cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
cpmm.create_event(1000)
run_experiment(
"experiment4",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 5
# 1000 rounds, initial liquidity 1:3 1000 EVNT, bettors prior 50:50, bets integer uniform range [1, 100]
# fee 2% taken and 50% added to liquidity pool
cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
cpmm.create_event(1000, 1/3)  # 1:3 initial yes:no liquidity, per the comment above
run_experiment(
"experiment5",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
load("@bazel_gazelle//:deps.bzl", "go_repository")
def load_external_go_repositories():
########## Server request handling ###############
go_repository(
name = "com_github_andybalholm_brotli",
importpath = "github.com/andybalholm/brotli",
commit = "1d750214c25205863625bb3eb8190a51b2cef26d", # Sep 22, 2021
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",
commit = "18533face0dfe7042f8157bba9010bd7f8df54b1", # Nov 4, 2020
)
go_repository(
name = "com_github_klauspost_compress",
importpath = "github.com/klauspost/compress",
tag = "v1.14.2", # Jan 25, 2022
)
go_repository(
name = "com_github_valyala_fasthttp",
importpath = "github.com/valyala/fasthttp",
tag = "v1.33.0",
)
go_repository(
name = "com_github_buaazp_fasthttprouter",
importpath = "github.com/buaazp/fasthttprouter",
tag = "979d6e516ec324575737805deabe0303794c58bd", # Jan 9, 2019
)
########## Logging ###############
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
tag = "v1.9.0", # Jul 15, 2021
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
tag = "v1.7.0", # May 6, 2021
)
go_repository(
name = "org_uber_go_zap",
importpath = "go.uber.org/zap",
tag = "v1.21.0", # Feb 7, 2022
)
|
nilq/baby-python
|
python
|
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from nli_z3950.load_marc_data import get_marc_records_schema, parse_record
from pymarc.marcxml import parse_xml_to_array
import datetime, json
def get_resource(parameters, stats):
stats['search_rows'] = 0
for filenum in parameters['filenums']:
filepath = parameters['files-path-template'].format(filenum=filenum)
search_id = 'neaman{}'.format(filenum)
with open(filepath) as f:
for record_num, record in enumerate(parse_xml_to_array(f)):
row = parse_record(record)
migdar_id = '{}-{}'.format(search_id, record_num)
row.update(migdar_id=migdar_id, first_ccl_query='neaman{}.xml'.format(filenum),
last_query_datetime=datetime.datetime.now(),
json=json.loads(row['json']))
stats['search_rows'] += 1
yield row
def get_resources(resources, parameters, stats):
for resource in resources:
yield resource
yield get_resource(parameters, stats)
def get_datapackage(datapackage):
schema = get_marc_records_schema()
schema['fields'] += [{'name': 'migdar_id', 'type': 'string'},
{'name': 'first_ccl_query', 'type': 'string'},
{'name': 'last_query_datetime', 'type': 'datetime'},
{'name': 'json', 'type': 'object'}]
datapackage['resources'].append({'name': 'search_haifa_files',
'path': 'search_haifa_files.csv',
PROP_STREAMING: True,
'schema': schema})
return datapackage
def main():
parameters, datapackage, resources, stats = ingest() + ({},)
spew(get_datapackage(datapackage),
get_resources(resources, parameters, stats),
stats)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import os
import sqlite3
class DatabaseRepository:
def __init__(self, database_file, schema_file):
db_is_new = not os.path.exists(database_file)
self.connection = sqlite3.connect(database_file, check_same_thread=False)
if db_is_new:
with open(schema_file, 'rt') as f:
schema = f.read()
self.connection.executescript(schema)
def close_db(self):
self.connection.commit()
self.connection.close()
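# A minimal usage sketch (assumptions: hypothetical file names; schema.sql
# holds the CREATE TABLE statements executed on first run):
# repo = DatabaseRepository("app.db", "schema.sql")
# repo.connection.execute("INSERT INTO items(name) VALUES (?)", ("demo",))
# repo.close_db()  # commits and closes the connection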
|
nilq/baby-python
|
python
|
"""Utility methods for interacting with Kubernetes API server.
This module is merged into the `metalk8s_kubernetes` execution module,
by virtue of its `__virtualname__`.
"""
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError
import salt.utils.files
import salt.utils.templates
import salt.utils.yaml
MISSING_DEPS = []
try:
import kubernetes.client
from kubernetes.client.rest import ApiException
except ImportError:
MISSING_DEPS.append("kubernetes.client")
try:
import kubernetes.config
except ImportError:
MISSING_DEPS.append("kubernetes.config")
try:
from urllib3.exceptions import HTTPError
except ImportError:
MISSING_DEPS.append("urllib3")
__virtualname__ = "metalk8s_kubernetes"
def __virtual__():
if MISSING_DEPS:
return False, "Missing dependencies: {}".format(", ".join(MISSING_DEPS))
return __virtualname__
def get_kubeconfig(**kwargs):
"""
Get the kubeconfig and context from args, or directly from pillar, or from
the salt-master configuration.
Pillar value from `metalk8s.api_server.kubeconfig` and
`metalk8s.api_server.context`
Salt master config from `kubernetes.kubeconfig` and `kubernetes.context`
CLI Examples:
.. code-block:: bash
salt-call metalk8s_kubernetes.get_kubeconfig kubeconfig="/etc/kubernetes/admin.conf"
salt-call metalk8s_kubernetes.get_kubeconfig
Code Example:
.. code-block:: python
kubeconfig, context = __salt__['metalk8s_kubernetes.get_kubeconfig'](**kwargs)
"""
pillar_dict = __pillar__.get("metalk8s", {}).get("api_server", {})
kubeconfig = (
kwargs.get("kubeconfig")
or pillar_dict.get("kubeconfig")
or __salt__["config.option"]("kubernetes.kubeconfig")
)
context = (
kwargs.get("context")
or pillar_dict.get("context")
or __salt__["config.option"]("kubernetes.context")
or None
)
return kubeconfig, context
def get_version_info(**kwargs):
"""Retrieve the API server version information, as a dict.
The result contains various version details to be as exhaustive as
possible.
CLI Example:
salt '*' metalk8s_kubernetes.get_version_info
"""
kubeconfig, context = get_kubeconfig(**kwargs)
api_client = kubernetes.config.new_client_from_config(
config_file=kubeconfig, context=context
)
api_instance = kubernetes.client.VersionApi(api_client=api_client)
try:
version_info = api_instance.get_code()
except (ApiException, HTTPError) as exc:
raise CommandExecutionError("Failed to get version info") from exc
return version_info.to_dict()
def ping(**kwargs):
"""Check connection with the API server.
Returns True if a request could be made, False otherwise.
CLI Example:
salt '*' metalk8s_kubernetes.ping
"""
try:
get_version_info(**kwargs)
except CommandExecutionError:
return False
return True
def read_and_render_yaml_file(source, template, context=None, saltenv="base"):
"""
Read a YAML file and, if needed, render it using the specified
templating engine. Returns the Python objects defined inside the file.
"""
sfn = __salt__["cp.cache_file"](source, saltenv)
if not sfn:
raise CommandExecutionError("Source file '{0}' not found".format(source))
if not context:
context = {}
with salt.utils.files.fopen(sfn, "r") as src:
contents = src.read()
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
context=context,
saltenv=saltenv,
grains=__grains__,
pillar=__pillar__,
salt=__salt__,
opts=__opts__,
)
if not data["result"]:
# Failed to render the template
raise CommandExecutionError(
"Failed to render file path with error: "
"{0}".format(data["data"])
)
contents = data["data"].encode("utf-8")
else:
raise CommandExecutionError(
"Unknown template specified: {0}".format(template)
)
return salt.utils.yaml.safe_load(contents)
def get_service_endpoints(service, namespace, kubeconfig):
error_tpl = "Unable to get kubernetes endpoints for {} in namespace {}"
try:
endpoint = __salt__["metalk8s_kubernetes.get_object"](
name=service,
kind="Endpoints",
apiVersion="v1",
namespace=namespace,
kubeconfig=kubeconfig,
)
if not endpoint:
raise CommandExecutionError("Endpoint not found")
except CommandExecutionError as exc:
raise CommandExecutionError(error_tpl.format(service, namespace)) from exc
try:
result = []
for address in endpoint["subsets"][0]["addresses"]:
# Extract hostname, ip and node_name
res_ep = {
k: v for k, v in address.items() if k in ["hostname", "ip", "node_name"]
}
# Add ports info to result dict
res_ep["ports"] = {
port["name"]: port["port"] for port in endpoint["subsets"][0]["ports"]
}
result.append(res_ep)
except (AttributeError, IndexError, KeyError, TypeError) as exc:
raise CommandExecutionError(error_tpl.format(service, namespace)) from exc
return result
|
nilq/baby-python
|
python
|
import logging.config
import configparser
import metrics
from datetime import datetime
import urllib.request
from unittest import mock
from requests import request
from aiohttp.web import Response
from jiracollector import JiraCollector
import pytest
config = configparser.ConfigParser()
config.read('metrics.ini')
logging.config.fileConfig(config.get('logging', 'config_file'), defaults=None, disable_existing_loggers=True)
logger = logging.getLogger()
# Asserts that "happy-path" works, i.e the returned metrics from JiraCollector
# is correctly converted and internal metrics are added to the cached metrics
@mock.patch('jiracollector.JiraCollector.__init__',mock.Mock(return_value=None))
@mock.patch('jiracollector.JiraCollector.collect')
def test_collect_metrics(mock_collector):
metrics_dict = {
"jira_total_done{project_name=\"BIP\"}":"42"
}
mock_collector.return_value = metrics_dict
metrics.serviceIsReady = False
metrics.collectJiraMetrics()
assert metrics.serviceIsReady == True
assert metrics.cachedMetrics == "jira_total_done{project_name=\"BIP\"} 42\njira_total_number_of_metrics 1\njira_total_execution_time_seconds 0\n"
# Asserts that an exception is raised if init or collect from Jiracollector raises an exception
@mock.patch('jiracollector.JiraCollector.__init__',side_effect=mock.Mock(side_effect=Exception("Just for testing exception Exception")),
)
def test_collect_metrics_raises_exception_if_exception_from_jiracollector(mock_collector):
metrics.serviceIsReady = False
with pytest.raises(Exception):
metrics.collectJiraMetrics()
@mock.patch('requests.request')
def test_alive_always_returns_200(mock_request):
response = metrics.alive(mock_request)
assert response.status == 200
@mock.patch('requests.request')
def test_ready_returns_503_or_200_depending_on_serviceIsReady(mock_request):
response = metrics.ready(mock_request)
assert response.status == 503
metrics.serviceIsReady = True
response = metrics.ready(mock_request)
assert response.status == 200
|
nilq/baby-python
|
python
|
"""
MIT License
mift - Copyright (c) 2021 Control-F
Author: Mike Bangham (Control-F)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, 'mift', and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5.QtCore import pyqtSignal, QThread
import os
from os.path import join as pj
from os.path import basename, abspath
import zipfile
import tarfile
import logging
from src.utils import resource_path
class ExtractArchiveThread(QThread):
finishedSignal = pyqtSignal(str)
progressSignal = pyqtSignal(str)
def __init__(self, parent, files_to_extract, save_dir, archive, maintain_dir_structure=False, key_dir=None):
QThread.__init__(self, parent)
self.files_to_extract = files_to_extract
self.save_dir = save_dir
self.archive = archive
self.maintain_dir_structure = maintain_dir_structure
self.key_dir = key_dir
def run(self):
os.makedirs(self.save_dir, exist_ok=True)
if zipfile.is_zipfile(self.archive):
self.progressSignal.emit('Archive is zipfile, processing members...')
with zipfile.ZipFile(self.archive, 'r') as zip_obj:
archive_members = zip_obj.namelist()
if not self.maintain_dir_structure:
for file_member in self.files_to_extract: # get the index of the file in the archive members
file_idxs = [i for i, archive_member in enumerate(archive_members)
if file_member in archive_member]
if file_idxs:
self.progressSignal.emit('Found {} to extract from the archive. '
'Extracting...'.format(len(file_idxs)))
for idx in file_idxs:
if len(basename(archive_members[idx])) != 0:
file = pj(self.save_dir, '{}'.format(basename(archive_members[idx])))
with open(file, 'wb') as file_out:
zip_fmem = zip_obj.read(archive_members[idx])
file_out.write(zip_fmem)
else:
self.progressSignal.emit('Extracting files with base dir: {}/'.format(self.key_dir))
for archive_member in archive_members:
if self.key_dir in archive_member:
if archive_member.endswith('/'):
os.makedirs(self.save_dir+'/'+archive_member, exist_ok=True)
else:
file = abspath(self.save_dir+'/{}'.format(archive_member))
try:
with open(file, 'wb') as file_out:
zip_fmem = zip_obj.read(archive_member)
file_out.write(zip_fmem)
except:
logging.error('cannot copy file: {}'.format(file))
else:
self.progressSignal.emit('Archive is tarfile, processing members...')
if not self.maintain_dir_structure:
with tarfile.open(self.archive, 'r') as tar_obj:
archive_members = tar_obj.getnames()
for file_member in self.files_to_extract: # get the index of the file in the archive members
file_idxs = [i for i, archive_member in enumerate(archive_members)
if file_member in archive_member]
if file_idxs:
self.progressSignal.emit('Found {} to extract from the archive. '
'Extracting...'.format(len(file_idxs)))
for idx in file_idxs:
if len(basename(archive_members[idx])) != 0:
file = pj(self.save_dir, '{}'.format(basename(archive_members[idx])))
with open(file, 'wb') as file_out:
tar_fmem = tar_obj.extractfile(archive_members[idx])
file_out.write(tar_fmem.read())
else:
self.progressSignal.emit('Extracting files with base dir: {}/'.format(self.key_dir))
with tarfile.open(self.archive, 'r') as tar_obj:
for member in tar_obj:
if self.key_dir in member.name:
if member.isdir():
os.makedirs(self.save_dir+'/'+member.name.replace(':', ''), exist_ok=True)
else:
file = self.save_dir+'/{}'.format(member.name.replace(':', ''))
try:
with open(file, 'wb') as file_out:
tar_fmem = tar_obj.extractfile(member)
file_out.write(tar_fmem.read())
except:
logging.error('cannot copy file: {}'.format(file))
self.finishedSignal.emit('Archive processed!')
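# A minimal usage sketch (assumptions: called from a QObject/QWidget `self`
# inside a running Qt application, with hypothetical paths):
# thread = ExtractArchiveThread(self, ['files/file1.db'], '/tmp/out', 'backup.tar')
# thread.progressSignal.connect(print)
# thread.finishedSignal.connect(print)
# thread.start()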
|
nilq/baby-python
|
python
|
from constant_sum import *
if __name__ == "__main__":
t = 56
n = 510
s = 510
for i in range(500):
l = (T_len(t,n-i,s))
if l>0:
print(log(l, 2), t, n - i, s, t*n - s + i, t*n)
#for b in range(t, 1, -1):
# p = 0
# k = min(n*(t - b + 1), s)
# #k = s
# print("b\ts\tlen\tbj\tbj")
# for i in range(0, k+1):
# aux = T_len(b-1, n, s- i)
# p += aux
# bjota = bj(i, b-1, n, s)
# zjota = zj(i, b-1, n, s)
# print("%d\t%d\t%d\t%d\t%d\t%d"%(b-1, s-i, aux, p, bjota, zjota))
"""
t(45,145,952)
Cv = 952
Cs =
Cg =
vs
t(45, 72, 1006)
Cv = 1006
Cs =
Cg =
"""
|
nilq/baby-python
|
python
|
from .statement_base import Statement
import sasoptpy
class DropStatement(Statement):
@sasoptpy.class_containable
def __init__(self, *elements):
super().__init__()
for i in elements:
self.elements.append(i)
self.keyword = 'drop'
def append(self, element):
pass
def _defn(self):
s = f'{self.keyword} '
cons = []
for c in self.elements:
cons.extend(c._get_name_list())
s += ' '.join(cons) + ';'
return s
@classmethod
def model_drop_constraint(cls, _, c):
if sasoptpy.core.util.is_droppable(c):
st = DropStatement(c)
return st
@classmethod
def drop_constraint(cls, *constraints):
if all([sasoptpy.core.util.is_droppable(c) for c in constraints]):
st = DropStatement(*constraints)
class RestoreStatement(DropStatement):
def __init__(self, *elements):
super().__init__(*elements)
self.keyword = 'restore'
@classmethod
def restore_constraint(cls, *constraints):
if all([sasoptpy.core.util.is_droppable(c) for c in constraints]):
st = RestoreStatement(*constraints)
|
nilq/baby-python
|
python
|
R = int(input())
print(2*3.141592653589793*R)
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import sys
from typing import Literal
from arch import arch_model
import warnings
from sklearn.exceptions import ConvergenceWarning
# Note: a module-level `with warnings.catch_warnings():` block only suppresses
# warnings inside the block itself, so apply the filter globally instead.
warnings.simplefilter("ignore", category=ConvergenceWarning)
def get_rolling_vol_forecasts(return_series,
model,
horizon : int=21,
fitting_end_date : str = "2021-01-01",
#type_forecast : Literal['rolling','recursive'] = 'rolling'
):
print(f"\nFitting rolling {model.volatility} model with a {model.distribution}.")
index = return_series.index
start_loc = 0
end_loc = np.where(index > fitting_end_date)[0].min()
n_forecasts = 2 + np.where(index == index[-1])[0].min() - end_loc  # number of forecasts to make
forecasts = {}
print(f"Number of forecasts: {n_forecasts}")
for i in range(n_forecasts):
sys.stdout.write(".")
sys.stdout.flush()
#if type_forecast == 'rolling':
res = model.fit(first_obs=i, last_obs=i + end_loc, disp="off")
#else:
# res = model.fit(last_obs=i + end_loc, disp="off")
temp = np.sqrt(res.forecast(horizon=horizon, reindex=False).variance)
fcast = temp.iloc[0]
forecasts[fcast.name] = fcast
vol_forecasts = pd.DataFrame(forecasts).T.multiply(np.sqrt(252))
return vol_forecasts
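# A minimal usage sketch (assumption: `returns` is a pandas Series of daily
# percentage returns with a DatetimeIndex):
# from arch import arch_model
# am = arch_model(returns, vol="Garch", p=1, q=1, dist="normal")
# vol_fc = get_rolling_vol_forecasts(returns, am, horizon=21,
#                                    fitting_end_date="2021-01-01")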
|
nilq/baby-python
|
python
|
from django.templatetags.static import static as get_static_url
from django.shortcuts import redirect
from .exceptions import UnknownMessageTypeError
from .models import Dispatch
from .signals import sig_unsubscribe_failed, sig_mark_read_failed
def _generic_view(message_method, fail_signal, request, message_id, dispatch_id, hashed, redirect_to=None):
if redirect_to is None:
redirect_to = '/'
try:
dispatch = Dispatch.objects.select_related('message').get(pk=dispatch_id)
if int(message_id) != dispatch.message_id:
raise ValueError()
message = dispatch.message
except (Dispatch.DoesNotExist, ValueError):
pass
else:
try:
message_type = message.get_type()
expected_hash = message_type.get_dispatch_hash(dispatch_id, message_id)
method = getattr(message_type, message_method)
return method(
request, message, dispatch,
hash_is_valid=(expected_hash == hashed),
redirect_to=redirect_to
)
except UnknownMessageTypeError:
pass
fail_signal.send(None, request=request, message=message_id, dispatch=dispatch_id)
return redirect(redirect_to)
def unsubscribe(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
return _generic_view(
'handle_unsubscribe_request', sig_unsubscribe_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
)
def mark_read(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles mark message as read request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
if redirect_to is None:
redirect_to = get_static_url('img/sitemessage/blank.png')
return _generic_view(
'handle_mark_read_request', sig_mark_read_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
)
|
nilq/baby-python
|
python
|
"""
Modules for predicting topological properties
"""
|
nilq/baby-python
|
python
|
"""
Class of water block
"""
import os
from .block import Block
import math
class CurrentWaterRight(Block):
"""
Represents a water block whose current pushes the player to the right
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dx = 1
dy = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
# Then check if it's solid. We do it in that order in case
# the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterLeft(Block):
"""
    Water block whose current pushes the player to the left.
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dx = -1
dy = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
                # the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterDown(Block):
"""
    Water block whose current pushes the player down.
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dy = 1
dx = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
                # the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterUp(Block):
"""
    Water block whose current pushes the player up.
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dy = -1
dx = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
                # the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
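# The four classes above differ only in the direction of the current (dx, dy).
# A possible consolidation (a sketch against the same Block/player API used
# above, not the project's actual code) keeps one copy of the movement logic:
class CurrentWaterBase(Block):
    """
    Water block that pushes the player along a fixed (dx, dy) current.
    """
    CURRENT = (0, 0)  # overridden by each direction subclass
    def __init__(
        self,
        settings: any,
        path: str = 'advancing_hero/images/blocks/water3.png',
    ):
        super().__init__(os.path.abspath(path),
                         settings,
                         settings.WATER,
                         interactable=True)
    def player_interaction(self, player, *args, **kwargs):
        super().player_interaction(player)
        player.in_water = True
        player.speed = player.speed_base
        dx, dy = self.CURRENT
        for tile in player.stage.tile_list:
            # Check only blocks which are on screen and are interactable
            if tile[1].bottom > 0 and tile[1].top < player.settings.screen_height \
                    and tile[2].is_interactable:
                if tile[2].is_solid and (dx or dy):
                    norm = math.sqrt(dx * dx + dy * dy)
                    # Cancel the current along any axis that would push the
                    # player into this solid tile.
                    if tile[1].colliderect(player.rect.x + dx / norm, player.rect.y,
                                           player.rect.width, player.rect.height):
                        dx = 0
                    if tile[1].colliderect(player.rect.x, player.rect.y + dy / norm,
                                           player.rect.width, player.rect.height):
                        dy = 0
        if dx or dy:
            norm = math.sqrt(dx * dx + dy * dy)
            player.rect.x += dx / norm
            player.rect.y += dy / norm
        # Keep the player inside the screen.
        player.rect.bottom = min(player.rect.bottom, player.settings.screen_height)
        player.rect.top = max(player.rect.top, 0)
        player.rect.right = min(player.rect.right, player.settings.screen_width)
        player.rect.left = max(player.rect.left, 0)
# Each direction then reduces to a one-line subclass, e.g.:
#     class CurrentWaterRight(CurrentWaterBase):
#         CURRENT = (1, 0)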
|
nilq/baby-python
|
python
|
# Import the Flask dependencies
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for
# Import the password hashing helpers
from werkzeug.security import check_password_hash, generate_password_hash
# Import Firebase
from firebase import firebase
# Import the database object from the main application module
from app import db
# Import the form module
from app.mod_auth.forms import LoginForm
# Import the user module (i.e. User)
from app.mod_auth.models import User
# Define the connection to the Firebase nodes
firebase = firebase.FirebaseApplication('https://ecclesiapp-fe5b2.firebaseio.com', None)
# Define the blueprint: 'auth'; set the URL prefix: app.url/auth
mod_auth = Blueprint('auth', __name__, url_prefix='/auth')
# Set up the routes and accepted methods
@mod_auth.route('/signin/', methods=['GET', 'POST'])
def signin():
    # If the sign-in form was submitted
    form = LoginForm(request.form)
    # Validate the sign-in form
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and check_password_hash(user.password, form.password.data):
session['user_id'] = user.id
flash('Welcome %s' % user.name)
return redirect(url_for('auth.home'))
flash('Wrong email or password', 'error-message')
return render_template("auth/signin.html", form=form)
@mod_auth.route('/nada/', methods=['GET', 'POST'])
def nada():
passwd = "1234"
pw_hash = generate_password_hash(passwd)
return pw_hash
@mod_auth.route('/fbase/', methods=['GET', 'POST'])
def fbase():
    # Fetch the contents of the reference
departamentos = firebase.get('/departamentos/MANAGUA/2/nombre', None)
return str(departamentos)
@mod_auth.route('/mysql/', methods=['GET', 'POST'])
def mysql():
    mysql = Nube_Actividad.query.all()  # NOTE: Nube_Actividad is assumed to be imported elsewhere; it is not defined in this module
return str(mysql)
|
nilq/baby-python
|
python
|
import functools
def ErrorHandler(function):
    """Decorator that silences any exception raised by ``function``.
    On failure the wrapped call returns None instead of propagating the error.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:  # pragma: no cover
            return None
    return wrapper
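if __name__ == '__main__':
    # Usage sketch: any exception raised inside the wrapped callable is silenced
    # and the call returns None instead.
    @ErrorHandler
    def parse_int(value):
        return int(value)
    assert parse_int("3") == 3
    assert parse_int("oops") is None  # ValueError swallowed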
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import tensorflow as tf
from math import floor
from dataset.date_format import START_CODE, INPUT_FNS, OUTPUT_VOCAB, encodeInputDateStrings, encodeOutputDateStrings, dateTupleToYYYYDashMMDashDD
def generateOrderedDates(minYear: str, maxYear: str) -> list:
daterange = pd.date_range(minYear, maxYear)
dates = []
for single_date in daterange:
        date = [int(part) for part in single_date.strftime("%Y-%m-%d").split('-')]
dates.append(date)
return dates
def dateTuplesToTensor(dateTuples, dec_output_one_hot = True):
# Encoder Input
inputs = []
for _, fn in enumerate(INPUT_FNS):
for _, dateTuple in enumerate(dateTuples):
formatedDate = fn(dateTuple)
inputs.append(formatedDate)
encoderInput = encodeInputDateStrings(inputs)
# Decoder Input
isoDates = []
for _, dateTuple in enumerate(dateTuples):
isoDates.append(dateTupleToYYYYDashMMDashDD(dateTuple))
decoderInput = encodeOutputDateStrings(isoDates).astype("float32")
if not dec_output_one_hot:
decoderOutput = decoderInput
decoderOutput = np.tile(decoderOutput, (len(INPUT_FNS), 1)).astype("int32")
# Remove Last column
decoderInput = decoderInput[..., :-1]
# Create a single column with start code
shift = np.full((decoderInput.shape[0], 1), START_CODE, dtype='float32')
# Concat the tensors
decoderInput = np.concatenate((shift, decoderInput), axis=1)
# Tile to match the encoderInput
decoderInput = np.tile(decoderInput, (len(INPUT_FNS), 1))
if dec_output_one_hot:
# Decoder Output
decoderOutput = tf.one_hot(
encodeOutputDateStrings(isoDates),
len(OUTPUT_VOCAB)
)
# Tile to match the encoderInput
decoderOutput = np.tile(decoderOutput, (len(INPUT_FNS), 1, 1)).astype("int32")
return encoderInput, decoderInput, decoderOutput
def generateDataSet(minYear="1950-01-01", maxYear="2050-01-01", trainSplit=0.25, validationSplit=0.15, dec_output_one_hot = True):
dateTuples = generateOrderedDates(minYear, maxYear)
np.random.shuffle(dateTuples)
numTrain = floor(len(dateTuples)*trainSplit)
numValidation = floor(len(dateTuples)*validationSplit)
trainEncoderInput, trainDecoderInput, trainDecoderOutput = dateTuplesToTensor(
dateTuples[0:numTrain], dec_output_one_hot)
valEncoderInput, valDecoderInput, valDecoderOutput = dateTuplesToTensor(
dateTuples[numTrain:numTrain+numValidation], dec_output_one_hot)
testDateTuples = dateTuples[numTrain+numValidation: len(dateTuples)]
return trainEncoderInput, trainDecoderInput, trainDecoderOutput, valEncoderInput, valDecoderInput, valDecoderOutput, testDateTuples
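# Usage sketch (a rough outline, assuming the dataset.date_format helpers above):
#   enc_in, dec_in, dec_out, venc, vdec, vout, test_tuples = generateDataSet(
#       minYear="1950-01-01", maxYear="1960-01-01")
# Each encoder row holds one formatted variant per entry of INPUT_FNS; the decoder
# input is the ISO date shifted right by one step and prefixed with START_CODE
# (teacher forcing), and the decoder output is its (optionally one-hot) target.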
|
nilq/baby-python
|
python
|
from __future__ import division, print_function, absolute_import
# LIBTBX_SET_DISPATCHER_NAME iota.single_image
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
'''
Author : Lyubimov, A.Y.
Created : 05/31/2018
Last Changed: 01/30/2019
Description : IOTA Single Image: can process single image using DIALS,
with an array of options (i.e. anything from only spotfinding, to indexing,
space group determination, refinement, integration)
'''
import argparse
import os
import time
from iota.components.iota_init import initialize_single_image
from iota.components.iota_base import ProcessingBase
def parse_command_args():
""" Parses command line arguments (only options for now) """
parser = argparse.ArgumentParser(prog='iota.single_image')
parser.add_argument('path', type=str, nargs = '?', default = None,
help = 'Path to data file')
# parser.add_argument('--backend', type=str, default='dials',
# help='Backend for processing')
parser.add_argument('--paramfile', type=str, default=None,
help='Parameter file for processing')
parser.add_argument('--output_file', type=str, default=None,
help='Output filename')
parser.add_argument('--output_dir', type=str, default=None,
help='Output directory (for BluIce)')
parser.add_argument('--termfile', type=str, default='.stop',
help='Termination signal filename')
parser.add_argument('--index', type=int, default=1,
help='Numerical index of the image')
parser.add_argument('--min_bragg', type=int, default=10,
help='Minimum spots for successful spotfinding result')
parser.add_argument('--nproc', type=int, default=1,
help='Number of processors')
parser.add_argument('--action', type=str, default='spotfinding',
help='Code for how far to go; available codes: '
'spotfind, index, integrate')
parser.add_argument('--verbose', action = 'store_true',
help='Print information to stdout')
return parser
class SingleImageProcessor(ProcessingBase):
def __init__(self, *args, **kwargs):
ProcessingBase.__init__(self, *args, **kwargs)
def process(self):
file_wait_start = time.time()
errors = []
n_spots = 0
n_overloads = 0
res = (99, 99)
n_rings = 0
avg_I = 0
score = 0
sg = None
uc = None
lres = 999
hres = 999
img = self.params.input[0]
img_object = None
while True:
elapsed = time.time() - file_wait_start
if elapsed > 30:
errors.append('{} does not exist'.format(img))
print('DEBUG: ELAPSED = ', time.time() - file_wait_start)
break
if os.path.isfile(img):
input_entry = (1, img)
img_object = self.import_and_process(input_entry)
n_spots = img_object.final['spots']
score = img_object.final['indexed']
hres = img_object.final['res']
lres = img_object.final['lres']
sg = img_object.final['sg']
uc = ' '.join([
'{:.2f}'.format(img_object.final['a']),
'{:.2f}'.format(img_object.final['b']),
'{:.2f}'.format(img_object.final['c']),
'{:.2f}'.format(img_object.final['alpha']),
'{:.2f}'.format(img_object.final['beta']),
'{:.2f}'.format(img_object.final['gamma'])
])
errors.extend(img_object.errors)
break
if img_object:
if self.verbose:
print ('SPOTS FOUND: {}'.format(n_spots))
print ('INDEXING: {} INDEXED SPOTS'.format(score))
                if hres != 999:
print ('RESOLUTION: {:.2f} - {:.2f}'.format(lres, hres))
if sg and uc:
print ('BRAVAIS LATTICE: {}'.format(sg))
print ('UNIT CELL: {}'.format(uc))
print ('TOTAL PROCESSING TIME: {:.2f} SEC'
''.format(time.time() - file_wait_start))
if errors:
for e in errors:
print (e)
# info = [self.index, len(observed), self.img, sg, uc]
if self.info.obj_list_file:
with open(self.info.obj_list_file, 'a') as outf:
info_line = '{} {} {} {} {}'.format(0, n_spots, img, sg, uc)
outf.write('{}\n'.format(info_line))
if self.verbose:
if errors:
err = errors[0]
print_errors = True
else:
err = ''
print_errors = False
print ('\n__RESULTS__')
print ('{} {} {} {:.2f} {} {} {} {} {{{}}}' .format(n_spots, n_overloads,
score, res[1], n_rings, 0, avg_I, 0, err))
if print_errors:
print ("__ERRORS__")
for e in errors:
print (e)
# ============================================================================ #
if __name__ == "__main__":
    args, unk_args = parse_command_args().parse_known_args()
info, iparams = initialize_single_image(img=os.path.abspath(args.path),
paramfile=args.paramfile,
output_file=args.output_file,
output_dir=args.output_dir,
min_bragg=args.min_bragg)
interceptor = SingleImageProcessor.for_single_image(info, iparams,
action_code=args.action,
verbose=args.verbose)
if args.output_dir is not None:
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
interceptor.start()
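# Usage sketch (hypothetical file and parameter names; assumes the
# iota.single_image dispatcher named above is installed):
#   iota.single_image image_00001.cbf --paramfile iota.param --action index --verbose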
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup
import urllib.request
import pytesseract
from PIL import Image
from PIL import ImageEnhance
def shibie(filepath):
    # ('shibie' = 'recognize') OCR the captcha image at `filepath`.
    # Open the image
    img = Image.open(filepath)
    img = img.convert('RGB')
    enhancer = ImageEnhance.Color(img)
    enhancer = enhancer.enhance(0)
    enhancer = ImageEnhance.Brightness(enhancer)
    enhancer = enhancer.enhance(2)
    enhancer = ImageEnhance.Contrast(enhancer)
    enhancer = enhancer.enhance(8)
    enhancer = ImageEnhance.Sharpness(enhancer)
    img = enhancer.enhance(20)
    # Pre-process the image to improve recognition accuracy:
    # convert it to grayscale
    img = img.convert('L')
    # binarize the grayscale image
    threshold = 140
    table = []
    for i in range(256):
        if i < threshold:
            table.append(0)
        else:
            table.append(1)
    out = img.point(table, '1')
    # NOTE: the binarized image `out` is computed but never used below;
    # recognition runs on the grayscale image converted back to RGB.
    img = img.convert('RGB')
    text = pytesseract.image_to_string(img)
    print(text)
    return text
i = 0
while True:
    # Create a session
    s = requests.Session()
    # Send the GET request for the login page
    deng_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
    }
    r = s.get(deng_url, headers=headers)
    # Build a soup object
    soup = BeautifulSoup(r.text, 'lxml')
    # Extract the captcha image URL
    image_src = 'https://so.gushiwen.org' + soup.find('img', id='imgCode')['src']
    # Download the captcha image locally
    r = s.get(image_src, headers=headers)
    with open('code1.png', 'wb') as fp:
        fp.write(r.content)
    # Grab the two hidden form fields on the page
    view_state = soup.find('input', id='__VIEWSTATE')['value']
    view_generator = soup.find('input', id='__VIEWSTATEGENERATOR')['value']
    code = shibie('code1.png')
    # Replay the captured POST request for the login form
    post_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
    data = {
        '__VIEWSTATE': view_state,
        '__VIEWSTATEGENERATOR': view_generator,
        'from': 'http://so.gushiwen.org/user/collect.aspx',
        'email': '',  # placeholder: the account email was removed from the source
        'pwd': '',    # placeholder: the account password was removed from the source
        'code': code,
        'denglu': '登录',  # the submit button label ('log in')
    }
    r = s.post(url=post_url, headers=headers, data=data)
    i += 1
    print('Login attempt %s' % i)
    # The page contains '退出登录' ('log out') only when we are signed in
    if '退出登录' in r.text:
        print('Login successful')
        break
|
nilq/baby-python
|
python
|
import random
import uuid
from datetime import timedelta
import re
from discord import AllowedMentions, ButtonStyle, Embed
from squid.bot import CommandContext, SquidPlugin, command
from squid.bot.errors import CommandFailed
from squid.utils import now, parse_time, s
from .views import GiveawayView
class Giveaways(SquidPlugin):
def __init__(self, bot):
self.bot = bot
        self.link_re = re.compile(
            r"https://discord\.com/channels/(\d+)/(\d+)/(\d+)"
        )
@command()
def giveaway(self, ctx: CommandContext):
"""Create, Manage, and End Giveaways"""
...
@giveaway.subcommand(name="start")
def start(
self,
ctx: CommandContext,
# Required
time: str,
winners: int,
prize: str,
# Optional
amari: int = 0,
mee6: int = 0,
required_roles: str = "",
bypass_roles: str = "",
blacklist_roles: str = "",
booster: bool = None,
dank_lottery: int = None,
):
"""Starts a giveaway"""
if len(time) <= 1:
raise CommandFailed("Invalid time format")
if time.isdigit():
time += "s"
delta = parse_time(time)
if delta > timedelta(weeks=8):
raise CommandFailed(
"Time too long!\nYou cannot set a giveaway for more than 8 weeks"
)
store_key = uuid.uuid4().hex
stamp = int((now() + delta).timestamp())
requirements = {
v: self.bot.requirements[v].convert(ctx, k)
for v, k in {
"required_roles": required_roles,
"bypass_roles": bypass_roles,
"blacklist_roles": blacklist_roles,
"amari": amari,
"mee6": mee6,
"booster": booster,
"danklottery": dank_lottery,
}.items()
if k
}
description = ctx.setting(
"description",
time=f"<t:{stamp}:R>",
stamp=str(stamp),
requirements="\n".join(
[self.bot.requirements[k].display(v) for k, v in requirements.items()]
),
**requirements,
prize=prize,
winners=winners,
host=f"<@{ctx.author.id}>",
donor=f"<@{ctx.author.id}>",
channel_id=ctx.channel_id,
)
message = ctx.send(
embed=Embed(
title=prize,
description=description,
timestamp=now() + delta,
color=self.bot.colors["primary"],
).set_footer(text=f"{int(winners)} winner{s(winners)} | Ends at "),
view=GiveawayView(
key=store_key,
style=ButtonStyle.secondary,
label="Join",
),
)
with ctx.bot.db as db:
db.giveaways.insert_one(
{
"host_id": str(ctx.author.id),
"message_id": str(message["id"]),
"channel_id": str(ctx.channel_id),
"guild_id": str(ctx.guild_id),
"store_key": store_key,
"end": now() + delta,
"winners": int(winners),
"start": now(),
"active": True,
"prize": prize,
"requirements": requirements,
"data": {},
}
)
return ctx.respond(
embed=Embed(
description=f"Started a giveaway for `{delta}`",
color=self.bot.colors["primary"],
),
ephemeral=True,
)
@giveaway.subcommand(name="end")
def end(self, ctx: CommandContext, link: str):
"""
Stop a giveaway
"""
match = self.link_re.match(link)
if not match:
raise CommandFailed("Invalid link")
_, channel_id, message_id = match.groups()
with ctx.bot.db as db:
x = db.giveaways.find_one_and_update(
{
"guild_id": str(ctx.guild_id),
"channel_id": str(channel_id),
"message_id": str(message_id),
},
{"$set": {"end": now()}},
)
if x and x["active"]:
return ctx.respond(
embed=Embed(
description=f"Stopped giveaway for `{x['prize']}`",
color=self.bot.colors["primary"],
),
ephemeral=True,
)
elif x and x["active"]:
return ctx.respond(
embed=Embed(
description=f"giveaway for `{x['prize']}` already ended",
color=self.bot.colors["secondary"],
),
ephemeral=True,
)
else:
raise CommandFailed("I cannot find that giveaway")
@giveaway.subcommand(name="reroll")
def reroll(self, ctx: CommandContext, giveaway_id: str, amount: int = 1) -> None:
"""Rerolls a giveaway"""
if not giveaway_id.isdigit():
raise CommandFailed("Invalid giveaway ID")
with ctx.bot.db as db:
doc = db.giveaways.find_one({"message_id": giveaway_id})
if doc["active"]:
raise CommandFailed("Giveaway is still active")
elif "users" not in doc:
raise CommandFailed("Giveaway has no entrants or is still ending")
users = doc.get("users", [])
random.seed(f'{doc["message_id"]}{self.bot.http.token}')
random.shuffle(users)
next_val = doc.get("next_user_seed_input", 0)
winners = []
while users and len(winners) < amount:
user = users[int(next_val % len(users))]
next_val += 1
winners.append(user)
if next_val != 0:
db.giveaways.find_one_and_update(
{"_id": doc["_id"]},
{"$set": {"next_user_seed_input": next_val}},
)
if winners:
winner_str = ", ".join([f"<@{i}>" for i in winners])
else:
winner_str = None
reroll_message = ctx.setting(
"reroll_message",
host=f"<@{ctx.author.id}>",
reroller=ctx.author,
reroll_channel=ctx.channel,
server=ctx.guild,
channel=ctx.channel,
link=f"https://discordapp.com/channels/{ctx.guild_id}/{doc['channel_id']}/{doc['message_id']}",
winners=(winner_str or "Nobody"),
prize=doc["prize"],
)
return ctx.respond(
content=reroll_message, allowed_mentions=AllowedMentions.none()
)
def setup(bot):
bot.add_plugin(Giveaways(bot))
bot.add_handler(GiveawayView)
|
nilq/baby-python
|
python
|
# Copyright 2016, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the structure necessary to implement the moving window.
"""
from fbpic.utils.threading import njit_parallel, prange
# Check if CUDA is available, then import CUDA functions
from fbpic.utils.cuda import cuda_installed
if cuda_installed:
from fbpic.utils.cuda import cuda, cuda_tpb_bpg_2d
class MovingWindow(object):
"""
Class that contains the moving window's variables and methods
"""
def __init__( self, comm, dt, v, time ):
"""
Initializes a moving window object.
Parameters
----------
comm: a BoundaryCommunicator object
Contains information about the MPI decomposition
and about the longitudinal boundaries
dt: float
The timestep of the simulation.
        v: float (meters per second)
            The speed of the moving window
time: float (seconds)
The time (in the simulation) at which the moving
window was initialized
"""
# Check that the boundaries are open
if ((comm.rank == comm.size-1) and (comm.right_proc is not None)) \
or ((comm.rank == 0) and (comm.left_proc is not None)):
raise ValueError('The simulation is using a moving window, but '
'the boundaries are periodic.\n Please select open '
'boundaries when initializing the Simulation object.')
# Attach moving window speed
self.v = v
# Attach time of last move
self.t_last_move = time - dt
# Get the positions of the global physical domain
zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
local=False, with_damp=False, with_guard=False )
# Attach reference position of moving window (only for the first proc)
# (Determines by how many cells the window should be moved)
if comm.rank == 0:
self.zmin = zmin_global_domain
def move_grids(self, fld, ptcl, comm, time):
"""
Calculate by how many cells the moving window should be moved.
If this is non-zero, shift the fields on the interpolation grid,
and increment the positions between which the continuously-injected
particles will be generated.
Parameters
----------
fld: a Fields object
Contains the fields data of the simulation
ptcl: a list of Particles object
This is passed in order to increment the positions between
which the continuously-injection particles will be generated
comm: an fbpic BoundaryCommunicator object
Contains the information on the MPI decomposition
time: float (seconds)
The global time in the simulation
This is used in order to determine how much the window should move
"""
# To avoid discrepancies between processors, only the first proc
# decides whether to send the data, and broadcasts the information.
dz = comm.dz
if comm.rank==0:
# Move the continuous position of the moving window object
self.zmin += self.v * (time - self.t_last_move)
# Find the number of cells by which the window should move
zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
local=False, with_damp=False, with_guard=False )
n_move = int( (self.zmin - zmin_global_domain)/dz )
else:
n_move = None
# Broadcast the information to all proc
if comm.size > 1:
n_move = comm.mpi_comm.bcast( n_move )
# Move the grids
if n_move != 0:
# Move the global domain
comm.shift_global_domain_positions( n_move*dz )
# Shift the fields
Nm = len(fld.interp)
for m in range(Nm):
# Modify the values of the corresponding z's
fld.interp[m].zmin += n_move*fld.interp[m].dz
fld.interp[m].zmax += n_move*fld.interp[m].dz
# Shift/move fields by n_move cells in spectral space
self.shift_spect_grid( fld.spect[m], n_move )
# Because the grids have just been shifted, there is a shift
# in the cell indices that are used for the prefix sum.
for species in ptcl:
if species.use_cuda:
species.prefix_sum_shift += n_move
# This quantity is reset to 0 whenever prefix_sum is recalculated
# Prepare the positions of injection for the particles
# (The actual creation of particles is done when the routine
# exchange_particles of boundary_communicator.py is called)
if comm.rank == comm.size-1:
for species in ptcl:
if species.continuous_injection:
# Increment the positions for the generation of particles
# (Particles are generated when `generate_particles` is called)
species.injector.increment_injection_positions(
self.v, time-self.t_last_move )
# Change the time of the last move
self.t_last_move = time
def shift_spect_grid( self, grid, n_move,
                          shift_rho=True, shift_currents=False ):
"""
Shift the spectral fields by n_move cells (with respect to the
spatial grid). Shifting is done either on the CPU or the GPU,
if use_cuda is True. (Typically n_move is positive, and the
fields are shifted backwards)
Parameters
----------
        grid: a SpectralGrid corresponding to one given azimuthal mode
Contains the values of the fields in spectral space,
and is modified by this function.
n_move: int
The number of cells by which the grid should be shifted
shift_rho: bool, optional
Whether to also shift the charge density
Default: True, since rho is only recalculated from
scratch when the particles are exchanged
shift_currents: bool, optional
Whether to also shift the currents
Default: False, since the currents are recalculated from
scratch at each PIC cycle
"""
if grid.use_cuda:
shift = grid.d_field_shift
# Get a 2D CUDA grid of the size of the grid
tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )
# Shift all the fields on the GPU
shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )
else:
shift = grid.field_shift
# Shift all the fields on the CPU
shift_spect_array_cpu( grid.Ep, shift, n_move )
shift_spect_array_cpu( grid.Em, shift, n_move )
shift_spect_array_cpu( grid.Ez, shift, n_move )
shift_spect_array_cpu( grid.Bp, shift, n_move )
shift_spect_array_cpu( grid.Bm, shift, n_move )
shift_spect_array_cpu( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_cpu( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_cpu( grid.Jp, shift, n_move )
shift_spect_array_cpu( grid.Jm, shift, n_move )
shift_spect_array_cpu( grid.Jz, shift, n_move )
@njit_parallel
def shift_spect_array_cpu( field_array, shift_factor, n_move ):
"""
Shift the field 'field_array' by n_move cells on CPU.
This is done in spectral space and corresponds to multiplying the
fields with the factor exp(i*kz_true*dz)**n_move .
Parameters
----------
field_array: 2darray of complexs
Contains the value of the fields, and is modified by
this function
shift_factor: 1darray of complexs
Contains the shift array, that is multiplied to the fields in
spectral space to shift them by one cell in spatial space
( exp(i*kz_true*dz) )
n_move: int
The number of cells by which the grid should be shifted
"""
Nz, Nr = field_array.shape
# Loop over the 2D array (in parallel over z if threading is enabled)
for iz in prange( Nz ):
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift the fields
for ir in range( Nr ):
field_array[iz, ir] *= power_shift
if cuda_installed:
@cuda.jit
def shift_spect_array_gpu( field_array, shift_factor, n_move ):
"""
Shift the field 'field_array' by n_move cells on the GPU.
This is done in spectral space and corresponds to multiplying the
fields with the factor exp(i*kz_true*dz)**n_move .
Parameters
----------
field_array: 2darray of complexs
Contains the value of the fields, and is modified by
this function
shift_factor: 1darray of complexs
Contains the shift array, that is multiplied to the fields in
spectral space to shift them by one cell in spatial space
( exp(i*kz_true*dz) )
n_move: int
The number of cells by which the grid should be shifted
"""
# Get a 2D CUDA grid
iz, ir = cuda.grid(2)
# Only access values that are actually in the array
if ir < field_array.shape[1] and iz < field_array.shape[0]:
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift fields
field_array[iz, ir] *= power_shift
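if __name__ == '__main__':
    # Minimal NumPy sketch of the spectral-shift identity used above: multiplying
    # the FFT of a field by exp(-2*pi*i*k*m/N) circularly shifts it by m cells in
    # real space (the arrays above use the analogous per-cell exp(i*kz*dz) factor).
    import numpy as np
    N, m = 8, 3
    f = np.arange(N, dtype=float)
    k = np.fft.fftfreq(N) * N                  # integer wavenumbers
    phase = np.exp(-2j * np.pi * k * m / N)    # one-cell shift factor, to the power m
    g = np.fft.ifft(np.fft.fft(f) * phase).real
    assert np.allclose(g, np.roll(f, m))       # matches a real-space shift by m cells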
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from swpp.models import Profile
from swpp.serializers import ProfileSerializer
from rest_framework import generics, mixins, permissions
from swpp.permissions import IsOwnerOrReadOnly
class ProfileList(generics.ListAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class ProfileDetails(generics.RetrieveUpdateAPIView):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
permission_classes = (IsOwnerOrReadOnly, )
def put(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
|
nilq/baby-python
|
python
|
KIND = {
'JOB': 'job',
'DEPLOYMENT': 'deployment'
}
COMMAND = {
'DELETE': 'delete',
'CREATE': 'create'
}
|
nilq/baby-python
|
python
|
# Copied from http://www.djangosnippets.org/snippets/369/
from django.utils.encoding import smart_unicode
from slughifi import slughifi
def slugify(s, entities=True, decimal=True, hexadecimal=True, model=None, slug_field='slug', pk=None):
    # NOTE: Python 2 only (smart_unicode). The entities/decimal/hexadecimal
    # arguments are kept for signature compatibility with the original snippet,
    # but slughifi handles entity decoding itself, so they are unused here.
    s = smart_unicode(s)
    # we don't want a string > 40 characters
    if len(s) > 40:
        s = s[:40]
    s = slughifi(s)
    slug = s
    if model:
        # return a unique slug for the model (appending an integer counter)
        def get_query():
            query = model.objects.filter(**{slug_field: slug})
            if pk:
                query = query.exclude(pk=pk)
            return query
        counter = 2
        while get_query().exists():
            slug = "%s-%s" % (s, counter)
            counter += 1
    return slug
nilq/baby-python
|
python
|
from typing import Optional
from typing import Tuple
import attr
@attr.s(auto_attribs=True)
class SlotAttentionParams:
# model configs
    resolution: Tuple[int, int] = (128, 128)  # since we are not using ViT
# Slot Attention module params
num_slots: int = 7 # at most 6 obj per image/video
# dim of slots embedding
slot_size: int = 64
num_iterations: int = 3
# MLP hidden size in Slot Attention
slot_mlp_size: int = 128 # FFN after cross attention
# whether treat bg slot separately
use_bg_sep_slot: bool = False
# setting about sem-pos separate model
use_sempos_sep: bool = True
# encoder params
# UNet as encoder
use_unet: bool = False
# StackedResBlocks as encoder
use_resnet: bool = False
# Conv encoder-decoder
out_features: int = 64
kernel_size: int = 5
enc_pos_size: int = 64 # number of dims for positional information
enc_channels: Tuple[int, ...] = (3, 64, 64, 64, 64)
enc_resolution: Tuple[int, int] = resolution # image size
enc_norm: str = ''
# decoder params
    dec_pos_size: Optional[int] = None  # if an int, use concat instead of add
dec_resolution: Tuple[int, int] = (8, 8)
dec_channels: Tuple[int, ...] = (64, 64, 64, 64, 64)
dec_norm: str = ''
# use self-entropy loss to masks
use_entropy_loss: bool = False
entropy_loss_w: float = 1e-3
# architecture of CLIP pre-trained model
use_clip_vision: bool = False
clip_arch: str = 'ViT-B/32'
clip_vision_channel: int = 64
clip_text_channel: int = 512
# Text2Slot model
text_encoder: str = 'clip'
context_len: int = 0
use_text2slot: bool = True
text2slot_arch: str = 'MLP' # or 'Transformer' or 'DETR'
# for MLP
    text2slot_hidden_sizes: Tuple[int, ...] = (512, )
normalize_slots: bool = True
# data
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video_4obj"
# data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/CLEVR_viewpoint_video"
data_root: str = "/scratch/ssd004/scratch/ziyiwu/data/clevr_video/train/"
shuffle_obj: bool = False
prompt: str = 'a {color} {shape}'
pad_text: str = 'background'
# Normalization for natural img or original slot attention one
    simple_normalize: bool = True  # since we are not using ViT
    center_crop: Optional[Tuple[int, int]] = None  # e.g. (128, 128)
# training settings
gpus: int = 4
batch_size: int = 64 * 4
val_batch_size: int = 64 * 4
max_epochs: int = 16
num_sanity_val_steps: int = 1
num_train_images: Optional[int] = None
num_val_images: Optional[int] = None
is_logger_enabled: bool = True
is_verbose: bool = True
num_workers: int = 6
n_samples: int = 5
# optimization settings
cosine_decay: bool = True
lr: float = 0.0008
warmup_steps_pct: float = 0.025
decay_steps_pct: float = 0.2
scheduler_gamma: float = 0.5
weight_decay: float = 0.0
grad_clip_norm: float = 0.2
|
nilq/baby-python
|
python
|
from verifai.simulators.car_simulator.examples.control_utils.LQR_computation import *
from verifai.simulators.car_simulator.simulator import *
from verifai.simulators.car_simulator.lane import *
from verifai.simulators.car_simulator.car_object import *
from verifai.simulators.car_simulator.client_car_sim import *
import numpy as np
from dotmap import DotMap
def controller(x_trajectory, u_trajectory, control_params):
x, y, v, theta = x_trajectory[-1]
wheelbase = control_params.wheelbase
a_star = control_params.a_star
v_star = control_params.v_star
control_freq = control_params.control_freq
dt = control_params.dt
Q = control_params.Q
R = control_params.R
A, B = extract_AB(speed=v, dt=dt, wheelbase=wheelbase)
if len(u_trajectory)%control_freq == 0:
a = a_star if np.linalg.norm(v - v_star) > 0.1 else 0.0
c = np.array([0.0, a * control_freq * dt, 0.0])
K, k = discrete_LQR(A, B, Q, R, c)
u = K.dot(np.array([[x],
[v - v_star],
[-theta+np.pi/2]])) + k
u = min(float(u), np.pi / 4.)
u = max(float(u), -np.pi / 4.)
control = np.array([u, a])
else:
control = u_trajectory[-1]
return control
def lanekeeping(sample, control_params, width=0.13):
x_init = sample.init_conditions.x_init[0]
theta_init = -sample.init_conditions.theta_init[0] + np.pi/2
v_init = 0.0
y_init = 0.0
x0 = np.array([x_init, y_init, v_init, theta_init])
a_star = control_params.a_star
u_domain = {'omega':[-np.pi/4, np.pi/4],
'acc':[-a_star, a_star]}
compute_control = lambda x, u: controller(x_trajectory=x, u_trajectory=u,
control_params=control_params)
car = bicycle_model(x0=x0, u_domain=u_domain, compute_control=compute_control,
wheelbase = control_params.wheelbase, dt = control_params.dt,
color='red')
lanes = []
lanes.append(straight_lane([0., -1.], [0., 1.], width))
lanes.append(lanes[0].shifted(1))
lanes.append(lanes[0].shifted(-1))
world = simulator_world(lanes=lanes, cars=[car])
    sim = simulator(dt=control_params.dt, iters=100, sprite_scale=control_params.sprite_scale,
                    window_size=control_params.window_size, magnify=0.25)
sim.init_simulator(world=world, task_name='Lane Keeping')
sim.run()
sim.exit_simulation()
traj_x, traj_y, _, _ = np.array(car.trajectory).T
    traj = {}
traj['xdeviation'] = [(j * control_params.dt, 0.5 - np.abs(v)) for j, v in enumerate(traj_x)]
return traj
def lanekeeping_simulator(sample):
print(sample)
control_params = DotMap()
    control_params.wheelbase = 2.995
control_params.a_star = 3.968
control_params.v_star = sample.init_conditions.cruising_speed[0]*5./18.
control_params.dt = 0.032
control_params.control_freq = 2
control_params.R = 50*np.identity(1)
    control_params.Q = np.diag([100.0, 0.0, 5.0])
control_params.sprite_scale = 1/800
control_params.window_size = 800
width = 1.0
return lanekeeping(sample, control_params, width)
PORT = 8888
BUFSIZE = 4096
N_SIM_STEPS = 100
simulation_data = DotMap()
simulation_data.port = PORT
simulation_data.bufsize = BUFSIZE
simulation_data.simulation = lanekeeping_simulator
client_task = ClientCar(simulation_data)
while True:
if not client_task.run_client():
print("End of all simulations")
break
|
nilq/baby-python
|
python
|
import numpy as np
from mesostat.metric.impl.mar import unstack_factor, rel_err
def predict(x, alpha, u, beta):
# return np.einsum('ai,ijk', alpha, x) + np.einsum('ai,ijk', beta, u)
return x.dot(alpha.T) + u.dot(beta.T)
def fit_mle(x, y, u):
# # Construct linear system for transition matrices
# M11 = np.einsum('ajk,bjk', x, y)
# M12 = np.einsum('ajk,bjk', x, x)
# M13 = np.einsum('ajk,bjk', x, u)
# M21 = np.einsum('ajk,bjk', u, y)
# M22 = M13.T #np.einsum('ajk,bjk', u, x)
# M23 = np.einsum('ajk,bjk', u, u)
# Construct linear system for transition matrices
# NOTE: In this form, trials and timesteps are concatenated, so there is no explicit trial dimension
M11 = x.T.dot(y)
M12 = x.T.dot(x)
M13 = x.T.dot(u)
M21 = u.T.dot(y)
M22 = M13.T
M23 = u.T.dot(u)
# Solve system
M12INV = np.linalg.inv(M12)
M23INV = np.linalg.inv(M23)
TMP11 = M11 - M13.dot(M23INV.dot(M21))
TMP12 = M12 - M13.dot(M23INV.dot(M22))
TMP21 = M21 - M22.dot(M12INV.dot(M11))
TMP22 = M23 - M22.dot(M12INV.dot(M13))
alpha = np.linalg.inv(TMP12).dot(TMP11).T
beta = np.linalg.inv(TMP22).dot(TMP21).T
return alpha, beta
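if __name__ == '__main__':
    # Quick self-check sketch: on noiseless synthetic data, the Schur-complement
    # solve above should recover the true transition matrices (to numerical precision).
    rng = np.random.default_rng(0)
    T, n, p = 500, 3, 2
    alpha_true = rng.normal(size=(n, n))
    beta_true = rng.normal(size=(n, p))
    x = rng.normal(size=(T, n))
    u = rng.normal(size=(T, p))
    y = predict(x, alpha_true, u, beta_true)
    alpha_hat, beta_hat = fit_mle(x, y, u)
    assert np.allclose(alpha_hat, alpha_true) and np.allclose(beta_hat, beta_true)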
|
nilq/baby-python
|
python
|
'''
The MIT License (MIT)
Copyright © 2021 Opentensor.ai
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the “Software”), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
"""
DPN synapse
Bittensor endpoint trained on PIL images to detect objects using DPN.
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from types import SimpleNamespace
from munch import Munch
import bittensor
from routers.pkm import PKMRouter
from bittensor.utils.batch_transforms import Normalize
class DPNSynapse(bittensor.synapse.Synapse):
""" Bittensor endpoint trained on PIL images to detect objects using an DPN.
"""
def __init__( self, config: Munch = None, **kwargs):
r""" Init a new DPN synapse module.
Args:
config (:obj: `munch.Munch`, `required`)
munch namespace config item.
"""
super(DPNSynapse, self).__init__(config = config, **kwargs)
        if config is None:
config = DPNSynapse.default_config()
bittensor.config.Config.update_with_kwargs(config.synapse, kwargs)
DPNSynapse.check_config(config)
self.config = config
in_planes, out_planes = config.synapse.in_planes, config.synapse.out_planes
num_blocks, dense_depth = config.synapse.num_blocks, config.synapse.dense_depth
# Transform Network
""" Transform network.
Layers take in image inputs normalizes them and applies
4 convolutional layers.
Image encoder: transforms PIL-encoded tensors to a common shape.
[batch_size, channels, rows, cols] -> [batch_size, -1, -1, -1]
Output: [batch_size, self.transform_dim (9728)]
"""
self.transform = Normalize((0.1307,), (0.3081,), device=self.device)
self.adaptive_pool = nn.AdaptiveAvgPool2d((32, 32))
self.transform_conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.transform_bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.transform_layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.transform_layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.transform_layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=1)
self.transform_layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.transform_dim = (out_planes[3] * 4)+(((num_blocks[3]+1) * 4)*dense_depth[3])
# dendrite: (PKM layer) queries network using pooled embeddings as context.
# [batch_size, -1] -> topk * [batch_size, bittensor.__network_dim__]
self.router = PKMRouter(config, query_dim = self.transform_dim)
# Context layers.
"""
Distillation model for remote context. This layer takes input
coming from transform layer, and runs it through 3 linear layers,
projecting it to bittensor.__network_dim__.
"""
self.context_layer1 = nn.Linear(self.transform_dim, 512)
self.context_layer2 = nn.Linear(512, 256)
self.context_layer3 = nn.Linear(256, bittensor.__network_dim__)
# hidden layer.
self.hidden_layer1 = nn.Linear(self.transform_dim + bittensor.__network_dim__, 512)
self.hidden_layer2 = nn.Linear(512, 256)
self.hidden_layer3 = nn.Linear(256, bittensor.__network_dim__)
# Layers to project target down to target size passed by config
# (number of classes)
self.target_layer1 = nn.Linear(bittensor.__network_dim__, 128)
self.target_layer2 = nn.Linear(128, self.config.synapse.target_dim)
self.to(self.device)
@staticmethod
def default_config() -> Munch:
        parser = argparse.ArgumentParser()
        DPNSynapse.add_args(parser)
        config = bittensor.config.Config.to_config(parser)
        return config
@staticmethod
def add_args(parser: argparse.ArgumentParser):
r""" This function adds the configuration items for the DPN synapse.
        These args are used to instantiate a Dual Path model.
Instantiating a configuration with the defaults will yield a "shallow" DPN-26 configuration.
For deeper network configurations, it is possible to set the num_blocks parameter to (3, 4, 20, 3) for a
DPN-92.
For DPN-98 set the following:
in_planes: (160, 320, 640, 1280)
out_planes: (256, 512, 1024, 2048)
num_blocks: (3, 6, 20, 3)
dense_depth: (16, 32, 32, 128)
"""
def to_list(arg):
return [int(i) for i in arg.split(",")]
parser.add_argument('--synapse.in_planes', default='160, 320, 640, 1280', action="append", type=to_list)
parser.add_argument('--synapse.out_planes', default='256, 512, 1024, 2048', action="append", type=to_list)
parser.add_argument('--synapse.num_blocks', default='3, 6, 20, 3', action="append", type=to_list)
parser.add_argument('--synapse.dense_depth', default='16, 32, 32, 128', action="append", type=to_list)
parser.add_argument('--synapse.target_dim', default=10, type=int, help='Final logit layer dimension. i.e. 10 for CIFAR-10.')
parser = PKMRouter.add_args(parser)
@staticmethod
def check_config(config: Munch):
        assert isinstance(config.synapse.in_planes, list), 'synapse.in_planes must be a list, got {}'.format(config.synapse.in_planes)
        assert isinstance(config.synapse.out_planes, list), 'synapse.out_planes must be a list, got {}'.format(config.synapse.out_planes)
        assert isinstance(config.synapse.num_blocks, list), 'synapse.num_blocks must be a list, got {}'.format(config.synapse.num_blocks)
        assert isinstance(config.synapse.dense_depth, list), 'synapse.dense_depth must be a list, got {}'.format(config.synapse.dense_depth)
        assert all(isinstance(el, int) for el in config.synapse.in_planes), 'synapse.in_planes must be a list of ints, got {}'.format(config.synapse.in_planes)
        assert all(isinstance(el, int) for el in config.synapse.out_planes), 'synapse.out_planes must be a list of ints, got {}'.format(config.synapse.out_planes)
        assert all(isinstance(el, int) for el in config.synapse.num_blocks), 'synapse.num_blocks must be a list of ints, got {}'.format(config.synapse.num_blocks)
        assert all(isinstance(el, int) for el in config.synapse.dense_depth), 'synapse.dense_depth must be a list of ints, got {}'.format(config.synapse.dense_depth)
def forward_image ( self, images: torch.Tensor):
r""" Forward image inputs through the DPN synapse .
Args:
inputs (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, channels, rows, cols)`, `required`):
Image tensors produced by calling PIL.toTensor() and with sequence dimension.
Returns:
hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_dim, bittensor.__network_dim__)`, `required`):
Hidden layer encoding produced by using local_context.
"""
# images: remove sequence dimension from images.
# images.shape = [batch_size, channels, rows, cols]
images = images.view(images.shape[0] * images.shape[1], images.shape[2], images.shape[3], images.shape[4])
# hidden: hidden layer using local context for local computation only.
# hidden.shape = [batch_size, __network_dim__]
hidden = self.forward (images = images.to(self.device), remote = False).local_hidden
# hidden: re-add sequence dimension to outputs.
# hidden.shape = [batch_size, sequence_dim, __network_dim__]
hidden = torch.unsqueeze(hidden, 1)
return hidden
def local_forward ( self, images: torch.Tensor, targets: torch.Tensor = None ) -> SimpleNamespace:
r""" Forward pass non-sequential image inputs and targets through the DPN Synapse.
Args:
images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, channels, rows, cols)`, `required`):
PIL.toTensor() encoded images.
            targets (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.target_size)`, `optional`):
                Image labels.
Returns:
SimpleNamespace (
local_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`):
Pre-Hidden layer context, trained to match the remote context.
local_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `required`):
Hidden layer produced from the context.
local_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`):
FFNN Target predictions using local_context.
local_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
FFNN Classification loss using local_context.
local_accuracy (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
Accuracy of target predictions.
transform (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, transform_dim)`, `optional`):
transformation of various sized images to batch-size transform dim.
)
"""
# Return vars to be filled.
output = SimpleNamespace ()
r"""
Transform the images into a common shape (32x32)
"""
# transform: transform images to common shape.
# transform.shape = [batch_size, self.transform_dim]
transform = self.transform(images)
transform = self.adaptive_pool(transform)
transform = F.relu(self.transform_bn1(self.transform_conv1(transform.detach())))
transform = self.transform_layer1(transform)
transform = self.transform_layer2(transform)
transform = self.transform_layer3(transform)
transform = self.transform_layer4(transform)
transform = F.avg_pool2d(transform, 4)
output.transform = torch.flatten(transform, start_dim=1)
# local_context: distillation model for remote_context.
# local_context.shape = [batch_size, bittensor.__network_dim__]
local_context = self.context_layer1(output.transform.detach())
local_context = self.context_layer2(local_context)
output.local_context = self.context_layer3(local_context)
# local_hidden: hidden layer encoding using local_context.
# local_hidden.shape = [batch_size, bittensor.__network_dim__]
local_hidden = torch.cat([output.transform, output.local_context], dim=1)
local_hidden = self.hidden_layer1(local_hidden)
local_hidden = self.hidden_layer2(local_hidden)
output.local_hidden = self.hidden_layer3(local_hidden)
if targets is not None:
# local_target: projection of local_hidden onto target dimension.
# local_target.shape = [batch_size, target_dim]
targets.to(self.device)
local_target = self.target_layer1(output.local_hidden)
local_target = self.target_layer2(local_target)
output.local_target = F.log_softmax(local_target, dim=1)
# local_target_loss: loss between local_target and passed targets.
# local_target_loss.shape = [1]
output.local_target_loss = F.nll_loss(output.local_target, targets)
# Record extra metadata accuracy.
max_logit = local_target.data.max(1, keepdim=True)[1]
correct = max_logit.eq( targets.data.view_as(max_logit) ).sum()
output.local_accuracy = (100.0 * correct) / targets.shape[0]
return output
def remote_forward(self, neuron: bittensor.neuron.Neuron, images: torch.Tensor, targets: torch.Tensor = None) -> SimpleNamespace:
"""
Forward pass non-sequential image inputs and targets through the synapse. Makes RPC queries to downstream neurons.
Args:
neuron (:obj: `bittensor.neuron.Neuron`, `required`):
Bittensor neuron, used for making queries to the remote network.
images (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, channels, rows, cols)`, `required`):
PIL.toTensor() encoded images.
targets (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`, defaults to None):
Image labels.
Returns:
self.local_forward() + SimpleNamespace (
router (:obj:`SimpleNamespace`, `required`):
Outputs from the pkm dendrite remote call.
distillation_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
Distillation loss between the local and remote context.
remote_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__network_dim__)`, `optional`):
Hidden layer encoding produced using the remote context.
remote_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_dim)`, `optional`):
FFNN Target predictions using the remote_context.
remote_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `optional`):
FFNN Classification loss using the remote_context.
)
"""
# Call the local forward pass.
# output = bittensor.SynapseOutput
output = self.local_forward( images, targets )
# Make remote queries using the PKMRouter.
# remote_context: responses from a bittensor remote network call.
# remote_context.shape = [batch_size, bittensor.__network_dim__]
images = torch.unsqueeze(images, 1)
output.router = self.router.forward_image( neuron, images, output.transform )
remote_context = torch.squeeze( output.router.response, 1 ).to(self.device)
# Distill the local context to match the remote context.
# distillation_loss: distillation loss between local_context and remote_context
# distillation_loss.shape = [1]
output.distillation_loss = F.mse_loss(output.local_context, remote_context.detach() )
# remote_hidden: hidden layer encoding using remote_context.
# remote_hidden.shape = [batch_size, bittensor.__network_dim__]
remote_hidden = torch.cat([output.transform, remote_context], dim=1)
remote_hidden = self.hidden_layer1(remote_hidden)
remote_hidden = self.hidden_layer2(remote_hidden)
output.remote_hidden = self.hidden_layer3(remote_hidden)
if targets is not None:
# remote_target: projection of remote_hidden onto target dimension.
# remote_target.shape = [batch_size, config.target_size]
remote_target = self.target_layer1(output.remote_hidden)
remote_target = self.target_layer2(remote_target)
output.remote_target = F.log_softmax(remote_target, dim=1)
# remote_target_loss: loss between remote_target and passed targets.
# remote_target_loss.shape = [1]
output.remote_target_loss = F.nll_loss(output.remote_target, targets)
return output
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
""" Generates a sequential container containing Bottleneck layers.
Args:
in_planes (tuple):
4-element tuple describing the in_planes config.
out_planes (tuple):
4-element tuple describing the out_planes config.
num_blocks (tuple):
4-element tuple describing the number of blocks at this layer.
dense_depth (tuple):
4-element tuple describing the depth of this layer.
stride (int):
Convolutional stride length.
Returns:
nn.Sequential: A torch.nn sequential container containing the layers outlined in the inputs.
"""
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(self.Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(DPNSynapse.Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes + dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes + dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
|
nilq/baby-python
|
python
|
from srf.io.listmode import save_h5
import numpy as np
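# Each record in the raw file is assumed to hold 7 float32 values: the (x, y, z)
# position of the first detection, the (x, y, z) of the second, and one trailing
# column that is ignored here; every coincidence event is given unit weight.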
data = np.fromfile("normal_scan_true.txt", dtype=np.float32).reshape(-1,7)
result = {'fst': data[:, :3], 'snd': data[:, 3:6], 'weight': np.ones_like(data[:,0])}
save_h5('input.h5', result)
|
nilq/baby-python
|
python
|
from flask import Flask
from . import api, web
app = Flask(
__name__,
static_url_path='/assets',
static_folder='static',
template_folder='templates')
app.config['SECRET_KEY'] = 'secret' # this is fine if running locally
app.register_blueprint(api.bp)
app.register_blueprint(web.bp)
|
nilq/baby-python
|
python
|
default_app_config = 'user_deletion.apps.UserDeletionConfig'
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Copyright 2012-2021 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from smartlingApiSdk.ApiV2 import ApiV2
class JobBatchesV2Api(ApiV2):
def __init__(self, userIdentifier, userSecret, projectId, proxySettings=None, permanentHeaders={}, env='prod'):
ApiV2.__init__(self, userIdentifier, userSecret, projectId, proxySettings, permanentHeaders=permanentHeaders, env=env)
def createJobBatchV2(self, authorize, translationJobUid, fileUris, localeWorkflows, **kwargs):
"""
method : POST
api url : /job-batches-api/v2/projects/{projectId}/batches
as curl : curl -X POST "https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches" -H "Authorization: Bearer $smartlingToken" -H "Content-Type: application/json" -d '{"translationJobUid": "$translationJobUid", "authorize": true, "fileUris": ["example.json", "test.xml"]}'
Responses:
200 : OK
404 : provided translationJobUid is not found in the TMS
details : https://api-reference.smartling.com/#operation/createJobBatchV2
"""
kw = {
'authorize':authorize,
'translationJobUid':translationJobUid,
'fileUris':fileUris,
'localeWorkflows':localeWorkflows,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches', **kwargs)
response, status = self.commandJson('POST', url, kw)
return response, status
def getJobBatchesListV2(self, translationJobUid='', status='', sortBy='createdDate', orderBy='desc', offset=0, limit=20, **kwargs):
"""
method : GET
api url : /job-batches-api/v2/projects/{projectId}/batches
as curl : curl -X GET \'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches?translationJobUid={translationJobUid}&status={status}&sortBy=createdDate&orderBy=desc&offset=0&limit=20' \-H "Authorization: Bearer $smartlingToken"
Responses:
200 : OK
details : https://api-reference.smartling.com/#operation/getJobBatchesListV2
"""
kw = {
'translationJobUid':translationJobUid,
'status':status,
'sortBy':sortBy,
'orderBy':orderBy,
'offset':offset,
'limit':limit,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches', **kwargs)
response, status = self.command('GET', url, kw)
return response, status
def getJobBatchStatusV2(self, batchUid, **kwargs):
"""
method : GET
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}
Responses:
200 : OK
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/getJobBatchStatusV2
"""
kw = {
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}', batchUid=batchUid, **kwargs)
response, status = self.command('GET', url, kw)
return response, status
def processBatchActionV2(self, batchUid, action, fileUri, reason, **kwargs):
"""
method : PUT
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}
as curl : curl -X PUT \'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches/$batchUid' \-H "Authorization: Bearer $smartlingToken" \-H "Content-Type: application/json" \-d '{ "action": "CANCEL_FILE", "fileUri": "file1.xml", "reason": "Requested asset doesn't exist in Zendesk" }'
Responses:
200 : SUCCESS
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/processBatchActionV2
"""
kw = {
'action':action,
'fileUri':fileUri,
'reason':reason,
}
kw.update(kwargs)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}', batchUid=batchUid, **kwargs)
response, status = self.commandJson('PUT', url, kw)
return response, status
def uploadFileToJobBatchV2(self, batchUid, file, fileUri, fileType, authorize=False, localeIdsToAuthorize=[], callbackUrl='', directives={}, **kwargs):
"""
method : POST
api url : /job-batches-api/v2/projects/{projectId}/batches/{batchUid}/file
as curl : curl -X POST \'https://api.smartling.com/job-batches-api/v2/projects/$smartlingProjectId/batches/{batchUid}/file' \-H "Authorization: Bearer $smartlingToken" \-F "file=@file.properties;type=text/plain" \-F "fileUri=file.properties" \-F "fileType=javaProperties" \-F "localeIdsToAuthorize[]=fr-FR" \-F "localeIdsToAuthorize[]=ru-RU"
Responses:
202 : ACCEPTED
404 : Batch provided in path is not found
details : https://api-reference.smartling.com/#operation/uploadFileToJobBatchV2
"""
kw = {
'file':self.processFile(file),
'fileUri':fileUri,
'fileType':fileType,
'authorize':authorize,
'localeIdsToAuthorize':localeIdsToAuthorize,
'callbackUrl':callbackUrl,
}
self.addLibIdDirective(kw)
self.processDirectives(kw, directives)
url = self.urlHelper.getUrl('/job-batches-api/v2/projects/{projectId}/batches/{batchUid}/file', batchUid=batchUid)
return self.uploadMultipart(url, kw)
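# Hedged usage sketch (assumption: the credentials, UIDs, file names, and the
# localeWorkflows payload below are placeholders, not part of the original
# source). Mirrors the curl example in the createJobBatchV2 docstring above.
if __name__ == '__main__':
    api = JobBatchesV2Api('userIdentifier', 'userSecret', 'projectId')
    response, status = api.createJobBatchV2(
        authorize=True,
        translationJobUid='abc123abc123',
        fileUris=['example.json', 'test.xml'],
        localeWorkflows=[{'targetLocaleId': 'de-DE', 'workflowUid': 'wf1'}],
    )
    print(status, response)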
|
nilq/baby-python
|
python
|
# ===============================================================================
# NAME: SerialHVisitor.py
#
# DESCRIPTION: A visitor responsible for the generation of header file
# for each serializable class.
#
# AUTHOR: reder
# EMAIL: reder@jpl.nasa.gov
# DATE CREATED : June 4, 2007
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import datetime
import logging
import os
import sys
from getpass import getuser
from fprime_ac.generators import formatters
# from fprime_ac.utils import DiffAndRename
from fprime_ac.generators.visitors import AbstractVisitor
#
# Python extension modules and custom interfaces
#
# from Cheetah import Template
# from fprime_ac.utils import version
from fprime_ac.utils import ConfigManager, DictTypeConverter
#
# Import precompiled templates here
#
try:
from fprime_ac.generators.templates.serialize import SerialHeader
from fprime_ac.generators.templates.serialize import SerialImport
from fprime_ac.generators.templates.serialize import SerialBody
except ImportError:
print("ERROR: must generate python templates first.")
sys.exit(-1)
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
typelist = ["U8", "I8", "U16", "I16", "U32", "I32", "U64", "I64", "F32", "F64", "bool"]
#
# Module class or classes go here.
class SerializableVisitor(AbstractVisitor.AbstractVisitor):
"""
A visitor class responsible for generation of component header
classes in C++.
"""
__instance = None
__config = None
__fp = None
__form = None
__form_comment = None
def __init__(self):
"""
Constructor.
"""
super().__init__()
self.__config = ConfigManager.ConfigManager.getInstance()
self.__form = formatters.Formatters()
self.__form_comment = formatters.CommentFormatters()
DEBUG.info("SerializableVisitor: Instanced.")
self.bodytext = ""
self.prototypetext = ""
def _get_args_string(self, obj):
"""
Return a string of (type, name) args, comma separated
for use in templates that generate prototypes.
"""
arg_str = ""
for (name, mtype, size, format, comment) in obj.get_members():
if isinstance(mtype, tuple):
arg_str += "{} {}, ".format(mtype[0][1], name)
elif mtype == "string":
arg_str += "const {}::{}String& {}, ".format(obj.get_name(), name, name)
elif mtype not in typelist:
arg_str += "const {}& {}, ".format(mtype, name)
elif size is not None:
arg_str += "const {}* {}, ".format(mtype, name)
arg_str += "NATIVE_INT_TYPE %sSize, " % (name)
else:
arg_str += "{} {}".format(mtype, name)
arg_str += ", "
arg_str = arg_str.strip(", ")
return arg_str
def _get_conv_mem_list(self, obj):
"""
Return a list of port argument tuples
"""
arg_list = list()
for (name, mtype, size, format, comment) in obj.get_members():
typeinfo = None
if isinstance(mtype, tuple):
mtype = mtype[0][1]
typeinfo = "enum"
elif mtype == "string":
mtype = "{}::{}String".format(obj.get_name(), name)
typeinfo = "string"
elif mtype not in typelist:
typeinfo = "extern"
arg_list.append((name, mtype, size, format, comment, typeinfo))
return arg_list
def _get_enum_string_list(self, enum_list):
""""""
enum_tuple = enum_list[0]
enum_list = enum_list[1]
enum_str_list = []
for e in enum_list:
# No value, No comment
if (e[1] is None) and (e[2] is None):
s = "%s," % (e[0])
# No value, With comment
elif (e[1] is None) and (e[2] is not None):
s = "{}, // {}".format(e[0], e[2])
# With value, No comment
elif (e[1] is not None) and (e[2] is None):
s = "{} = {},".format(e[0], e[1])
# With value and comment
elif (e[1] is not None) and (e[2] is not None):
s = "%s = %s, // %s" % (e)
else:
pass
enum_str_list.append(s)
enum_str_list[-1] = enum_str_list[-1].replace(",", "")
return (enum_tuple, enum_str_list)
def _writeTmpl(self, c, visit_str):
"""
Wrapper to write tmpl to files desc.
"""
DEBUG.debug("SerializableVisitor:%s" % visit_str)
DEBUG.debug("===================================")
DEBUG.debug(c)
self.__fp.writelines(c.__str__())
DEBUG.debug("===================================")
def initFilesVisit(self, obj):
"""
Defined to generate files for generated code products.
@param obj: the instance of the concrete element to operation on.
"""
# Build filename here...
# file location will be based on namespace
namespace = obj.get_namespace()
dict_dir = os.environ["DICT_DIR"]
if namespace is None:
output_dir = "%s/serializable/" % (dict_dir)
else:
output_dir = "{}/serializable/{}".format(
dict_dir, namespace.replace("::", "/")
)
# make directory
if not (os.path.isdir(output_dir)):
os.makedirs(output_dir)
pyfile = output_dir + "/" + obj.get_name() + ".py"
# make empty __init__.py
open("{}/{}".format(output_dir, "__init__.py"), "w").close()
# Open file for writing here...
DEBUG.info("Open file: %s" % pyfile)
self.__fp = open(pyfile, "w")
if self.__fp is None:
raise Exception("Could not open %s file.") % pyfile
DEBUG.info("Completed")
def startSourceFilesVisit(self, obj):
"""
Defined to generate header for command python class.
@param obj: the instance of the command model to visit.
"""
c = SerialHeader.SerialHeader()
d = datetime.datetime.now()
c.date = d.strftime("%A, %d %B %Y")
c.user = getuser()
c.source = obj.get_xml_filename()
self._writeTmpl(c, "startSourceFilesVisit")
def includes1Visit(self, obj):
"""
Defined to generate includes within a file.
Usually used for the base classes but also for Serial types
@param args: the instance of the concrete element to operation on.
"""
c = SerialImport.SerialImport()
self._writeTmpl(c, "includes1Visit")
def includes2Visit(self, obj):
pass
def namespaceVisit(self, obj):
pass
def publicVisit(self, obj):
"""
Defined to generate public stuff within a class.
@param args: the instance of the concrete element to operation on.
"""
c = SerialBody.SerialBody()
c.name = obj.get_name()
c.mem_list = list()
for (n, t, s, f, comment) in obj.get_members():
# convert XML types to Python classes
(
type_string,
dontcare,
type_name,
use_size,
) = DictTypeConverter.DictTypeConverter().convert(t, s)
if type_name == "enum":
format_string = DictTypeConverter.DictTypeConverter().format_replace(
f, 0, "d", "s"
)
# check for an error
if format_string is None:
PRINT.info(
'Member %s in serializable %s had error processing format specifier "%s"'
% (n, c.name, f)
)
sys.exit(-1)
else:
f = format_string
c.mem_list.append((n, type_string, f, int(s) if use_size else 1))
self._writeTmpl(c, "publicVisit")
def protectedVisit(self, obj):
pass
def privateVisit(self, obj):
pass
def finishSourceFilesVisit(self, obj):
self.__fp.close()
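# Hedged sketch (assumption: synthetic enum data shaped as
# (enum_tuple, [(name, value, comment), ...]), which is what
# _get_enum_string_list expects; not part of the original source).
# The method never touches self, so it can be exercised without
# constructing the full visitor (which needs a ConfigManager).
if __name__ == "__main__":
    _, lines = SerializableVisitor._get_enum_string_list(
        None,
        ("Color", [("RED", None, None), ("GREEN", "1", "go"), ("BLUE", "2", "sky")]),
    )
    print(lines)  # ['RED,', 'GREEN = 1, // go', 'BLUE = 2 // sky']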
|
nilq/baby-python
|
python
|
import time
import os
import binascii
import re
from datetime import datetime
from bson.json_util import dumps, loads
from flask.helpers import get_template_attribute
from flask import render_template
from init import app, rdb
from utils.jsontools import *
from utils.dbtools import makeUserMeta
from db import tagdb, db
from utils.crypto import *
from utils.exceptions import UserError
from bson import ObjectId
import redis_lock
from config import UserConfig
from utils.logger import log, log_ne
from services.tcb import filterOperation
from services.emailSender import send_noreply
from services.comment import listThread
def query_user_basic_info(uid) :
obj = db.users.find_one({"_id": ObjectId(uid)})
if obj is None :
return None
return obj['profile']
def verify_session(sid, stype) :
session_raw = rdb.get(sid)
if session_raw is None :
    raise UserError('INCORRECT_SESSION')
session_obj = loads(session_raw.decode('utf-8'))
if isinstance(stype, list) :
ret = session_obj['type'] in stype
else :
ret = session_obj['type'] == stype
return ret, session_obj
def login_auth_qq(openid, nickname) :
user_obj = db.users.find_one({'profile.openid_qq': openid})
if user_obj is not None :
sid, _ = do_login(user_obj)
return True, sid
else :
reg_sid = require_session('LOGIN_OR_SIGNUP_OPENID_QQ', openid_qq = openid)
return False, reg_sid
def bind_qq_openid(user, openid) :
binded_user = db.users.find_one({'profile.openid_qq': openid})
if binded_user is not None :
if str(binded_user['_id']) == str(user['_id']) :
return True
else :
return False
db.users.update_one({'_id': ObjectId(user['_id'])}, {'$set': {'profile.openid_qq': openid}})
return True
def require_session(session_type, **kwargs) :
# TODO: add challenge code to redis
if session_type not in ['LOGIN', 'SIGNUP', 'LOGIN_OR_SIGNUP_OPENID_QQ'] :
raise UserError('INCORRECT_SESSION_TYPE')
sid = binascii.hexlify(bytearray(random_bytes(16))).decode()
session_obj = {
'type': session_type,
'openid_qq': kwargs['openid_qq'] if session_type == 'LOGIN_OR_SIGNUP_OPENID_QQ' else ''
}
rdb.set(sid, dumps(session_obj), ex = UserConfig.SESSION_EXPIRE_TIME)
log(obj = {'sid': sid})
return sid
def logout(redis_user_key) :
common_user_obj = rdb.get(redis_user_key)
log(obj = {'redis_user_key': redis_user_key, 'user': common_user_obj})
rdb.delete(redis_user_key)
def do_login(user_obj) :
user_id = str(user_obj['_id'])
redis_user_key_lookup_key = f"user-{user_id}"
redis_user_key = rdb.get(redis_user_key_lookup_key)
logged_in = False
if redis_user_key :
# user already logged in on some other machines
redis_user_obj_json_str = rdb.get(redis_user_key)
if redis_user_obj_json_str :
logged_in = True
# reset expire time
rdb.set(redis_user_key, redis_user_obj_json_str, ex = UserConfig.LOGIN_EXPIRE_TIME)
rdb.set(redis_user_key_lookup_key, redis_user_key, ex = UserConfig.LOGIN_EXPIRE_TIME)
if logged_in :
profile = user_obj['profile']
profile['access_control_status'] = user_obj['access_control']['status']
return redis_user_key, profile
openid_qq = user_obj['profile']['openid_qq'] if 'openid_qq' in user_obj['profile'] else None
common_user_obj = {
'_id': user_obj['_id'],
'profile': {
'uid': str(user_obj['_id']),
'username': user_obj['profile']['username'],
'image': user_obj['profile']['image'],
'desc': user_obj['profile']['desc'],
'email': user_obj['profile']['email'],
'bind_qq': True if openid_qq else False
},
'access_control': user_obj['access_control'],
'settings': user_obj['settings']
}
redis_user_value = dumps(common_user_obj)
redis_user_key = binascii.hexlify(bytearray(random_bytes(16))).decode()
redis_user_key_lookup_key = f"user-{user_obj['_id']}"
rdb.set(redis_user_key, redis_user_value, ex = UserConfig.LOGIN_EXPIRE_TIME)
rdb.set(redis_user_key_lookup_key, redis_user_key, ex = UserConfig.LOGIN_EXPIRE_TIME)
log(obj = {'redis_user_key': redis_user_key, 'user': common_user_obj})
profile = common_user_obj['profile']
profile['access_control_status'] = user_obj['access_control']['status']
return redis_user_key, profile
def unbind_qq(user) :
def updater(obj) :
obj['profile']['bind_qq'] = False
return obj
db.users.update_one({'_id': ObjectId(user['_id'])}, {'$set': {'profile.openid_qq': ''}})
_updateUserRedisValue(user['_id'], updater)
# we allow the same user to login multiple times and all of his login sessions are valid
def login(username, password, challenge, login_session_id) :
log(obj = {'username': username, 'challenge': challenge, 'login_session_id': login_session_id})
if len(username) > UserConfig.MAX_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_LONG')
if len(username) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_SHORT')
if len(password) > UserConfig.MAX_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_LONG')
if len(password) < UserConfig.MIN_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_SHORT')
session_verified, session_obj = verify_session(login_session_id, ['LOGIN', 'LOGIN_OR_SIGNUP_OPENID_QQ'])
if session_verified :
user_obj = db.users.find_one({'profile.username': username})
if not user_obj :
user_obj = db.users.find_one({'profile.email': username.lower()})
if not user_obj :
log(level = 'SEC', obj = {'msg': 'USER_NOT_EXIST'})
raise UserError('INCORRECT_LOGIN')
crypto_method = user_obj['crypto']['crypto_method']
if crypto_method == 'PBKDF2' :
if not verify_password_PBKDF2(password, user_obj['crypto']['salt1'], user_obj['crypto']['password_hashed']) :
log(level = 'SEC', obj = {'msg': 'WRONG_PASSWORD'})
raise UserError('INCORRECT_LOGIN')
# update crypto to Argon2
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(password)
db.users.update_one({'_id': user_obj['_id']}, {'$set': {'crypto': {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}}})
elif crypto_method == 'Argon2' :
if not verify_password_Argon2(password, user_obj['crypto']['salt1'], user_obj['crypto']['password_hashed']) :
log(level = 'SEC', obj = {'msg': 'WRONG_PASSWORD'})
raise UserError('INCORRECT_LOGIN')
# bind QQ OpenID if present
if session_obj['type'] == 'LOGIN_OR_SIGNUP_OPENID_QQ' :
openid_qq = session_obj['openid_qq']
bind_qq_openid(user_obj, openid_qq)
return do_login(user_obj)
raise UserError('INCORRECT_SESSION')
def query_user_batch(uids) :
uids = [ObjectId(i) for i in uids]
return list(db.users.aggregate([
{'$match': {'_id': {'$in': uids}}},
{'$project': {'profile.username': 1, 'profile.desc': 1, 'profile.image': 1, '_id': 1}}
]))
def query_user(uid) :
try :
obj = db.users.find_one({'_id': ObjectId(uid)})
del obj['access_control']
del obj['crypto']
del obj['settings']
if 'email' in obj['profile'] and obj['profile']['email'] :
em: str = obj['profile']['email']
gravatar = md5(em.strip().lower())
obj['profile']['gravatar'] = gravatar
del obj['profile']['email']
if 'openid_qq' in obj['profile'] :
del obj['profile']['openid_qq']
except :
raise UserError('USER_NOT_EXIST')
return obj
def queryBlacklist(user, language) :
if 'blacklist' in user['settings'] :
if isinstance(user['settings']['blacklist'], list) :
return tagdb.translate_tag_ids_to_user_language(user['settings']['blacklist'], language)[0]
else :
return 'default'
else :
return 'default'
def queryUsername(username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is None :
raise UserError('USER_NOT_EXIST')
del user_obj_find['access_control']
del user_obj_find['crypto']
del user_obj_find['settings']
del user_obj_find['profile']['email']
del user_obj_find['profile']['openid_qq']
return user_obj_find
def checkIfUserExists(username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is not None :
return True
return False
def checkIfEmailExists(email: str) :
user_obj_find = db.users.find_one({'profile.email': email.lower()})
if user_obj_find is not None :
return True
return False
def checkIsAuthorized(user, op) :
filterOperation(op, user)
def signup(username, password, email, challenge, signup_session_id) :
log(obj = {'username': username, 'email': email, 'challenge': challenge, 'signup_session_id': signup_session_id})
if len(username) > UserConfig.MAX_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_LONG')
if len(username) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('USERNAME_TOO_SHORT')
if len(password) > UserConfig.MAX_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_LONG')
if len(password) < UserConfig.MIN_PASSWORD_LENGTH :
raise UserError('PASSWORD_TOO_SHORT')
session_verified, session_obj = verify_session(signup_session_id, ['SIGNUP', 'LOGIN_OR_SIGNUP_OPENID_QQ']) # accept QQ OpenID sessions so the binding branch below is reachable
if session_verified :
if session_obj['type'] == 'LOGIN_OR_SIGNUP_OPENID_QQ' :
openid_qq = session_obj['openid_qq']
else :
openid_qq = None
if email :
if len(email) > UserConfig.MAX_EMAIL_LENGTH or not re.match(r"[^@]+@[^@]+\.[^@]+", email):
raise UserError('INCORRECT_EMAIL')
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(password)
with redis_lock.Lock(rdb, 'signup:' + username) :
user_obj_find = db.users.find_one({'profile.username': username})
if user_obj_find is not None :
raise UserError('USER_EXIST')
if email :
user_obj_email = db.users.find_one({'profile.email': email.lower()})
if user_obj_email is not None :
raise UserError('EMAIL_EXIST')
if openid_qq :
binded_user = db.users.find_one({'profile.openid_qq': openid_qq})
if binded_user is not None :
raise UserError('QQ_ALREADY_BIND')
user_obj = {
'profile': {
'username': username,
'desc': 'Write something here',
'pubkey': '',
'image': 'default',
'email': email,
'openid_qq': openid_qq if openid_qq else '' # bind if present
},
'crypto': {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
},
'access_control': {
'status': 'normal',
'access_mode': 'blacklist',
'allowed_ops': [],
'denied_ops': []
},
'settings': {
'blacklist': 'default'
},
'meta': {
'created_at': datetime.now()
}
}
uid = db.users.insert_one(user_obj).inserted_id
log(obj = {'uid': uid, 'profile': user_obj['profile']})
return uid
raise UserError('INCORRECT_SESSION')
def update_userphoto(redis_user_key, user_id, file_key) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'file_key': file_key})
photo_file = None
if file_key.startswith("upload-image-") :
filename = rdb.get(file_key)
if filename :
photo_file = filename.decode('ascii')
if photo_file is None :
raise UserError('NO_PHOTO')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_photo_file': obj['profile']['image'], 'photo_file': photo_file})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.image': photo_file}})
def updater(obj) :
obj['profile']['image'] = photo_file
return obj
_updateUserRedisValue(user_id, updater)
return photo_file
def update_desc(redis_user_key, user_id, new_desc) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_desc': new_desc})
if len(new_desc) > UserConfig.MAX_DESC_LENGTH :
raise UserError('DESC_TOO_LONG')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_desc': obj['profile']['desc']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.desc': new_desc}})
def updater(obj) :
obj['profile']['desc'] = new_desc
return obj
_updateUserRedisValue(user_id, updater)
def update_username(redis_user_key, user_id, new_name) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_name': new_name})
if len(new_name) > UserConfig.MAX_USERNAME_LENGTH or len(new_name) < UserConfig.MIN_USERNAME_LENGTH :
raise UserError('NAME_LENGTH')
user_obj_find = db.users.find_one({'profile.username': new_name})
if user_obj_find is not None :
raise UserError('USER_ALREADY_EXIST')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_name': obj['profile']['username']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.username': new_name}})
def updater(obj) :
obj['profile']['username'] = new_name
return obj
_updateUserRedisValue(user_id, updater)
def update_email(redis_user_key, user_id, new_email) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'new_email': new_email})
if len(new_email) > UserConfig.MAX_EMAIL_LENGTH or not re.match(r"[^@]+@[^@]+\.[^@]+", new_email):
raise UserError('INCORRECT_EMAIL')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
user_obj_email = db.users.find_one({'profile.email': new_email})
if user_obj_email is not None and str(user_obj_email['_id']) != str(obj['_id']) :
raise UserError('EMAIL_EXIST')
log(obj = {'old_email': obj['profile']['email']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'profile.email': new_email}})
def updater(obj) :
obj['profile']['email'] = new_email
return obj
_updateUserRedisValue(user_id, updater)
def update_blacklist(redis_user_key, user_id, blacklist) :
log(obj = {'redis_user_key': redis_user_key, 'user_id': user_id, 'blacklist': blacklist})
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'old_blacklist': obj['settings']['blacklist']})
if isinstance(blacklist, str) :
blacklist = 'default'
elif isinstance(blacklist, list) :
blacklist = tagdb.filter_and_translate_tags(blacklist)
else :
raise UserError('INCORRECT_BLACKLIST')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'settings.blacklist': blacklist}})
def updater(obj) :
obj['settings']['blacklist'] = blacklist
return obj
_updateUserRedisValue(user_id, updater)
def update_password(user_id, old_pass, new_pass) :
if len(old_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(old_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
if len(new_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(new_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
obj = db.users.find_one({'_id': ObjectId(user_id)})
if obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'username': obj['profile']['username']})
crypto_method = obj['crypto']['crypto_method']
if crypto_method == 'PBKDF2' :
if not verify_password_PBKDF2(old_pass, obj['crypto']['salt1'], obj['crypto']['password_hashed']) :
raise UserError('INCORRECT_PASSWORD')
# generate a new Argon2 security context
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(new_pass)
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
elif crypto_method == 'Argon2' :
if not verify_password_Argon2(old_pass, obj['crypto']['salt1'], obj['crypto']['password_hashed']) :
raise UserError('INCORRECT_PASSWORD')
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = update_crypto_Argon2(old_pass, new_pass, obj['crypto']['salt2'], obj['crypto']['master_key_encryptyed'])
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'crypto': crypto}})
def request_password_reset(email, user_language) :
user_obj = db.users.find_one({'profile.email': email})
if user_obj is None :
raise UserError('EMAIL_NOT_EXIST')
reset_key = random_bytes_str(16)
rdb.set('passreset-' + reset_key, email)
if user_language not in ['CHS', 'ENG'] :
user_language = 'ENG'
template_file = f'PatchyVideo-passreset-{user_language}.html'
title = get_template_attribute(template_file, 'get_title')
html_doc = render_template(template_file, key = reset_key)
send_noreply(email, str(title()), html_doc, mime = 'html')
def reset_password(reset_key, new_pass) :
if len(new_pass) > UserConfig.MAX_PASSWORD_LENGTH or len(new_pass) < UserConfig.MIN_PASSWORD_LENGTH:
raise UserError('PASSWORD_LENGTH')
reset_key_content = rdb.get('passreset-' + reset_key)
try :
email = reset_key_content.decode('ascii')
assert len(email) > 0
obj = db.users.find_one({'profile.email': email})
assert obj is not None
except :
raise UserError('INCORRECT_KEY')
# generate a new Argon2 security context
crypto_method, password_hashed, salt1, salt2, master_key_encryptyed = generate_user_crypto_Argon2(new_pass)
crypto = {
'crypto_method': crypto_method,
'password_hashed': password_hashed,
'salt1': salt1,
'salt2': salt2,
'master_key_encryptyed': master_key_encryptyed
}
db.users.update_one({'_id': obj['_id']}, {'$set': {'crypto': crypto}})
def _updateUserRedisValue(user_id, updater) :
redis_user_key_lookup_key = f"user-{str(user_id)}"
redis_user_key_ttl = rdb.ttl(redis_user_key_lookup_key)
redis_user_key = rdb.get(redis_user_key_lookup_key)
if redis_user_key :
redis_user_obj_json = rdb.get(redis_user_key)
if redis_user_obj_json :
redis_user_obj = loads(redis_user_obj_json)
redis_user_obj = updater(redis_user_obj)
rdb.set(redis_user_key, dumps(redis_user_obj), ex = redis_user_key_ttl)
def whoAmI(user) :
return user['access_control']['status']
def updateUserRole(user_id, role, user) :
filterOperation('updateUserRole', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_role': role, 'old_role': old_user_obj['access_control']['status']})
if role not in ['normal', 'admin'] :
raise UserError('INCORRECT_ROLE')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.status': role}})
def updater(obj) :
obj['access_control']['status'] = role
return obj
_updateUserRedisValue(user_id, updater)
def updateUserAccessMode(user_id, mode, user) :
filterOperation('updateUserAccessMode', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_mode': mode, 'old_mode': old_user_obj['access_control']['access_mode']})
if mode not in ['blacklist', 'whitelist'] :
raise UserError('INCORRECT_ACCESS_MODE')
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.access_mode': mode}})
def updater(obj) :
obj['access_control']['access_mode'] = mode
return obj
_updateUserRedisValue(user_id, updater)
def getUserAllowedOps(user_id, user) :
filterOperation('getUserAllowedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
return old_user_obj['access_control']['allowed_ops']
def updateUserAllowedOps(user_id, allowed_ops, user) :
filterOperation('updateUserAllowedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_ops': allowed_ops, 'old_ops': old_user_obj['access_control']['allowed_ops']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.allowed_ops': allowed_ops}})
def updater(obj) :
obj['access_control']['allowed_ops'] = allowed_ops
return obj
_updateUserRedisValue(user_id, updater)
def getUserDeniedOps(user_id, user) :
filterOperation('getUserDeniedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
return old_user_obj['access_control']['denied_ops']
def updateUserDeniedOps(user_id, denied_ops, user) :
filterOperation('updateUserDeniedOps', user, user_id)
old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
if old_user_obj is None :
raise UserError('USER_NOT_EXIST')
log(obj = {'user_id': user_id, 'new_ops': denied_ops, 'old_ops': old_user_obj['access_control']['denied_ops']})
db.users.update_one({'_id': ObjectId(user_id)}, {'$set': {'access_control.denied_ops': denied_ops}})
def updater(obj) :
obj['access_control']['denied_ops'] = denied_ops
return obj
_updateUserRedisValue(user_id, updater)
def listUsers(user, offset, limit, query = None, order = 'latest') :
filterOperation('listUsers', user)
if order not in ['latest', 'oldest'] :
raise UserError('INCORRECT_ORDER')
if query :
query = re.escape(query)
query = f'^.*{query}.*$'
query_obj = {'profile.username': {'$regex': query}}
else :
query_obj = {}
result = db.users.find(query_obj)
if order == 'latest':
result = result.sort([("meta.created_at", -1)])
if order == 'oldest':
result = result.sort([("meta.created_at", 1)])
items = result.skip(offset).limit(limit)
count = items.count()
items = [i for i in items]
for i in range(len(items)) :
del items[i]["crypto"]
return items, count
def viewOpinion(user) :
uobj = db.users.find_one({'_id': user['_id']})
if 'comment_thread' in uobj :
return listThread(uobj['comment_thread'])
else :
return None, None
|
nilq/baby-python
|
python
|
import socket
import threading
class Server:
def __init__(self, ip, port):
self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sck.bind((ip, port))
self.sck.listen()
self.conCallback = None
self.clientThrCallback = None
self.disconCallback = None
self.clients = { }
self.nextClID = 0
def acceptClients(self):
while True:
sck, address = self.sck.accept()
if self.conCallback is not None:
self.conCallback(sck, address, self.nextClID)
thr = threading.Thread(target=self.clientThr, args=(sck, address, self.nextClID))
self.nextClID += 1
thr.start()
def acceptClientsAsync(self):
thr = threading.Thread(target=self.acceptClients)
thr.start()
return thr
def clientThr(self, sck, address, id):
self.clients[id] = (sck, address)
try:
if self.clientThrCallback is not None:
self.clientThrCallback(sck, address, id)
except Exception as e:
del self.clients[id]
sck.close()
raise e
if self.disconCallback is not None:
self.disconCallback(sck, address, id)
del self.clients[id]
sck.close()
def send(self, client, data):
if type(client) == int:
self.clients[client][0].send(data)
else:
client.send(data)
def broadcast(self, data):
for id, cl in self.clients.items():
cl[0].send(data)
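# Hedged usage sketch (assumption: the echo callback, address, and port are
# illustrative, not part of the original source). clientThrCallback runs in
# the per-client thread started by acceptClients().
if __name__ == "__main__":
    server = Server("127.0.0.1", 5000)

    def on_client(sck, address, client_id):
        # Echo until the peer disconnects (recv returns b"").
        while True:
            data = sck.recv(1024)
            if not data:
                break
            server.send(client_id, data)

    server.clientThrCallback = on_client
    server.acceptClients()  # blocks; use acceptClientsAsync() for a background thread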
|
nilq/baby-python
|
python
|
from math import pi, cos, log, floor
from torch.optim.lr_scheduler import _LRScheduler
class CosineWarmupLR(_LRScheduler):
'''
Cosine lr decay function with warmup.
Ref: https://github.com/PistonY/torch-toolbox/blob/master/torchtoolbox/optimizer/lr_scheduler.py
https://github.com/Randl/MobileNetV3-pytorch/blob/master/cosine_with_warmup.py
Lr warmup is proposed by
`Accurate, Large Minibatch SGD:Training ImageNet in 1 Hour`
`https://arxiv.org/pdf/1706.02677.pdf`
Cosine decay is proposed by
`Stochastic Gradient Descent with Warm Restarts`
`https://arxiv.org/abs/1608.03983`
Args:
optimizer (Optimizer): optimizer of a model.
iter_in_one_epoch (int): number of iterations in one epoch.
epochs (int): number of epochs to train.
lr_min (float): minimum(final) lr.
warmup_epochs (int): warmup epochs before cosine decay.
last_epoch (int): init iteration. In truth, this is last_iter
Attributes:
niters (int): number of iterations of all epochs.
warmup_iters (int): number of iterations of all warmup epochs.
cosine_iters (int): number of iterations of all cosine epochs.
'''
def __init__(self, optimizer, epochs, iter_in_one_epoch, lr_min=0, warmup_epochs=0, last_epoch=-1):
self.lr_min = lr_min
self.niters = epochs * iter_in_one_epoch
self.warmup_iters = iter_in_one_epoch * warmup_epochs
self.cosine_iters = iter_in_one_epoch * (epochs - warmup_epochs)
super(CosineWarmupLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch < self.warmup_iters:
return [(self.lr_min + (base_lr - self.lr_min) * self.last_epoch / self.warmup_iters) for base_lr in self.base_lrs]
else:
return [(self.lr_min + (base_lr - self.lr_min) * (1 + cos(pi * (self.last_epoch - self.warmup_iters) / self.cosine_iters)) / 2) for base_lr in self.base_lrs]
class CosineAnnealingWarmRestarts(_LRScheduler):
r'''
copied from https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#CosineAnnealingWarmRestarts
Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
is the number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{i}}\pi))
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0`(after restart), set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
last_epoch (int, optional): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
'''
def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, warmup_epochs=0, decay_rate=0.5):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
if warmup_epochs < 0 or not isinstance(warmup_epochs, int):
raise ValueError("Expected positive integer warmup_epochs, but got {}".format(warmup_epochs))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
self.warmup_epochs = warmup_epochs
self.decay_rate = decay_rate
self.decay_power = 0
super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
self.T_cur = self.last_epoch
def get_lr(self):
if self.last_epoch < self.warmup_epochs:
return [(self.eta_min + (base_lr - self.eta_min) * self.T_cur / self.warmup_epochs) for base_lr in self.base_lrs]
else:
return [self.eta_min + (base_lr * (self.decay_rate**self.decay_power) - self.eta_min) * (1 + cos(pi * self.T_cur / self.T_i)) / 2
for base_lr in self.base_lrs]
def step(self, epoch=None):
'''Step could be called after every batch update
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> iters = len(dataloader)
>>> for epoch in range(20):
>>> for i, sample in enumerate(dataloader):
>>> inputs, labels = sample['inputs'], sample['labels']
>>> scheduler.step(epoch + i / iters)
>>> optimizer.zero_grad()
>>> outputs = net(inputs)
>>> loss = criterion(outputs, labels)
>>> loss.backward()
>>> optimizer.step()
This function can be called in an interleaved way.
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> for epoch in range(20):
>>> scheduler.step()
>>> scheduler.step(26)
>>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
'''
if epoch is None:
epoch = self.last_epoch + 1
self.T_cur = self.T_cur + 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch < self.warmup_epochs:
self.T_cur = epoch
else:
epoch_cur = epoch - self.warmup_epochs
if epoch_cur >= self.T_0:
if self.T_mult == 1:
self.T_cur = epoch_cur % self.T_0
self.decay_power = epoch_cur // self.T_0
else:
n = int(log((epoch_cur / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
self.T_cur = epoch_cur - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
self.T_i = self.T_0 * self.T_mult ** (n)
self.decay_power = n
else:
self.T_i = self.T_0
self.T_cur = epoch_cur
self.last_epoch = floor(epoch)
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
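# Hedged usage sketch (assumption: the toy model and hyper-parameters are
# illustrative placeholders, not from the original source). CosineWarmupLR is
# stepped once per batch, so last_epoch counts iterations and
# iter_in_one_epoch ties the warmup/cosine phases to the dataloader length.
if __name__ == '__main__':
    import torch
    model = torch.nn.Linear(10, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = CosineWarmupLR(opt, epochs=10, iter_in_one_epoch=100,
                           lr_min=1e-5, warmup_epochs=1)
    for epoch in range(10):
        for it in range(100):
            opt.step()    # optimizer step first to avoid the PyTorch ordering warning
            sched.step()
    print(opt.param_groups[0]['lr'])  # approaches lr_min at the end of training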
|
nilq/baby-python
|
python
|
from construct import *
from construct.lib import *
switch_integers__opcode = Struct(
'code' / Int8ub,
'body' / Switch(this.code, {1: Int8ub, 2: Int16ul, 4: Int32ul, 8: Int64ul, }),
)
switch_integers = Struct(
'opcodes' / GreedyRange(LazyBound(lambda: switch_integers__opcode)),
)
_schema = switch_integers
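# Hedged parsing sketch (assumption: the byte string is synthetic test input,
# not from the original source). Opcode 1 carries a U8 body, opcode 2 a
# little-endian U16; GreedyRange keeps reading opcodes until end of input.
if __name__ == '__main__':
    sample = bytes([1, 0x42, 2, 0x34, 0x12])
    parsed = _schema.parse(sample)
    print(parsed.opcodes[0].body)  # 66
    print(parsed.opcodes[1].body)  # 4660 == 0x1234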
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# bricks.py: utility collections.
#
# Copyright (C) 2009, 2010 Raymond Hettinger <python@rcn.com>
# Copyright (C) 2010 Lukáš Lalinský <lalinsky@gmail.com>
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utility collections or "bricks".
:module: watchdog.utils.bricks
:author: Yesudeep Mangalapilly <yesudeep@gmail.com>
:author: Lukáš Lalinský <lalinsky@gmail.com>
:author: Raymond Hettinger <python@rcn.com>
Classes
=======
.. autoclass:: OrderedSetQueue
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: OrderedSet
"""
import sys
try:
    from collections.abc import MutableSet  # Python 3.3+
except ImportError:
    from collections import MutableSet  # Python 2.6 - 3.2
try:
    import queue
except ImportError:
    import Queue as queue
class OrderedSetQueue(queue.Queue):
"""Thread-safe implementation of an ordered set queue.
Disallows adding a duplicate item while maintaining the
order of items in the queue. The implementation leverages
locking already implemented in the base class
redefining only the primitives. Since the internal queue
is not replaced, the order is maintained. The set is used
merely to check for the existence of an item.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
:author: Lukáš Lalinský <lalinsky@gmail.com>
:url: http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
"""
def _init(self, maxsize):
queue.Queue._init(self, maxsize)
self._set_of_items = set()
def _put(self, item):
if item not in self._set_of_items:
queue.Queue._put(self, item)
self._set_of_items.add(item)
def _get(self):
item = queue.Queue._get(self)
self._set_of_items.remove(item)
return item
if sys.version_info >= (2, 6, 0):
KEY, PREV, NEXT = range(3)
class OrderedSet(MutableSet):
"""
Implementation based on a doubly-linked link and an internal dictionary.
This design gives :class:`OrderedSet` the same big-Oh running times as
regular sets including O(1) adds, removes, and lookups as well as
O(n) iteration.
.. ADMONITION:: Implementation notes
Runs on Python 2.6 or later (and runs on Python 3.0 or later
without any modifications).
:author: Raymond Hettinger <python@rcn.com>
:url: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, _next = self.map.pop(key)
prev[NEXT] = _next
_next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
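# Hedged usage sketch (assumption: the sample strings come from the original
# ActiveState recipe's demo, not from this module). OrderedSet rejects
# duplicates while preserving first-insertion order.
if __name__ == '__main__':
    s = OrderedSet('abracadabra')
    s |= OrderedSet('simsalabim')
    print(list(s))  # ['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l']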
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
PyEVO reCAPTCHA API module
===============================================
.. module:: pyevo.api.recaptcha
:platform: Unix, Windows
:synopsis: PyEVO reCAPTCHA API module
.. moduleauthor:: (C) 2012 Oliver Gutiérrez
TODO: Check recaptcha API module for incomplete class method get_challenge
"""
# Python imports
import urllib2, urllib
RECAPTCHA_API_SERVER='https://www.google.com/recaptcha/api'
RECAPTCHA_VERIFY_SERVER='http://www.google.com/recaptcha/api/verify'
class RECAPTCHAHelper(object):
"""
reCAPTCHA API helper
"""
def __init__(self,public_key,private_key,api_server=RECAPTCHA_API_SERVER,verify_server=RECAPTCHA_VERIFY_SERVER,fail_silently=True):
"""
Class initialization
"""
self.public_key=public_key
self.private_key=private_key
self.api_server=api_server
self.verify_server=verify_server
self.fail_silently=fail_silently
def verify(self,captcharesp,challenge):
"""
Recaptcha verification
"""
if not (captcharesp and challenge and len(captcharesp) and len(challenge)):
return False
# Generate request to recaptcha servers
verifreq = urllib2.Request (
url = self.verify_server,
data = urllib.urlencode ({
'privatekey': self.private_key,
'remoteip' : None,
'challenge': challenge.encode('utf-8'),
'response' : captcharesp.encode('utf-8'),
}),
headers = {
'Content-type': 'application/x-www-form-urlencoded',
'User-agent': 'Python'
}
)
# Do request
try:
resp=urllib2.urlopen(verifreq)
except:
# In case of connection error return fail_silently as value for the verification
return self.fail_silently
# Check captcha response
return_values=resp.read().splitlines()
resp.close()
return_code=return_values[0]
if (return_code=='true'):
return True
# Failed verification
return False
# def get_challenge(self):
# """
# TODO: Get reCAPTCHA image and challenge data
# """
# challenge=
# imgurl='http://www.google.com/recaptcha/api/image?c=%s' % challenge
# pass
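# Hedged usage sketch (assumption: the keys and token strings are placeholders,
# not part of the original source). Note that with the default
# fail_silently=True a network error is reported as a successful verification,
# so strict callers should pass fail_silently=False.
if __name__ == '__main__':
    helper = RECAPTCHAHelper('public-key', 'private-key', fail_silently=False)
    print(helper.verify('user-response-text', 'challenge-token'))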
|
nilq/baby-python
|
python
|
from ._container import AadModelContainer
from onnxconverter_common.topology import Topology
from onnxconverter_common.data_types import FloatTensorType
from ad_examples.aad.forest_aad_detector import AadForest
def _get_aad_operator_name(model):
# FIXME: not all possible AAD models are currently supported
if not isinstance(model, AadForest):
raise ValueError("No proper operator name found for '%s'" % type(model))
return "AadForest"
def _parse_aad(scope, model, inputs):
this_operator = scope.declare_local_operator(_get_aad_operator_name(model), model)
this_operator.inputs = inputs
# FIXME: probably another variable is required for anomality label
score_variable = scope.declare_local_variable('score', FloatTensorType())
this_operator.outputs.append(score_variable)
return this_operator.outputs
def parse_aad(model, initial_types=None, target_opset=None,
custom_conversion_functions=None, custom_shape_calculators=None):
raw_model_container = AadModelContainer(model)
topology = Topology(raw_model_container, default_batch_size='None',
initial_types=initial_types, target_opset=target_opset,
custom_conversion_functions=custom_conversion_functions,
custom_shape_calculators=custom_shape_calculators)
scope = topology.declare_scope('__root__')
inputs = []
for var_name, initial_type in initial_types:
inputs.append(scope.declare_local_variable(var_name, initial_type))
for variable in inputs:
raw_model_container.add_input(variable)
outputs = _parse_aad(scope, model, inputs)
for variable in outputs:
raw_model_container.add_output(variable)
return topology
|
nilq/baby-python
|
python
|
#import matplotlib.pyplot as plt
from flask import Flask, render_template, jsonify
import requests
import json
import numpy as np
import time
app = Flask(__name__)
@app.route('/')
def index():
r = requests.get("http://127.0.0.1:5000/chain").text
r = json.loads(r)
# Fetch the chain length
chain_length = len(r["chain"])
blocks_data = []
for i in range(1,chain_length):
block_dict = {}
transaction_length = len(r["chain"][i]["transactions"])
block_dict["Block Number"] = r["chain"][i]["index"]
block_dict["Previous Hash"] = r["chain"][i]["previous_hash"]
block_dict["Timestamp"] = r["chain"][i]["timestamp"]
block_dict["Total Transactions"] = transaction_length
blocks_data.append(block_dict)
return render_template("graph.html", data=blocks_data)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
|
nilq/baby-python
|
python
|
import numpy as np
import lsst.afw.table as afwTable
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.geom as geom
import lsst.sphgeom as sphgeom
from lsst.meas.base.forcedPhotCcd import ForcedPhotCcdTask, ForcedPhotCcdConfig
from .forcedPhotDia import DiaSrcReferencesTask
__all__ = ("ForcedPhotCoaddDiaSrcConfig", "ForcedPhotCoaddDiaSrcTask")
class ForcedPhotCoaddDiaSrcConfig(ForcedPhotCcdConfig):
coaddName = pexConfig.Field(dtype=str, default='deep',
doc="Name of coadd")
def setDefaults(self):
ForcedPhotCcdTask.ConfigClass.setDefaults(self)
self.references.retarget(DiaSrcReferencesTask)
self.measurement.copyColumns = {"id": "id", "coord_ra": "coord_ra",
"coord_dec": "coord_dec",
"base_PsfFlux_instFlux":"diaSrc_base_PsfFlux_instFlux",
"base_PsfFlux_instFluxErr": "diaSrc_base_PsfFlux_instFluxErr",
}
self.measurement.plugins.names = ['base_SdssShape', 'base_DiaTransformedCentroid',
'base_PsfFlux', 'base_LocalBackground',
'base_PixelFlags', 'base_CircularApertureFlux']
self.measurement.slots.centroid = 'base_DiaTransformedCentroid'
self.measurement.slots.shape = 'base_SdssShape'
self.measurement.slots.apFlux = None
self.measurement.slots.modelFlux = None
self.measurement.slots.psfFlux = None
self.measurement.slots.calibFlux = None
# These radii were chosen because they are among the default measured in the pipeline. If the default
# changes then these will not be able to be copied.
radii = [3., 6., 9., 12.]
for radius in radii:
base = int(radius)
decimal = int((radius - int(radius))*10)
input_name = f"base_CircularApertureFlux_{base}_{decimal}_instFlux"
output_name = f"diaSrc_base_CircularApertureFlux_{base}_{decimal}_instFlux"
self.measurement.copyColumns[input_name] = output_name
input_name = f"base_CircularApertureFlux_{base}_{decimal}_instFluxErr"
output_name = f"diaSrc_base_CircularApertureFlux_{base}_{decimal}_instFluxErr"
self.measurement.copyColumns[input_name] = output_name
self.measurement.plugins["base_CircularApertureFlux"].radii = radii
# Use a large aperture to be independent of seeing in calibration
self.measurement.plugins["base_CircularApertureFlux"].maxSincRadius = 12.0
class ForcedPhotCoaddDiaSrcTask(ForcedPhotCcdTask):
"""!
A command-line driver for performing forced measurement on Coadd images from DIASrc catalogs.
"""
ConfigClass = ForcedPhotCoaddDiaSrcConfig
RunnerClass = pipeBase.ButlerInitializedTaskRunner
_DefaultName = "forcedPhotCoaddDiaSrc"
dataPrefix = "deepCoadd_"
def __init__(self, butler=None, refSchema=None, **kwds):
"""Initialize the task.
ForcedPhotImageTask takes two keyword arguments beyond the usual CmdLineTask arguments:
- refSchema: the Schema of the reference catalog, passed to the constructor of the references
subtask
- butler: a butler that will be passed to the references subtask to allow it to load its Schema
from disk
At least one of these arguments must be present; if both are, schema takes precedence.
"""
super(ForcedPhotCcdTask, self).__init__(butler, refSchema, **kwds)
self.primaryKey = self.measurement.schema.addField("detect_isPrimary", type="Flag", doc="set to True if inside inner patch and tract region")
def writeOutput(self, dataRef, sources):
"""!Write source table
@param dataRef Data reference from butler
@param sources SourceCatalog to save
"""
dataRef.put(sources, "deepDiff_forced_template_diaSrc",
flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
def _getConfigName(self):
"""!Return the name of the config dataset. Forces config comparison from run-to-run
"""
return "forcedPhotCoaddDiaSrc_config"
def _getMetadataName(self):
"""!Return the name of the metadata dataset. Forced metadata to be saved
"""
return None
@classmethod
def _makeArgumentParser(cls):
parser = pipeBase.ArgumentParser(name=cls._DefaultName)
parser.add_id_argument("--id", "deepDiff_differenceExp", help="data ID with raw CCD keys"
"e.g. --id visit=12345 ccd")
return parser
def runDataRef(self, dataRef):
"""!Measure a single exposure using forced detection for a reference catalog.
@param[in] dataRef An lsst.daf.persistence.ButlerDataRef
"""
exposure = dataRef.get('deepDiff_differenceExp')
catalog = dataRef.get('deepDiff_diaSrc')
expWcs = exposure.getWcs()
butler = dataRef.butlerSubset.butler
# I need to get the template images/catalogs for all overlapping tracts
skyMap = butler.get(datasetType=self.config.coaddName + "Coadd_skyMap")
skyCorners = [expWcs.pixelToSky(geom.Point2D(pixPos)) for pixPos in exposure.getBBox().getCorners()]
imagePoly = sphgeom.ConvexPolygon.convexHull([coord.getVector() for coord in skyCorners])
tractPatchList = skyMap.findTractPatchList(skyCorners)
allMeasCat = None
for tract, patchList in tractPatchList:
for patch in patchList:
self.log.info('Processing patch %s from tract %s' % (patch.getIndex(),tract))
patchPoly = patch.getOuterSkyPolygon(tract.getWcs())
if not patchPoly.intersects(imagePoly):
    self.log.info('Patch does not overlap the exposure; skipping.')
    continue
validObject = np.array(
[patchPoly.contains(sphgeom.UnitVector3d(sphgeom.LonLat.fromRadians(s.getRa().asRadians(),
s.getDec().asRadians())))
for s in catalog])
refCat = catalog[validObject]
expCorners = [tract.getWcs().skyToPixel(pos) for pos in skyCorners]
expBBox = geom.Box2I()
for corner in expCorners:
expBBox.include(geom.Point2I(corner))
overlapBox = geom.Box2I(patch.getOuterBBox())
overlapBox.clip(expBBox)
patchArgDict = dict(
datasetType=self.dataPrefix+ "calexp_sub",
bbox=overlapBox,
tract=tract.getId(),
patch="%s,%s" % (patch.getIndex()[0], patch.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = butler.get(**patchArgDict)
# I need to filter out objects whose parents are not in the bounding box
refCatIdDict = {ref.getId(): ref.getParent() for ref in refCat}
# Add 0 for objects without a parent
refCatIdDict[0] = 0
parentGood = np.array([refCatIdDict[ref.getId()] in refCatIdDict for ref in refCat])
if np.sum(~parentGood) > 0:
    self.log.info("Removing %d/%d objects without parents" % (np.sum(~parentGood),
                                                              len(parentGood)))
refCat = refCat.copy(deep=True)[parentGood]
if len(refCat) == 0:
self.log.info('No references available.')
continue
measCat = self.measurement.generateMeasCat(coaddPatch, refCat, expWcs,
idFactory=self.makeIdFactory(dataRef))
self.log.info("Performing forced measurement on %s" % (patchArgDict))
self.attachFootprints(measCat, refCat, coaddPatch, expWcs, dataRef)
exposureId = self.getExposureId(dataRef)
self.measurement.run(measCat, coaddPatch, refCat, expWcs, exposureId=exposureId)
# Label primary objects
innerBox = geom.Box2D(patch.getInnerBBox())
insideBox = np.array([innerBox.contains(s.getCentroid()) for s in measCat])
primaryTract = np.array([skyMap.findTract(s.getCoord())==tract for s in measCat])
primary = (insideBox) & (primaryTract)
# I can't set the whole array, so I do it one item at a time
for s,p in zip(measCat, primary):
s.set(self.primaryKey,p)
if self.config.doApCorr:
self.applyApCorr.run(
catalog=measCat,
apCorrMap=coaddPatch.getInfo().getApCorrMap()
)
self.catalogCalculation.run(measCat)
if allMeasCat is None:
allMeasCat = measCat
else:
allMeasCat.extend(measCat)
if allMeasCat is not None:
self.writeOutput(dataRef, allMeasCat)
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2018-09-08 14:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0003_market'),
]
operations = [
migrations.AlterUniqueTogether(
name='market',
unique_together={('name', 'exchange')},
),
]
|
nilq/baby-python
|
python
|
import serial
import struct
import time
def init_Serial():
print("Opening Serial Port COM 10")
ser = serial.Serial(
port='COM10',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
return ser
def wait_for_Pi(ser_i):
print("Wait until Raspberry Pi is ready")
cnt = 0
while cnt < 3:
x = ser_i.read()
print(": %s" %x.encode("hex"))
if x == '\x03':
cnt = cnt + 1
print("Raspberry is ready")
def open_Kernel():
print("Opening File")
fid = open("..\05_uart0\kernel8.img", "rb")
# Get binaries
data = fid.read()
# Get file size
f_size = fid.tell()
print("Filesize: %d" % f_size)
fid.close()
return f_size, data
def send_Kernel_size(ser_i, size):
print("Send Kernel size to RPI")
data = struct.pack('<i',size)
for i in data:
ser_i.write(i)
print("Waiting for Acknowledgment")
recv = ser_i.read(2)
if recv == "OK":
print("Received Acknowledgment")
else:
print("Error after sending size")
print("restart")
return False
return True
def send_Kernel(ser_i, kernel_data):
print("sending Kernel...")
print(len(kernel_data))
for tmp, byte in enumerate(kernel_data):
ser_i.write(byte)
print(tmp+1)
# Check if sent byte == received byte
#recv = ser_i.read(1)
#print(": %s == %s" % (recv.encode("hex"), byte.encode("hex")))
#check the size
"""recv = ser_i.read(4)
print(": %s" % recv[0].encode("hex"))
print(": %s" % recv[1].encode("hex"))
print(": %s" % recv[2].encode("hex"))
print(": %s" % recv[3].encode("hex"))"""
print("finished sending")
    # check if the Raspberry Pi sends a character after the while-loop
    #test = ser_i.read(1)
    #print(": %s" % test.hex())
    #print("Successful")
return True
def main():
print("Serial Transmit Kernel.img")
ser_i = init_Serial()
wait_for_Pi(ser_i)
size, kernel_data = open_Kernel()
guard = send_Kernel_size(ser_i, size)
    if guard:
send_Kernel(ser_i, kernel_data)
if __name__ == "__main__":
main()
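
# Protocol summary, as implemented above:
#   1. The Raspberry Pi bootloader signals readiness with three 0x03 bytes.
#   2. The host replies with the kernel size as a 4-byte little-endian int.
#   3. The bootloader acknowledges with the two ASCII bytes "OK".
#   4. The host streams the kernel image, one byte at a time.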
|
nilq/baby-python
|
python
|
from .Updater import Converters
class _DataManager:
def unpack(self, data):
return self.unpackItems(data.items())
def unpackItems(self, items):
return {key: self.itemsToDict(value) for key, value in items}
def itemsToDict(self, data):
if hasattr(data, "__dict__"):
return self.unpackItems(data.__dict__.items())
else:
return data
def pack(self, dataObject, data):
for key, value in data.items():
if hasattr(dataObject, key):
if not isinstance(getattr(dataObject, key), dict) and isinstance(value, dict):
self.pack(getattr(dataObject, key), value)
else:
setattr(dataObject, key, value)
return dataObject
class _DatabaseLoader(_DataManager):
def load(self, database, data):
try:
self.database = database
self.data = data
for converter in Converters.getConverters(self.getOldVersion()):
self.data = converter(self.data)
return self.pack(self.database, self.data)
        except Exception:
            # Fall back to the unmodified database if a converter fails
            return database
def getOldVersion(self):
try:
return self.data["version"]
        except (KeyError, TypeError):
            return ""
DatabaseLoader = _DatabaseLoader()
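
# Minimal usage sketch (assumed, not part of the original module). Because of
# the relative import at the top of this file it must run inside its package,
# e.g. via ``python -m``; Database and _Settings are hypothetical stand-ins.
if __name__ == "__main__":
    class _Settings:
        def __init__(self):
            self.theme = "dark"

    class Database:
        def __init__(self):
            self.version = "1.0"
            self.settings = _Settings()

    db = Database()
    # unpack() expects a dict, so pass the object's __dict__
    data = DatabaseLoader.unpack(db.__dict__)
    assert data == {"version": "1.0", "settings": {"theme": "dark"}}
    data["settings"]["theme"] = "light"
    DatabaseLoader.pack(db, data)
    assert db.settings.theme == "light"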
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('mct_watchdog')
import rospy
import threading
import functools
import numpy
import mct_introspection
import yaml
import cv
import Image as PILImage
import ImageDraw as PILImageDraw
import ImageFont as PILImageFont
from cv_bridge.cv_bridge import CvBridge
from mct_utilities import file_tools
# Messages and Services
from std_srvs.srv import Empty
from std_srvs.srv import EmptyResponse
from mct_msg_and_srv.msg import FramesDropped
from sensor_msgs.msg import Image
class FrameDropWatchdog(object):
"""
Frame drop watchdog monitors the number of frames dropped by the system.
"""
def __init__(self,max_seq_age=150):
rospy.init_node('frame_drop_watchdog')
self.max_seq_age = max_seq_age
self.lock = threading.Lock()
self.frames_dropped = {}
self.latest_seq = None
self.ready = False
camera_assignment = file_tools.read_camera_assignment()
self.number_of_cameras = len(camera_assignment)
self.bridge = CvBridge()
self.info_image_size = (400,90)
self.font = PILImageFont.truetype("/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)
# Subscribe to camera info topics
self.frames_dropped_sub = {}
frames_dropped_topics = self.wait_for_topics()
for topic in frames_dropped_topics:
camera = get_camera_from_topic(topic)
handler = functools.partial(self.frames_dropped_handler, camera)
self.frames_dropped_sub[camera] = rospy.Subscriber(topic, FramesDropped, handler)
        # Setup total frames dropped publisher
        self.total_dropped_pub = rospy.Publisher('total_frames_dropped', FramesDropped)
# Setup reset service
self.reset_srv = rospy.Service('frame_drop_watchdog_reset', Empty, self.reset_handler)
# Create watchdog info image
self.image_watchdog_pub = rospy.Publisher('image_frame_drop_watchdog', Image)
self.ready = True
def wait_for_topics(self):
"""
Wait for the frames_dropped topics to be published.
"""
        while True:
frames_dropped_topics = mct_introspection.find_topics_w_ending('frames_dropped')
if len(frames_dropped_topics) == self.number_of_cameras:
break
rospy.sleep(0.25)
return frames_dropped_topics
def reset_handler(self,req):
"""
Handler for the nodes reset service - empties the frames_dropped buffer.
"""
with self.lock:
self.frames_dropped = {}
self.latest_seq = None
return EmptyResponse()
def frames_dropped_handler(self, camera, data):
if not self.ready:
return
with self.lock:
try:
self.frames_dropped[data.seq][camera] = data.frames_dropped
except KeyError:
self.frames_dropped[data.seq] = {camera:data.frames_dropped}
self.update_latest_seq(data.seq)
def update_latest_seq(self,seq):
if self.latest_seq is None:
self.latest_seq = seq
else:
self.latest_seq = max([seq, self.latest_seq])
def publish_watchdog_image(self, seq, total_frames_dropped, cameras_w_drops):
"""
Publish image for GUI w/ seq #, total frames dropped, other info?
"""
pil_info_image = PILImage.new('RGB', self.info_image_size,(255,255,255))
draw = PILImageDraw.Draw(pil_info_image)
info_items = [
('seq', seq),
('dropped', total_frames_dropped),
('cameras', cameras_w_drops),
]
text_x, text_y, step_y = 10, 10, 20
for i, item in enumerate(info_items):
label, value = item
label_text = '{0}:'.format(label)
if type(value) == float:
value_text = '{0:<1.6f}'.format(value)
else:
value_text = '{0}'.format(value)
draw.text( (text_x,text_y+step_y*i), label_text, font=self.font, fill=(0,0,0))
draw.text( (text_x+100,text_y+step_y*i), value_text, font=self.font, fill=(0,0,0))
cv_info_image = cv.CreateImageHeader(pil_info_image.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(cv_info_image, pil_info_image.tostring())
# Convert to a rosimage and publish
info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image,'rgb8')
self.image_watchdog_pub.publish(info_rosimage)
def run(self):
"""
Node, main loop. While the node
"""
while not rospy.is_shutdown():
with self.lock:
for seq, data in sorted(self.frames_dropped.items()):
if len(data) == self.number_of_cameras:
total_frames_dropped = sum(data.values())
self.total_dropped_pub.publish(seq,total_frames_dropped)
cameras_w_drops = [c for c, n in data.iteritems() if n > 0]
cameras_w_drops = [int(c.split('_')[1]) for c in cameras_w_drops]
del self.frames_dropped[seq]
self.publish_watchdog_image(seq, total_frames_dropped, cameras_w_drops)
else:
if self.latest_seq - seq > self.max_seq_age:
del self.frames_dropped[seq]
# Utility functions
# ----------------------------------------------------------------------------
def get_camera_from_topic(topic):
camera = topic.split('/')[2]
return camera
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = FrameDropWatchdog()
node.run()
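
# To observe the node with standard ROS 1 tooling (topic and service names
# may be prefixed by a namespace depending on how the node is launched):
#   rostopic echo total_frames_dropped
#   rosservice call frame_drop_watchdog_reset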
|
nilq/baby-python
|