id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11374979 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Date Selection field implementation."""
import zope.interface
import zope.schema
from z3c.schema.dateselect import interfaces
@zope.interface.implementer(interfaces.IDateSelect)
class DateSelect(zope.schema.Date):
    """Date field carrying a configurable year range and initial date.

    Extends ``zope.schema.Date`` with the two extra attributes declared by
    ``interfaces.IDateSelect``.
    """

    # Shared class-level default; only overridden per instance when the
    # constructor receives an explicit ``yearRange``.
    yearRange = list(range(1900, 2100))
    initialDate = None  # set a date or today is used

    def __init__(self, yearRange=None, initialDate=None, **kw):
        # Remaining keyword arguments (title, required, ...) are forwarded
        # untouched to zope.schema.Date.
        super(DateSelect, self).__init__(**kw)
        self.initialDate = initialDate
        if yearRange is not None:
            # Shadow the shared class attribute for this instance only.
            self.yearRange = yearRange
| StarcoderdataPython |
4928555 | <filename>stocker/error.py
import math
from sklearn.metrics import mean_squared_error
def get(true_values, predicted_values, error_method='mape'):
    """Return the forecast error between *true_values* and *predicted_values*.

    Parameters
    ----------
    true_values, predicted_values : array-like (NumPy-compatible)
        Observed and predicted series of equal length.
    error_method : str
        'mape' for mean absolute percentage error, or 'mse' — which, note,
        actually returns the *root* mean squared error (sqrt of sklearn's MSE).

    Returns
    -------
    float rounded to 3 decimal places.

    Raises
    ------
    ValueError
        For an unrecognised *error_method* (previously this silently
        returned 0, masking typos in the method name).
    """
    if error_method == 'mape':
        # NOTE(review): divides by true_values — zeros in the observed
        # series produce inf/nan; confirm callers guarantee non-zero truth.
        error = (abs((true_values - predicted_values) / true_values).sum()
                 / len(true_values)) * 100
        return round(error, 3)
    if error_method == 'mse':
        # Despite the key, this is RMSE: sqrt of the sklearn MSE.
        return round(math.sqrt(mean_squared_error(true_values, predicted_values)), 3)
    raise ValueError(
        "unknown error_method: %r (expected 'mape' or 'mse')" % (error_method,))
| StarcoderdataPython |
8101558 | <filename>wsgi.py<gh_stars>0
# WSGI entry point: point Django at the project settings, build the WSGI
# application, then wrap it with WhiteNoise so static files are served from
# the WSGI layer.  DJANGO_SETTINGS_MODULE is set before the Django import
# because get_wsgi_application reads it when the application is created.
import os  # pragma: no cover
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weddingPlanner.settings")  # pragma: no cover
from django.core.wsgi import get_wsgi_application  # pragma: no cover
from whitenoise.django import DjangoWhiteNoise  # pragma: no cover
application = get_wsgi_application()  # pragma: no cover
application = DjangoWhiteNoise(application)  # pragma: no cover
| StarcoderdataPython |
1833979 | <filename>params.py<gh_stars>0
#import gym
class train_params:
    """Static configuration namespace for distributed actor-critic training.

    Plain class attributes only; the class is used as a read-only settings
    container and is never instantiated.
    """

    # Environment parameters
    ENV = 'L2M'  # Environment to use (must have low dimensional state space (i.e. not image) and continuous action space)
    RENDER = False  # Whether or not to display the environment on the screen during training
    RANDOM_SEED = 99999999  # Random seed for reproducability
    NUM_AGENTS = 64  # Number of distributed agents to run simultaneously
    ENV_ACC = 0.005  # NOTE(review): meaning not evident from this file — confirm against the environment code.

    # Create dummy environment to get all environment params
    '''
    dummy_env = gym.make(ENV)
    STATE_DIMS = dummy_env.observation_space.shape
    STATE_BOUND_LOW = dummy_env.observation_space.low
    STATE_BOUND_HIGH = dummy_env.observation_space.high
    ACTION_DIMS = dummy_env.action_space.shape
    ACTION_BOUND_LOW = dummy_env.action_space.low
    ACTION_BOUND_HIGH = dummy_env.action_space.high
    del dummy_env
    '''
    # Hard-coded environment dimensions/bounds used instead of the dummy-env
    # probe above (kept, commented, for reference).
    STATE_DIMS = (527,)
    STATE_BOUND_LOW = -10.0
    STATE_BOUND_HIGH = 10.0
    ACTION_DIMS = (22,)
    ACTION_BOUND_LOW = -1.0
    ACTION_BOUND_HIGH = 1.0

    # Training parameters
    BATCH_SIZE = 256
    NUM_STEPS_TRAIN = 2000000  # Number of steps to train for
    MAX_EP_LENGTH = 1000  # Maximum number of steps per episode
    REPLAY_MEM_SIZE = 2000000  # Soft maximum capacity of replay memory
    REPLAY_MEM_REMOVE_STEP = 200  # Check replay memory every REPLAY_MEM_REMOVE_STEP training steps and remove samples over REPLAY_MEM_SIZE capacity
    PRIORITY_ALPHA = 0.6  # Controls the randomness vs prioritisation of the prioritised sampling (0.0 = Uniform sampling, 1.0 = Greedy prioritisation)
    PRIORITY_BETA_START = 0.4  # Starting value of beta - controls to what degree IS weights influence the gradient updates to correct for the bias introduced by priority sampling (0 - no correction, 1 - full correction)
    PRIORITY_BETA_END = 1.0  # Beta will be linearly annealed from its start value to this value throughout training
    PRIORITY_EPSILON = 0.00001  # Small value to be added to updated priorities to ensure no sample has a probability of 0 of being chosen
    NOISE_SCALE = 0.4  # Scaling to apply to Gaussian noise
    NOISE_DECAY = 0.99995  # Decay noise throughout training by scaling by noise_decay**training_step
    DISCOUNT_RATE = 0.99  # Discount rate (gamma) for future rewards
    N_STEP_RETURNS = 5  # Number of future steps to collect experiences for N-step returns
    UPDATE_AGENT_EP = 5  # Agent gets latest parameters from learner every update_agent_ep episodes

    # Network parameters
    CRITIC_LEARNING_RATE = 0.0001
    ACTOR_LEARNING_RATE = 0.0001
    CRITIC_L2_LAMBDA = 0.0  # Coefficient for L2 weight regularisation in critic - if 0, no regularisation is performed
    DENSE1_SIZE = 1000  # Size of first hidden layer in networks
    DENSE2_SIZE = 600  # Size of second hidden layer in networks
    FINAL_LAYER_INIT = 0.003  # Initialise networks' final layer weights in range +/-final_layer_init
    NUM_ATOMS = 101  # Number of atoms in output layer of distributional critic
    V_MIN = 10.0  # Lower bound of critic value output distribution
    V_MAX = 200.0  # Upper bound of critic value output distribution (V_min and V_max should be chosen based on the range of normalised reward values in the chosen env)
    TAU = 0.001  # Parameter for soft target network updates
    USE_BATCH_NORM = True  # Whether or not to use batch normalisation in the networks

    # Files/Directories
    SAVE_CKPT_STEP = 5000  # Save checkpoint every save_ckpt_step training steps
    CKPT_DIR = './ckpts/' + ENV  # Directory for saving/loading checkpoints
    CKPT_FILE = None  # Checkpoint file to load and resume training from (if None, train from scratch)
    LOG_DIR = './logs/train/' + ENV  # Directory for saving Tensorboard logs (if None, do not save logs)
class test_params:
    """Static configuration namespace for evaluation runs.

    Reuses the training environment name so checkpoints and logs line up
    with the matching train_params directories.
    """

    # Environment parameters
    ENV = train_params.ENV  # Environment to use (must have low dimensional state space (i.e. not image) and continuous action space)
    RENDER = False  # Whether or not to display the environment on the screen during testing
    RANDOM_SEED = 999999  # Random seed for reproducability

    # Testing parameters
    NUM_EPS_TEST = 1  # Number of episodes to test for
    MAX_EP_LENGTH = 1000  # Maximum number of steps per episode

    # Files/directories
    CKPT_DIR = './ckpts/' + ENV  # Directory for saving/loading checkpoints
    CKPT_FILE = None  # Checkpoint file to load and test (if None, load latest ckpt)
    RESULTS_DIR = './test_results'  # Directory for saving txt file of results (if None, do not save results)
    LOG_DIR = './logs/test/' + ENV  # Directory for saving Tensorboard logs (if None, do not save logs)
class play_params:
    """Static configuration namespace for interactive/recorded playback runs."""

    # Environment parameters
    ENV = train_params.ENV  # Environment to use (must have low dimensional state space (i.e. not image) and continuous action space)
    RANDOM_SEED = 999999  # Random seed for reproducability

    # Play parameters
    NUM_EPS_PLAY = 5  # Number of episodes to play for
    MAX_EP_LENGTH = 1000  # Maximum number of steps per episode

    # Files/directories
    CKPT_DIR = './ckpts/' + ENV  # Directory for saving/loading checkpoints
    CKPT_FILE = None  # Checkpoint file to load and run (if None, load latest ckpt)
    RECORD_DIR = './video'  # Directory to store recorded gif of gameplay (if None, do not record)
| StarcoderdataPython |
11269540 | __copyright__ = "Copyright (C) 2013 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pytest
# {{{ gmsh
def search_on_path(filenames):
    """Return the absolute path of the first of *filenames* found on PATH.

    Returns None (implicitly) when no candidate exists.
    """
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52224
    from os.path import exists, abspath, join
    from os import pathsep, environ

    directories = environ["PATH"].split(pathsep)
    for directory in directories:
        for name in filenames:
            candidate = join(directory, name)
            if exists(candidate):
                return abspath(candidate)
GMSH_SPHERE = """
x = 0; y = 1; z = 2; r = 3; lc = 0.3;
p1 = newp; Point(p1) = {x, y, z, lc} ;
p2 = newp; Point(p2) = {x+r, y, z, lc} ;
p3 = newp; Point(p3) = {x, y+r, z, lc} ;
p4 = newp; Point(p4) = {x, y, z+r, lc} ;
p5 = newp; Point(p5) = {x-r, y, z, lc} ;
p6 = newp; Point(p6) = {x, y-r, z, lc} ;
p7 = newp; Point(p7) = {x, y, z-r, lc} ;
c1 = newreg; Circle(c1) = {p2, p1, p7};
c2 = newreg; Circle(c2) = {p7, p1, p5};
c3 = newreg; Circle(c3) = {p5, p1, p4};
c4 = newreg; Circle(c4) = {p4, p1, p2};
c5 = newreg; Circle(c5) = {p2, p1, p3};
c6 = newreg; Circle(c6) = {p3, p1, p5};
c7 = newreg; Circle(c7) = {p5, p1, p6};
c8 = newreg; Circle(c8) = {p6, p1, p2};
c9 = newreg; Circle(c9) = {p7, p1, p3};
c10 = newreg; Circle(c10) = {p3, p1, p4};
c11 = newreg; Circle(c11) = {p4, p1, p6};
c12 = newreg; Circle(c12) = {p6, p1, p7};
l1 = newreg; Line Loop(l1) = {c5, c10, c4}; Ruled Surface(newreg) = {l1};
l2 = newreg; Line Loop(l2) = {c9, -c5, c1}; Ruled Surface(newreg) = {l2};
l3 = newreg; Line Loop(l3) = {c12, -c8, -c1}; Ruled Surface(newreg) = {l3};
l4 = newreg; Line Loop(l4) = {c8, -c4, c11}; Ruled Surface(newreg) = {l4};
l5 = newreg; Line Loop(l5) = {-c10, c6, c3}; Ruled Surface(newreg) = {l5};
l6 = newreg; Line Loop(l6) = {-c11, -c3, c7}; Ruled Surface(newreg) = {l6};
l7 = newreg; Line Loop(l7) = {-c2, -c7, -c12};Ruled Surface(newreg) = {l7};
l8 = newreg; Line Loop(l8) = {-c6, -c9, c2}; Ruled Surface(newreg) = {l8};
"""
GMSH_QUAD_SPHERE = """
SetFactory("OpenCASCADE");
Sphere(1) = { 0, 0, 0, 1 };
Recombine Surface "*";
Mesh 2;
"""
GMSH_QUAD_CUBE = """
SetFactory("OpenCASCADE");
Box(1) = {0, 0, 0, 1, 1, 1};
Transfinite Line "*" = 8;
Transfinite Surface "*";
Transfinite Volume "*";
Mesh.RecombineAll = 1;
Mesh.Recombine3DAll = 1;
Mesh.Recombine3DLevel = 2;
Mesh 3;
"""
@pytest.mark.parametrize("dim", [2, 3])
@pytest.mark.parametrize("order", [1, 3])
def test_simplex_gmsh(dim, order, visualize=False):
    """Mesh the GMSH_SPHERE geometry with simplices at each dim/order combo."""
    if search_on_path(["gmsh"]) is None:
        pytest.skip("gmsh executable not found")

    # Keep gmsh's temporary files around only when inspecting the run.
    save_tmp_files_in = f"simplex_{order}_{dim}d" if visualize else None

    from gmsh_interop_b.reader import generate_gmsh, GmshMeshReceiverBase
    from gmsh_interop_b.runner import ScriptSource

    receiver = GmshMeshReceiverBase()
    geometry = ScriptSource(GMSH_SPHERE, "geo")
    generate_gmsh(receiver, geometry, dimensions=dim, order=order,
                  save_tmp_files_in=save_tmp_files_in)
@pytest.mark.parametrize("dim", [2, 3])
@pytest.mark.parametrize("order", [1, 3])
def test_quad_gmsh(dim, order, visualize=False):
    """Mesh quad (2D sphere) / hex (3D cube) geometry at each dim/order combo."""
    if search_on_path(["gmsh"]) is None:
        pytest.skip("gmsh executable not found")

    if visualize:
        # Fixed copy-paste defect: this previously wrote to "simplex_*",
        # mislabelling the output and clashing with test_simplex_gmsh's
        # debug directory.
        save_tmp_files_in = f"quad_{order}_{dim}d"
    else:
        save_tmp_files_in = None

    from gmsh_interop_b.reader import generate_gmsh, GmshMeshReceiverBase
    from gmsh_interop_b.runner import ScriptSource

    if dim == 2:
        source = ScriptSource(GMSH_QUAD_SPHERE, "geo")
    else:
        source = ScriptSource(GMSH_QUAD_CUBE, "geo")

    mr = GmshMeshReceiverBase()
    generate_gmsh(mr, source, dimensions=dim, order=order,
                  save_tmp_files_in=save_tmp_files_in)
# }}}
if __name__ == "__main__":
    # Run a single test expression supplied on the command line, or the
    # whole file under pytest when invoked without arguments.
    import sys
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        pytest.main([__file__])
# vim: foldmethod=marker
| StarcoderdataPython |
5000160 | <gh_stars>0
# Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
# Follow up:
# If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum over all non-empty contiguous subarrays.

        Single left-to-right scan (Kadane-style): `running` is the best sum
        of a subarray ending at the current element, `best` the best seen
        anywhere.  O(n) time, O(1) extra space.
        """
        best = nums[0]
        running = nums[0]
        for value in nums[1:]:
            # Restart the window at `value` only when the running sum is
            # non-positive and strictly smaller; otherwise extend it.
            if running < value and running <= 0:
                running = value
            else:
                running += value
            best = max(best, running)
        return best
# [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# ^
# is tempSum > maxSum
# if it is, we'll replace maxSum with tempSum
# if it isn't, we'll replace temSum with current index value | StarcoderdataPython |
1853211 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
def main(argv):
    """Convert a text hex dump back to binary on stdout.

    argv[1] names a file whose first two lines are a header (copied through
    verbatim as raw bytes) followed by one hexadecimal value per line.
    """
    filename = argv[1]
    data = []
    with open(filename, 'rb') as f:
        header1 = f.readline()
        header2 = f.readline()
        line = str(f.readline(), 'utf-8')
        while line:
            # line[:-1] drops the trailing newline before parsing.
            # NOTE(review): a final line *without* a newline would lose its
            # last hex digit — confirm the input always ends with a newline.
            i = int('0x' + line[:-1], 16)
            data.append(i)
            line = str(f.readline(), 'utf-8')
    # sys.stdout.buffer.write(bytes(header1, encoding='utf-8'))
    # sys.stdout.buffer.write(bytes(header2, encoding='utf-8'))
    sys.stdout.buffer.write(header1)
    sys.stdout.buffer.write(header2)
    # Each parsed value must fit in one byte (0..255) for bytes(data).
    sys.stdout.buffer.write(bytes(data))


if __name__ == '__main__':
    main(sys.argv)
| StarcoderdataPython |
3245999 | <filename>pytibrv/tport.py
##
# pytibrv/tport.py
# TIBRV Library for PYTHON
# tibrvTransport_XXX
#
# LAST MODIFIED : V1.1 20170220 ARIEN <EMAIL>
#
# DESCRIPTIONS
# -----------------------------------------------------------------------------
#
#
# FEATURES: * = un-implement
# -----------------------------------------------------------------------------
# tibrvTransport_Create
# tibrvTransport_CreateInbox
# tibrvTransport_Destroy
# tibrvTransport_GetDaemon
# tibrvTransport_GetNetwork
# tibrvTransport_GetService
# tibrvTransport_GetDescription
# tibrvTransport_RequestReliability
# tibrvTransport_Send
# tibrvTransport_SendRequest
# tibrvTransport_SendReply
# tibrvTransport_SetDescription
#
# *tibrvTransport_CreateAcceptVc
# *tibrvTransport_CreateConnectVc
# *tibrvTransport_WaitForVcConnection
# *tibrvTransport_Sendv
# *tibrvTransport_SetSendingWaitLimit
# *tibrvTransport_GetSendingWaitLimit
# *tibrvTransport_SetBatchMode
# *tibrvTransport_SetBatchSize
# *tibrvTransport_CreateLicensed
#
#
# CHANGED LOGS
# -----------------------------------------------------------------------------
# 20170220 V1.1 ARIEN <EMAIL>
# REMOVE TIBRV C Header
#
# 20161211 V1.0 ARIEN <EMAIL>
# CREATED
#
##
import ctypes as _ctypes
from .types import tibrv_status, tibrvTransport, tibrvMsg, \
TIBRV_SUBJECT_MAX
from .api import _rv, _cstr, _pystr, \
_c_tibrvTransport, _c_tibrvMsg, \
_c_tibrv_status, _c_tibrv_u32, _c_tibrv_f64
from .status import TIBRV_INVALID_MSG, TIBRV_INVALID_ARG, TIBRV_INVALID_TRANSPORT
##-----------------------------------------------------------------------------
# TIBRV API : tibrv/tport.h
##-----------------------------------------------------------------------------
_rv.tibrvTransport_Create.argtypes = [_ctypes.POINTER(_c_tibrvTransport),
                                      _ctypes.c_char_p,
                                      _ctypes.c_char_p,
                                      _ctypes.c_char_p]
_rv.tibrvTransport_Create.restype = _c_tibrv_status


def tibrvTransport_Create(service: str, network: str, daemon: str) -> (tibrv_status, tibrvTransport):
    """Create a Rendezvous transport; returns (status, transport handle)."""
    # Out-parameter for the C call; .value holds the new handle afterwards.
    tx = _c_tibrvTransport(0)
    status = _rv.tibrvTransport_Create(_ctypes.byref(tx), _cstr(service), _cstr(network), _cstr(daemon))
    return status, tx.value
##
_rv.tibrvTransport_Send.argtypes = [_c_tibrvTransport, _c_tibrvMsg]
_rv.tibrvTransport_Send.restype = _c_tibrv_status


def tibrvTransport_Send(transport: tibrvTransport, message: tibrvMsg) -> tibrv_status:
    """Send *message* on *transport*; returns a TIBRV status code.

    Invalid or unconvertible handles are reported via status codes rather
    than exceptions, mirroring the C API.
    """
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT
    if message is None or message == 0:
        return TIBRV_INVALID_MSG
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT
    try:
        msg = _c_tibrvMsg(message)
    except:
        return TIBRV_INVALID_MSG
    status = _rv.tibrvTransport_Send(tx, msg)
    return status
##
_rv.tibrvTransport_SendRequest.argtypes = [_c_tibrvTransport,
                                           _c_tibrvMsg,
                                           _ctypes.POINTER(_c_tibrvMsg),
                                           _c_tibrv_f64]
_rv.tibrvTransport_SendRequest.restype = _c_tibrv_status


def tibrvTransport_SendRequest(transport: tibrvTransport, message: tibrvMsg,
                               idleTimeout: float) -> (tibrv_status, tibrvMsg):
    """Send *message* and wait up to *idleTimeout* seconds for a reply.

    Returns (status, reply-message handle); the handle is only meaningful
    when status indicates success.
    """
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    if message is None or message == 0:
        return TIBRV_INVALID_MSG, None
    if idleTimeout is None:
        return TIBRV_INVALID_ARG, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        msg = _c_tibrvMsg(message)
    except:
        return TIBRV_INVALID_MSG, None
    # Out-parameter that receives the reply message handle.
    r = _c_tibrvMsg(0)
    try:
        t = _c_tibrv_f64(idleTimeout)
    except:
        return TIBRV_INVALID_ARG, None
    status = _rv.tibrvTransport_SendRequest(tx, msg, _ctypes.byref(r), t)
    return status, r.value
##
_rv.tibrvTransport_SendReply.argtypes = [_c_tibrvTransport, _c_tibrvMsg, _c_tibrvMsg]
_rv.tibrvTransport_SendReply.restype = _c_tibrv_status


def tibrvTransport_SendReply(transport: tibrvTransport, message: tibrvMsg, requestMessage: tibrvMsg) \
        -> tibrv_status:
    """Send *message* as the reply to *requestMessage*; returns a status code."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT
    if message is None or message == 0:
        return TIBRV_INVALID_MSG
    if requestMessage is None or requestMessage == 0:
        return TIBRV_INVALID_MSG
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT
    try:
        msg = _c_tibrvMsg(message)
        req = _c_tibrvMsg(requestMessage)
    except:
        return TIBRV_INVALID_ARG
    status = _rv.tibrvTransport_SendReply(tx, msg, req)
    return status
##
_rv.tibrvTransport_Destroy.argtypes = [_c_tibrvTransport]
_rv.tibrvTransport_Destroy.restype = _c_tibrv_status


def tibrvTransport_Destroy(transport: tibrvTransport) -> tibrv_status:
    """Destroy the transport identified by *transport*; returns a status code."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT
    try:
        handle = _c_tibrvTransport(transport)
    except:
        # Handle could not be converted to the C type.
        return TIBRV_INVALID_TRANSPORT
    return _rv.tibrvTransport_Destroy(handle)
##
_rv.tibrvTransport_CreateInbox.argtypes = [_c_tibrvTransport, _ctypes.c_char_p, _c_tibrv_u32]
_rv.tibrvTransport_CreateInbox.restype = _c_tibrv_status


def tibrvTransport_CreateInbox(transport: tibrvTransport) -> (tibrv_status, str):
    """Create a unique point-to-point inbox name; returns (status, subject)."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    # Buffer sized to the maximum subject length; filled in by the C call.
    subj = _ctypes.create_string_buffer(TIBRV_SUBJECT_MAX)
    status = _rv.tibrvTransport_CreateInbox(tx, subj, _ctypes.sizeof(subj))
    return status, _pystr(subj)
##
_rv.tibrvTransport_GetService.argtypes = [_c_tibrvTransport, _ctypes.POINTER(_ctypes.c_char_p)]
_rv.tibrvTransport_GetService.restype = _c_tibrv_status


def tibrvTransport_GetService(transport: tibrvTransport) -> (tibrv_status, str):
    """Return (status, service string) for the transport."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    # Out-parameter filled by the C call.
    sz = _ctypes.c_char_p()
    status = _rv.tibrvTransport_GetService(tx, _ctypes.byref(sz))
    return status, _pystr(sz)


##
_rv.tibrvTransport_GetNetwork.argtypes = [_c_tibrvTransport, _ctypes.POINTER(_ctypes.c_char_p)]
_rv.tibrvTransport_GetNetwork.restype = _c_tibrv_status


def tibrvTransport_GetNetwork(transport: tibrvTransport) -> (tibrv_status, str):
    """Return (status, network string) for the transport."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    sz = _ctypes.c_char_p()
    status = _rv.tibrvTransport_GetNetwork(tx, _ctypes.byref(sz))
    return status, _pystr(sz)
##
_rv.tibrvTransport_GetDaemon.argtypes = [_c_tibrvTransport, _ctypes.POINTER(_ctypes.c_char_p)]
_rv.tibrvTransport_GetDaemon.restype = _c_tibrv_status


def tibrvTransport_GetDaemon(transport: tibrvTransport) -> (tibrv_status, str):
    """Return (status, daemon string) for the transport.

    Mirrors tibrvTransport_GetService/GetNetwork: validate the handle, then
    hand an out-parameter to the C call.
    """
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    # Consistency fix: the sibling getters build the out-parameter without a
    # try/except; _ctypes.c_char_p() with no argument cannot raise, so the
    # extra handler that returned TIBRV_INVALID_ARG here was dead code.
    sz = _ctypes.c_char_p()
    status = _rv.tibrvTransport_GetDaemon(tx, _ctypes.byref(sz))
    return status, _pystr(sz)
##
_rv.tibrvTransport_SetDescription.argtypes = [_c_tibrvTransport, _ctypes.c_char_p]
_rv.tibrvTransport_SetDescription.restype = _c_tibrv_status


def tibrvTransport_SetDescription(transport: tibrvTransport, description: str) -> tibrv_status:
    """Set the transport's display description; returns a status code."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT
    if description is None:
        return TIBRV_INVALID_ARG
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT
    try:
        sz = _cstr(description)
    except:
        return TIBRV_INVALID_ARG
    status = _rv.tibrvTransport_SetDescription(tx, sz)
    return status


##
_rv.tibrvTransport_GetDescription.argtypes = [_c_tibrvTransport, _ctypes.POINTER(_ctypes.c_char_p)]
_rv.tibrvTransport_GetDescription.restype = _c_tibrv_status


def tibrvTransport_GetDescription(transport: tibrvTransport) -> (tibrv_status, str):
    """Return (status, description string) for the transport."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT, None
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT, None
    # Out-parameter filled by the C call.
    sz = _ctypes.c_char_p()
    status = _rv.tibrvTransport_GetDescription(tx, _ctypes.byref(sz))
    return status, _pystr(sz)
##
_rv.tibrvTransport_RequestReliability.argtypes = [_c_tibrvTransport, _c_tibrv_f64]
_rv.tibrvTransport_RequestReliability.restype = _c_tibrv_status


def tibrvTransport_RequestReliability(transport: tibrvTransport, reliability: float) -> tibrv_status:
    """Request a reliability window (seconds) for the transport."""
    if transport is None or transport == 0:
        return TIBRV_INVALID_TRANSPORT
    if reliability is None:
        return TIBRV_INVALID_ARG
    try:
        tx = _c_tibrvTransport(transport)
    except:
        return TIBRV_INVALID_TRANSPORT
    try:
        n = _c_tibrv_f64(reliability)
    except:
        return TIBRV_INVALID_ARG
    status = _rv.tibrvTransport_RequestReliability(tx, n)
    return status
| StarcoderdataPython |
3356835 | <reponame>gmedard/aws-decoupled-serverless-scheduler<gh_stars>10-100
#Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import boto3
import os
import sys
import json
from collections import OrderedDict
sf_client = boto3.client('stepfunctions')
ec2_client = boto3.client('ec2')
ec2_r = boto3.resource('ec2')
def handler(event, context):
    """Start the scheduler Step Functions workflow for a running EC2 instance.

    Invoked with an EC2 state-change event.  Reads the instance's tags: the
    tag whose key matches env var TAGKEY selects the SQS queue family;
    instances without that tag are ignored.  Workflow parameters come from
    environment variables.
    """
    print(event)
    instance_id = event['detail']['instance-id']
    print(instance_id)

    # Look up the tag values on the instance that triggered the event.
    ec2instance = ec2_r.Instance(instance_id)
    queue_name = None
    autoscaling_group = 'nothing'
    tag_key = os.getenv('TAGKEY')
    for tags in ec2instance.tags:
        print(tags)
        # NOTE(review): substring match kept from the original — confirm tag
        # keys are exact and switch to == if so.
        if tag_key in tags["Key"]:
            queue_name = tags["Value"]
        if 'aws:autoscaling:groupName' in tags["Key"]:
            autoscaling_group = tags["Value"]

    # Bug fix: the previous sentinel check was `'nothing' in queue_name`, a
    # substring test that also skipped any real queue whose name happens to
    # contain "nothing".  An explicit None sentinel is unambiguous.
    if queue_name is None:
        print('did not start sf')
        return "Not tagged for scheduler"
    print(queue_name)

    # Workflow configuration from environment variables.
    timeout_Job = os.getenv('TIMEOUTJOB')
    region = os.getenv('REGION')
    state_machine_name = os.getenv('STATEMACHINENAME')
    state_machine_arn = os.getenv('STATEMACHINEARN')
    sqs_name = queue_name
    sqs_name_out = queue_name + '-finished'
    sqs_name_failed = queue_name + '-failed'
    table = os.getenv('TABLENAME')

    # JSON input parameter payload for the Step Functions workflow.
    input = {"input": {"sqs_name": sqs_name,
                       "sqs_name_out": sqs_name_out,
                       "sqs_name_failed": sqs_name_failed,
                       "region": region,
                       "state_machine_arn": state_machine_arn,
                       "state_machine_name": state_machine_name,
                       "Timeout_Job": timeout_Job,
                       "instance_id": instance_id,
                       "autoscaling_group": autoscaling_group,
                       "table": table
                       }}

    # Start the step-functions-wrapped workflow for this instance.
    response = sf_client.start_execution(
        stateMachineArn=state_machine_arn,
        input=json.dumps(input))
    print(response)
| StarcoderdataPython |
5034062 | <filename>librarian/views.py
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
from django.shortcuts import render
from .models import librarian
#from datetime import datetime as dt
#from datetime import datetime as dte
def library(request):
    """Render the librarian input form."""
    return render(request,'librarian/librarian.html')


def lbran(request):
    """Record a book loan from a POSTed form.

    GET requests are redirected back to the input page; POST requests are
    validated (all three fields non-empty) before a row is created.
    """
    if request.method == "POST":
        student_ID=request.POST['student_ID']
        book_ID = request.POST['book_ID']
        dt = request.POST['date']
        context = {'error': 0}
        # An empty string in any field counts as missing input.
        if not (book_ID and student_ID and dt ):
            context['error'] = 1
        else:
            librarian.objects.create(student_ID= student_ID, book_ID=book_ID,date=dt)
        if not context['error']:
            return render(request,'librarian/ok.html')
        else:
            # Re-show the form with the error flag set.
            return render(request, 'librarian/librarian.html',context)
    else:
        return HttpResponseRedirect('/librarian/input/')


def view(request):
    """List every loan record."""
    b = librarian.objects.all()
    return render(request,"librarian/all.html", {'form':b})
def new(request):
return render(request,'librarian/new.html') | StarcoderdataPython |
8091521 | import torch
import numpy as np
def COCO2HUMAN(coco_keypoints):
    '''
    Reorder COCO keypoints (N, 17, C) into this project's human joint order,
    synthesising pelvis, torso, head and thorax joints as midpoints.

    COCO "keypoints":
        0: nose,        1: left_eye,      2: right_eye,     3: left_ear,
        4: right_ear,   5: left_shoulder, 6: right_shoulder,
        7: left_elbow,  8: right_elbow,   9: left_wrist,   10: right_wrist,
       11: left_hip,   12: right_hip,    13: left_knee,    14: right_knee,
       15: left_ankle, 16: right_ankle
    '''
    def _midpoint(a, b):
        # Midpoint of two COCO joints, kept as a length-1 joint axis.
        return np.expand_dims(
            (coco_keypoints[:, a, :] + coco_keypoints[:, b, :]) / 2., 1)

    pelvis = _midpoint(11, 12)
    head = _midpoint(1, 2)
    thorax = _midpoint(5, 6)
    torso = (pelvis + thorax) / 2.

    lower_body = coco_keypoints[:, [12, 14, 16, 11, 13, 15], :]
    upper_body = coco_keypoints[:, [5, 7, 9, 6, 8, 10], :]

    return np.concatenate(
        [pelvis, lower_body, torso, head, upper_body, thorax], 1)
def MPII2HUMAN(mpii_keypoints):
    '''
    Reorder MPII keypoints (N, 16, C) into this project's human joint order,
    synthesising a torso joint as the pelvis/thorax midpoint.

    MPII joints:
        0: r_ankle, 1: r_knee, 2: r_hip, 3: l_hip, 4: l_knee, 5: l_ankle,
        6: pelvis,  7: thorax, 8: neck,  9: headtop,
       10: r_wrist, 11: r_elbow, 12: r_shoulder,
       13: l_shoulder, 14: l_elbow, 15: l_wrist
    '''
    pelvis = mpii_keypoints[:, 6, :]
    thorax = mpii_keypoints[:, 7, :]
    torso = np.expand_dims((pelvis + thorax) / 2., 1)

    lower_body = mpii_keypoints[:, [6, 2, 1, 0, 3, 4, 5], :]
    upper_body = mpii_keypoints[:, [9, 13, 14, 15, 12, 11, 10, 7], :]

    return np.concatenate([lower_body, torso, upper_body], 1)
def transform_joint_to_other_db(src_joint, src_name, dst_name):
    """Re-map joints from one dataset's naming scheme to another's.

    Parameters
    ----------
    src_joint : np.ndarray of shape (len(src_name), ...)
        Joint data in the source ordering.
    src_name, dst_name : sequence of str
        Joint names for the source and destination orderings.

    Returns
    -------
    np.ndarray of shape (len(dst_name), ...) where each destination joint is
    copied from the same-named source joint; names absent from the source
    stay zero.

    Cleanup: removed the unused ``src_joint_num`` local, redundant
    parentheses in the zeros shape, and the index-based loop.
    """
    dst_joint_num = len(dst_name)
    new_joint = np.zeros((dst_joint_num,) + src_joint.shape[1:])
    for src_idx, name in enumerate(src_name):
        if name in dst_name:
            new_joint[dst_name.index(name)] = src_joint[src_idx]
    return new_joint
| StarcoderdataPython |
6549639 | <filename>docs/errcode.py
"""
错误代码管理器:
管理现有的错误代码与描述
生成错误代码描述文档
"""
# 将文件路径向上转移
import os as _os
import json as _json
import argparse as _argparse
from collections import namedtuple as _namedtuple
from collections import OrderedDict as _ODict
from typing import List as _List
from typing import Dict as _Dict
workdir: str = _os.path.dirname(_os.path.realpath(__file__))
__import__("sys").path.append(_os.path.dirname(workdir))
# pylint: disable=wrong-import-position
from leaf.core import error as _error
# 设定错误信息存储类
ErrorInfo = _namedtuple("ErrorInfo", ("description", "module"))
# 获取 error.Error 的所有子类信息
__informations: _Dict[int, str] = dict()
def reload():
    """Rebuild the error-code table from all current Error subclasses (via reflection)."""
    for error_class in _error.Error.__subclasses__():
        entry = ErrorInfo(error_class.description, error_class.__module__)
        __informations[error_class.code] = entry
# markdown 表头
__HEADER = \
"""
以下是 _Leaf_ 的现有的所有错误代码及其描述:
| 错误代码 | 错误描述 | 所属模块 |
| :----: | | :----: | :----: |
"""
# 命令行参数接收
__parser = _argparse.ArgumentParser(description="错误代码导出")
__parser.add_argument("--type", "-t", default="markdown",
help="type 表示导出文件类型, 支持 json, markdown. 默认为: markdown")
__parser.add_argument("--export", "-e", help="export 参数表示导出的文件名", required=True)
def __makeline(key: int, info: ErrorInfo) -> str:
    """Format one markdown table row for an error code."""
    return f"| {key} | {info.description} | {info.module} |"
def __markdown(informations: _Dict[int, ErrorInfo]) -> str:
    """Render the markdown table body, rows sorted by ascending error code."""
    ordered = _ODict(sorted(informations.items()))
    return '\n'.join(
        __makeline(code, info) for code, info in ordered.items())
def __json(informations: _Dict[int, ErrorInfo]) -> str:
    """Render the error codes as pretty-printed, key-sorted JSON."""
    errcodes = {code: dict(info._asdict()) for code, info in informations.items()}
    return _json.dumps(errcodes, indent=4, sort_keys=True, ensure_ascii=False)
def export(filename: str, _type: str) -> _Dict[int, ErrorInfo]:
    """Write the collected error codes to *filename* in the given format.

    _type is 'markdown' or 'json'; any other value writes an empty file.
    Returns the in-memory error-code mapping.
    """
    content = ''
    if _type == "markdown":
        content = __HEADER + __markdown(__informations)
    if _type == "json":
        content = __json(__informations)
    with open(filename, 'w', encoding="utf-8") as handler:
        handler.write(content)
    # Export finished (original note: "switch out of the current directory").
    print("错误代码已导出到文件 '" + filename + "'")
    return __informations
reload()
if __name__ == "__main__":
args = __parser.parse_args()
export(args.export, args.type)
| StarcoderdataPython |
11239401 | from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase
from .models import BaseManagerModel, RenameManagerModel, ReplaceManagerModel, MultipleManagerModel
class TestModelIdent(TestCase):
    """Exercise manager naming/replacement and ident_ lookup on the four models."""

    def setUp(self):
        # One instance of each model variant, created via each model's own
        # factory classmethod.
        self.base_model = BaseManagerModel.create()
        self.rename_model = RenameManagerModel.create()
        self.replace_model = ReplaceManagerModel.create()
        self.multiple_model = MultipleManagerModel.create()

    def test_created(self):
        # Each model is reachable through its configured manager attribute.
        self.assertEqual(BaseManagerModel.objects.get(pk=1), self.base_model)
        self.assertEqual(RenameManagerModel.instances.get(pk=1), self.rename_model)
        self.assertEqual(MultipleManagerModel.objects.get(pk=1), self.multiple_model)
        # NOTE(review): these managers apparently exclude the created rows —
        # confirm against the filtering defined in models.py.
        with self.assertRaises(ObjectDoesNotExist):
            ReplaceManagerModel.objects.get(pk=1)
        with self.assertRaises(ObjectDoesNotExist):
            MultipleManagerModel.instances.get(pk=1)

    def test_ident(self):
        # ident_(pk) resolves an instance by primary key.
        self.assertEquals(BaseManagerModel.ident_(1).pk, 1)

    def test_non_existant_pk(self):
        with self.assertRaises(ObjectDoesNotExist):
            BaseManagerModel.ident_(2)
| StarcoderdataPython |
1605246 | <reponame>pystatic/pystatic
from .prep_alias import A
A_ext = A
| StarcoderdataPython |
9632357 | """
Canadian Astronomy Data Centre (CADC).
"""
from astropy import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.cadc`.
    """

    # Registry listing all CADC service capabilities.
    CADC_REGISTRY_URL = _config.ConfigItem(
        'http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/reg/resource-caps',
        'CADC registry information')
    # IVOA identifiers for the individual CADC services.
    CADCTAP_SERVICE_URI = _config.ConfigItem('ivo://cadc.nrc.ca/argus',
                                             'CADC TAP service identifier')
    CADCDATLINK_SERVICE_URI = _config.ConfigItem(
        'ivo://cadc.nrc.ca/caom2ops', 'CADC DataLink service identifier')
    CADCLOGIN_SERVICE_URI = _config.ConfigItem(
        'ivo://cadc.nrc.ca/gms', 'CADC login service identified')
    # Connection time limit, in seconds.
    TIMEOUT = _config.ConfigItem(
        30, 'Time limit for connecting to template_module server.')
from .core import Cadc, CadcClass # noqa
__all__ = ['Cadc', 'CadcClass', 'conf']
| StarcoderdataPython |
4945964 | #CREATED BY <NAME> (Github : AnkDos)
def check_pallindrome(number_to_check):
    """Return True when the decimal digits of *number_to_check* read the
    same forwards and backwards, False otherwise."""
    digits = str(number_to_check)
    return digits == digits[::-1]
print(check_pallindrome(1991)) # demo: prints True (1991 is a palindrome); True/False otherwise
| StarcoderdataPython |
3520580 | <filename>venv/lib/python3.8/site-packages/ansible_collections/community/dns/tests/unit/plugins/modules/test_hetzner_dns_record_sets.py
# -*- coding: utf-8 -*-
# (c) 2021 <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
BaseTestModule,
FetchUrlCall,
)
from ansible_collections.community.dns.plugins.modules import hetzner_dns_record_sets
# These imports are needed so patching below works
import ansible_collections.community.dns.plugins.module_utils.http # noqa
from .hetzner import (
HETZNER_JSON_ZONE_GET_RESULT,
HETZNER_JSON_ZONE_LIST_RESULT,
HETZNER_JSON_ZONE_RECORDS_GET_RESULT,
)
class TestHetznerDNSRecordJSON(BaseTestModule):
MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.dns.plugins.modules.hetzner_dns_record_sets.AnsibleModule'
MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.dns.plugins.module_utils.http.fetch_url'
    def test_unknown_zone(self, mocker):
        """Looking up a zone name absent from the zone list fails with 'Zone not found'."""
        result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
            'hetzner_token': 'foo',
            'zone_name': 'example.org',
            'record_sets': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            # The zone list fixture only contains example.com, so the
            # example.org query yields no match.
            FetchUrlCall('GET', 200)
            .expect_header('accept', 'application/json')
            .expect_header('auth-api-token', 'foo')
            .expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
            .expect_query_values('name', 'example.org')
            .return_header('Content-Type', 'application/json')
            .result_json(HETZNER_JSON_ZONE_LIST_RESULT),
        ])
        assert result['msg'] == 'Zone not found'
    def test_unknown_zone_id(self, mocker):
        """A zone id that the API answers with HTTP 404 fails with 'Zone not found'."""
        result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
            'hetzner_token': 'foo',
            'zone_id': 23,
            'record_sets': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 404)
            .expect_header('accept', 'application/json')
            .expect_header('auth-api-token', 'foo')
            .expect_url('https://dns.hetzner.com/api/v1/zones/23')
            .return_header('Content-Type', 'application/json')
            .result_json(dict(message="")),
        ])
        assert result['msg'] == 'Zone not found'
    def test_auth_error(self, mocker):
        """An HTTP 401 on the zone lookup surfaces as an authentication failure."""
        result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
            'hetzner_token': 'foo',
            'zone_name': 'example.org',
            'record_sets': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 401)
            .expect_header('accept', 'application/json')
            .expect_header('auth-api-token', 'foo')
            .expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
            .expect_query_values('name', 'example.org')
            .result_str(''),
        ])
        assert result['msg'] == 'Cannot authenticate: Unauthorized: the authentication parameters are incorrect (HTTP status 401)'
def test_auth_error_forbidden(self, mocker):
result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_id': 23,
'record_sets': [],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 403)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones/23')
.result_json(dict(message="")),
])
assert result['msg'] == 'Cannot authenticate: Forbidden: you do not have access to this resource (HTTP status 403)'
    def test_other_error(self, mocker):
        """A non-JSON HTTP 500 on the zone lookup is reported as a generic API error."""
        result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
            'hetzner_token': 'foo',
            'zone_name': 'example.org',
            'record_sets': [],
            '_ansible_remote_tmp': '/tmp/tmp',
            '_ansible_keep_remote_files': True,
        }, [
            FetchUrlCall('GET', 500)
            .expect_header('accept', 'application/json')
            .expect_header('auth-api-token', 'foo')
            .expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
            .expect_query_values('name', 'example.org')
            .result_str(''),
        ])
        # Only the stable prefix/substring are checked: the exact query-string
        # ordering in the URL is not deterministic.
        assert result['msg'].startswith('Error: GET https://dns.hetzner.com/api/v1/zones?')
        assert 'did not yield JSON data, but HTTP status code 500 with Content-Type' in result['msg']
def test_key_collision_error(self, mocker):
result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_id': '42',
'record_sets': [
{
'record': 'test.example.com',
'type': 'A',
'ignore': True,
},
{
'prefix': 'test',
'type': 'A',
'value': ['192.168.127.12'],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones/42')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_GET_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['msg'] == 'Found multiple sets for record test.example.com and type A: index #0 and #1'
def test_conversion_error(self, mocker):
result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'TXT',
'ttl': 3600,
'value': [
'"hellö',
],
},
],
'txt_transformation': 'quoted',
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['msg'] == (
'Error while converting DNS values: While processing record from the user: Missing double quotation mark at the end of value'
)
def test_idempotency_empty(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_id': '42',
'record_sets': [],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones/42')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_GET_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['changed'] is False
assert result['zone_id'] == '42'
def test_idempotency_present(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'MX',
'ttl': 3600,
'value': [
'10 example.com',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['changed'] is False
assert result['zone_id'] == '42'
def test_removal_prune(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'prune': 'true',
'record_sets': [
{
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['172.16.31.10'],
},
{
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['192.168.127.12'],
},
{
'prefix': '@',
'ttl': 3600,
'type': 'AAAA',
'value': [],
},
{
'record': 'example.com',
'type': 'MX',
'ignore': True,
},
{
'record': 'example.com',
'type': 'NS',
'ignore': True,
},
{
'record': 'example.com',
'type': 'SOA',
'ignore': True,
},
{
'record': 'foo.example.com',
'type': 'TXT',
'ttl': None,
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
'_ansible_diff': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('DELETE', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/{0}'.format(127))
.result_str(''),
FetchUrlCall('DELETE', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/{0}'.format(128))
.result_str(''),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
assert result['diff']['before'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::3'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'NS',
'value': ['helium.ns.hetzner.de.', 'hydrogen.ns.hetzner.com.', 'oxygen.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
assert result['diff']['after'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'type': 'NS',
'ttl': None,
'value': ['helium.ns.hetzner.de.', 'hydrogen.ns.hetzner.com.', 'oxygen.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
def test_change_add_one_check_mode(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_id': '42',
'record_sets': [
{
'record': 'example.com',
'type': 'CAA',
'ttl': 3600,
'value': [
'0 issue "letsencrypt.org"',
],
},
],
'_ansible_check_mode': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones/42')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_GET_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
def test_change_add_one_check_mode_prefix(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_id': '42',
'record_sets': [
{
'prefix': '',
'type': 'CAA',
'ttl': 3600,
'value': [
'0 issue "letsencrypt.org"',
],
},
],
'_ansible_check_mode': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones/42')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_GET_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
def test_change_add_one(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'CAA',
'ttl': 3600,
'value': [
'128 issue "letsencrypt.org xxx"',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('POST', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'CAA')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], '128 issue "letsencrypt.org xxx"')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '133',
'type': 'CAA',
'name': '@',
'value': '128 issue "letsencrypt.org xxx"',
'ttl': 3600,
'zone_id': '42',
},
}),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
def test_change_add_one_prefix(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'prefix': '',
'type': 'CAA',
'ttl': 3600,
'value': [
'128 issue "letsencrypt.org"',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('POST', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'CAA')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], '128 issue "letsencrypt.org"')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '133',
'type': 'CAA',
'name': '@',
'value': '128 issue "letsencrypt.org"',
'ttl': 3600,
'zone_id': '42',
},
}),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
def test_change_add_one_idn_prefix(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'prefix': '☺',
'type': 'CAA',
'ttl': 3600,
'value': [
'128 issue "letsencrypt.org"',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('POST', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'CAA')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], 'xn--74h')
.expect_json_value(['value'], '128 issue "letsencrypt.org"')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '133',
'type': 'CAA',
'name': 'xn--74h',
'value': '128 issue "letsencrypt.org"',
'ttl': 3600,
'zone_id': '42',
},
}),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
def test_change_add_one_failed(self, mocker):
result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'CAA',
'ttl': 3600,
'value': [
'128 issue "letsencrypt.org xxx"',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('POST', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'CAA')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], '128 issue "letsencrypt.org xxx"')
.return_header('Content-Type', 'application/json')
.result_json({'record': {}, 'error': {'code': 500, 'message': 'Internal Server Error'}}),
])
assert result['msg'] == (
'Error: POST https://dns.hetzner.com/api/v1/records resulted in API error 500 (Internal Server Error)'
' with error message "Internal Server Error" (error code 500)'
)
def test_change_add_two_failed(self, mocker):
result = self.run_module_failed(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'CAA',
'ttl': 3600,
'value': [
'128 issue "letsencrypt.org xxx"',
'128 issuewild "letsencrypt.org"',
],
},
],
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('POST', 422)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/bulk')
.expect_json_value_absent(['records', 0, 'id'])
.expect_json_value(['records', 0, 'type'], 'CAA')
.expect_json_value(['records', 0, 'ttl'], 3600)
.expect_json_value(['records', 0, 'zone_id'], '42')
.expect_json_value(['records', 0, 'name'], '@')
.expect_json_value(['records', 0, 'value'], '128 issue "letsencrypt.org xxx"')
.expect_json_value_absent(['records', 1, 'id'])
.expect_json_value(['records', 1, 'type'], 'CAA')
.expect_json_value(['records', 1, 'ttl'], 3600)
.expect_json_value(['records', 1, 'zone_id'], '42')
.expect_json_value(['records', 1, 'name'], '@')
.expect_json_value(['records', 1, 'value'], '128 issuewild "letsencrypt.org"')
.expect_json_value_absent(['records', 2])
.return_header('Content-Type', 'application/json')
.result_json({
'invalid_records': [
{
'type': 'CAA',
'name': '@',
'value': '128 issue "letsencrypt.org xxx"',
'ttl': 3600,
'zone_id': '42',
},
{
'type': 'CAA',
'name': '@',
'value': '128 issuewild "letsencrypt.org"',
'ttl': 3600,
'zone_id': '42',
},
],
'valid_records': [],
'records': [],
'error': {
'message': 'invalid CAA record, invalid CAA record, ',
'code': 422,
},
}),
])
assert result['msg'] == (
'Errors: Creating CAA record "128 issue "letsencrypt.org xxx"" with TTL 3600 for zone 42 failed with unknown reason;'
' Creating CAA record "128 issuewild "letsencrypt.org"" with TTL 3600 for zone 42 failed with unknown reason'
)
def test_change_modify_list(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'NS',
'ttl': None,
'value': [
'helium.ns.hetzner.de.',
'ytterbium.ns.hetzner.com.',
],
},
],
'_ansible_diff': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('DELETE', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/131')
.result_str(''),
FetchUrlCall('PUT', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/132')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'NS')
.expect_json_value_absent(['ttl'])
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], 'ytterbium.ns.hetzner.com.')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '131',
'type': 'NS',
'name': '@',
'value': 'ytterbium.ns.hetzner.com.',
'zone_id': '42',
},
}),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
assert 'diff' in result
assert 'before' in result['diff']
assert 'after' in result['diff']
assert result['diff']['before'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::3'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'NS',
'value': ['helium.ns.hetzner.de.', 'hydrogen.ns.hetzner.com.', 'oxygen.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
assert result['diff']['after'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::3'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'type': 'NS',
'ttl': None,
'value': ['helium.ns.hetzner.de.', 'ytterbium.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
def test_change_modify_list_ttl(self, mocker):
result = self.run_module_success(mocker, hetzner_dns_record_sets, {
'hetzner_token': 'foo',
'zone_name': 'example.com',
'record_sets': [
{
'record': 'example.com',
'type': 'NS',
'ttl': 3600,
'value': [
'helium.ns.hetzner.de.',
'ytterbium.ns.hetzner.com.',
],
},
],
'_ansible_diff': True,
'_ansible_remote_tmp': '/tmp/tmp',
'_ansible_keep_remote_files': True,
}, [
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/zones', without_query=True)
.expect_query_values('name', 'example.com')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_LIST_RESULT),
FetchUrlCall('GET', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records', without_query=True)
.expect_query_values('zone_id', '42')
.expect_query_values('page', '1')
.expect_query_values('per_page', '100')
.return_header('Content-Type', 'application/json')
.result_json(HETZNER_JSON_ZONE_RECORDS_GET_RESULT),
FetchUrlCall('DELETE', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/130')
.result_str(''),
FetchUrlCall('PUT', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/132')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'NS')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], 'helium.ns.hetzner.de.')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '130',
'type': 'NS',
'name': '@',
'value': 'ytterbium.ns.hetzner.com.',
'ttl': 3600,
'zone_id': '42',
},
}),
FetchUrlCall('PUT', 200)
.expect_header('accept', 'application/json')
.expect_header('auth-api-token', 'foo')
.expect_url('https://dns.hetzner.com/api/v1/records/131')
.expect_json_value_absent(['id'])
.expect_json_value(['type'], 'NS')
.expect_json_value(['ttl'], 3600)
.expect_json_value(['zone_id'], '42')
.expect_json_value(['name'], '@')
.expect_json_value(['value'], 'ytterbium.ns.hetzner.com.')
.return_header('Content-Type', 'application/json')
.result_json({
'record': {
'id': '131',
'type': 'NS',
'name': '@',
'value': 'ytterbium.ns.hetzner.com.',
'ttl': 3600,
'zone_id': '42',
},
}),
])
assert result['changed'] is True
assert result['zone_id'] == '42'
assert 'diff' in result
assert 'before' in result['diff']
assert 'after' in result['diff']
assert result['diff']['before'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::3'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'NS',
'value': ['helium.ns.hetzner.de.', 'hydrogen.ns.hetzner.com.', 'oxygen.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
assert result['diff']['after'] == {
'record_sets': [
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.5'],
},
{
'record': '*.example.com',
'prefix': '*',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'A',
'value': ['1.2.3.4'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'AAAA',
'value': ['2001:1:2::3'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': 3600,
'type': 'MX',
'value': ['10 example.com'],
},
{
'record': 'example.com',
'prefix': '',
'type': 'NS',
'ttl': 3600,
'value': ['helium.ns.hetzner.de.', 'ytterbium.ns.hetzner.com.'],
},
{
'record': 'example.com',
'prefix': '',
'ttl': None,
'type': 'SOA',
'value': ['hydrogen.ns.hetzner.com. dns.hetzner.com. 2021070900 86400 10800 3600000 3600'],
},
{
'record': 'foo.example.com',
'prefix': 'foo',
'ttl': None,
'type': 'TXT',
'value': [u'bär "with quotes" (use \\ to escape)'],
},
],
}
| StarcoderdataPython |
3426952 | <filename>2021/util.py
import re
from operator import add
from collections import deque, defaultdict, Counter
import copy
import sys
sys.setrecursionlimit(int(1e7))
# Grid direction table. Convention: positive y points down, so index 0 is "up".
# Increment an index (mod 4) to turn clockwise/right, decrement to turn
# counterclockwise/left.
DIRS = {
    0: (0, -1),   # up / north
    1: (1, 0),    # right / east
    2: (0, 1),    # down / south
    3: (-1, 0),   # left / west
}
# Map compass letters and U/R/D/L letters to DIRS indices.
DIRS_M = {
    'U': 0,
    'R': 1,
    'D': 2,
    'L': 3,
    'N': 0,
    'E': 1,
    'S': 2,
    'W': 3,
}
# Convenient alias for positive infinity.
INF = float('inf')
class UniqueQueue():
    """FIFO queue that silently ignores pushes of items already enqueued.

    Items enter on the left of an internal deque and leave on the right,
    so pop() returns the oldest accepted item. A companion set gives O(1)
    duplicate detection.
    """

    def __init__(self, contents=None):
        self.deque = deque()
        self.set = set()
        if contents is not None:
            for item in contents:
                self.push(item)

    def __len__(self):
        return len(self.deque)

    def push(self, x):
        """Enqueue x unless an equal item is already waiting."""
        if x in self.set:
            return
        self.set.add(x)
        self.deque.appendleft(x)

    def pop(self):
        """Remove and return the oldest item in the queue."""
        oldest = self.deque.pop()
        self.set.discard(oldest)
        return oldest
def read_input(fname, t=lambda x: x, strip_lines=True, force_multi=False):
    """Read fname, applying transform t to each line.

    Returns t(line) directly for a single-line file unless force_multi is
    set, otherwise a list of transformed lines. When strip_lines is true
    the whole file's surrounding whitespace is stripped before splitting.
    """
    with open(fname, 'r') as handle:
        text = handle.read()
    if strip_lines:
        text = text.strip()
    lines = text.split('\n')
    if len(lines) == 1 and not force_multi:
        return t(lines[0])
    return [t(line) for line in lines]
def maybe_int(s):
    """Return int(s) when s parses as an integer, else s unchanged."""
    try:
        value = int(s)
    except ValueError:
        return s
    return value
def keep_by_index(indices, arr):
    """Return the elements of arr at the given indices, in sorted index order.

    Indices at or past len(arr) are silently ignored; duplicate indices
    yield duplicate elements.
    """
    return [arr[i] for i in sorted(indices) if i < len(arr)]
def remove_by_index(indices, arr):
    """Return a copy of arr with the positions listed in indices dropped."""
    excluded = set(indices)
    return [item for pos, item in enumerate(arr) if pos not in excluded]
def min_by(f, arr):
    """Return the element of arr with the smallest f(x).

    Uses min(..., key=f) so the elements themselves never have to be
    comparable (the old (f(x), x) tuple trick raised TypeError on ties of
    non-comparable elements such as dicts). On ties the first minimal
    element is returned.
    """
    return min(arr, key=f)
def max_by(f, arr):
    """Return the element of arr with the largest f(x).

    Uses max(..., key=f) so the elements themselves never have to be
    comparable (the old (f(x), x) tuple trick raised TypeError on ties of
    non-comparable elements). On ties the first maximal element is
    returned.
    """
    return max(arr, key=f)
def parse_coord(line):
    """Parse a comma-separated coordinate string into a tuple of ints."""
    return tuple(int(part) for part in line.split(','))
def metric_taxi(a, b):
    """Manhattan (taxicab) distance between equal-length points a and b."""
    total = 0
    for i in range(len(a)):
        total += abs(a[i] - b[i])
    return total
def move_by(d, p):
    """Translate point p by d, where d is a DIRS index or a delta tuple."""
    delta = DIRS[d] if isinstance(d, int) else d
    return tuple(a + b for a, b in zip(delta, p))
def parse_list(s):
    """Parse a comma-separated int list, tolerating (), [] or <> wrappers."""
    parts = s.strip().split(',')
    return [int(part.strip('()[]<>')) for part in parts]
def fatal(*args, **kwargs):
    """Print a message and terminate the program immediately.

    Arguments are forwarded to print().
    """
    print(*args, **kwargs)
    # sys.exit is always available; the bare exit() used previously is
    # injected by the site module and is absent when run with -S.
    sys.exit()
def automata(grid, rule, iterations):
    """Run a cellular automaton for the given number of iterations.

    grid is a list of rows. rule(cell, neighbor_counts) returns the new
    cell value, where neighbor_counts is a Counter over the values of the
    (up to 8) in-bounds neighbors of the cell. Returns the final grid;
    the input grid is not mutated.
    """
    rows = len(grid)
    cols = len(grid[0])
    offsets = ((1, 0), (-1, 0), (0, 1), (0, -1),
               (-1, -1), (-1, 1), (1, -1), (1, 1))

    def neighbor_values(r, c):
        # Yields the current grid's values around (r, c), skipping
        # out-of-bounds positions at the edges.
        for dr, dc in offsets:
            rr, cc = r + dr, c + dc
            if 0 <= rr < rows and 0 <= cc < cols:
                yield grid[rr][cc]

    for _ in range(iterations):
        # The comprehension reads the old grid in full before rebinding.
        grid = [[rule(grid[r][c], Counter(neighbor_values(r, c)))
                 for c in range(cols)]
                for r in range(rows)]
    return grid
def print_grid(grid, t=lambda x: x):
    """Print each row of grid on its own line, mapping cells through t."""
    for row in grid:
        print(''.join(map(t, row)))
def rule_gol(me, neighbors):
    """Conway's Game of Life rule: '*' is alive, '.' is dead.

    neighbors is a Counter of the surrounding cell values.
    """
    alive = neighbors['*']
    if me == '*':
        # Survival requires two or three live neighbors.
        return '*' if alive in (2, 3) else '.'
    # Birth requires exactly three live neighbors.
    return '*' if alive == 3 else '.'
def prod(L):
    """Return the product of the elements of L (1 for an empty iterable).

    Delegates to math.prod instead of a hand-rolled loop.
    """
    import math
    return math.prod(L)
def reverse_dict(d):
    """Invert a mapping of key -> iterable of values into value -> [keys]."""
    inverted = defaultdict(list)
    for key, values in d.items():
        for value in values:
            inverted[value].append(key)
    return inverted
# Keep a handle on the real builtin before shadowing it below.
builtin_map = map

# Deliberately shadow the builtin: in this utility module map() is eager and
# returns a list, which is convenient for contest-style scripting.
def map(*args, **kwargs):
    return list(builtin_map(*args, **kwargs))
def do_ps(lst):
    """Return the prefix-sum array of lst, starting with 0.

    The result has len(lst)+1 entries; entry i is the sum of lst[:i].
    """
    sums = [0]
    running = 0
    for value in lst:
        running += value
        sums.append(running)
    return sums
def transpose(A):
    """Return the transpose of the 2-D list A (rows become columns)."""
    return [[row[j] for row in A] for j in range(len(A[0]))]
def crt(n, a):
    """Chinese Remainder Theorem: solve x = a_i (mod n_i) for coprime moduli n.

    Returns the unique solution modulo the product of the moduli. Locals
    renamed so the builtins `sum` and the module-level `prod` are no
    longer shadowed.
    """
    from functools import reduce
    modulus = reduce(lambda acc, m: acc * m, n)
    total = 0
    for n_i, a_i in zip(n, a):
        partial = modulus // n_i
        # pow(partial, -1, n_i) is the modular inverse of partial mod n_i.
        total += a_i * pow(partial, -1, n_i) * partial
    return total % modulus
def dump_dict_grid(d, t=lambda x: x, missing=' '):
    """Print a dict keyed by (x, y) coordinates as a rectangular grid.

    The bounding box of the keys is printed row by row; cells are rendered
    through t. Coordinates absent from d print as `missing` (previously a
    sparse dict raised KeyError). Bounds are now gathered in one pass over
    the keys instead of four.
    """
    xs = [x for x, y in d.keys()]
    ys = [y for x, y in d.keys()]
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    for y in range(min_y, max_y + 1):
        for x in range(min_x, max_x + 1):
            print(t(d[(x, y)]) if (x, y) in d else missing, end='')
        print()
def ordch(ch: str) -> int:
    """Map a single ASCII letter to its 0-based alphabet index (case-insensitive)."""
    assert len(ch) == 1
    code = ord(ch)
    if ord('a') <= code <= ord('z'):
        return code - ord('a')
    if ord('A') <= code <= ord('Z'):
        return code - ord('A')
    raise Exception(f"{ch} is not alphabetic")
def add_interval(ss, L, R):
    """Insert the half-open interval [L, R) into ss, merging overlaps.

    ss is assumed to be a sorted container of disjoint (start, end) tuples
    supporting bisect_left/pop/add (e.g. sortedcontainers.SortedList --
    TODO confirm against callers). Returns the merged interval actually
    stored, or None when L == R (empty interval).
    """
    # [L, R)
    assert L <= R
    if L == R:
        return None
    idx = ss.bisect_left((L, R))
    # Absorb every interval to the right that touches or overlaps [L, R).
    while idx < len(ss):
        ival = ss[idx]
        if ival[0] > R:
            break
        R = max(R, ival[1])
        ss.pop(idx)
    # The interval just left of the insertion point may overlap too.
    if idx > 0:
        idx -= 1
        ival = ss[idx]
        if ival[1] >= L:
            L = min(L, ival[0])
            R = max(R, ival[1])
            ss.pop(idx)
    res = (L, R)
    ss.add(res)
    return res
def remove_interval(ss, L, R):
    """Delete the half-open interval [L, R) from the interval set ss.

    Implemented by first merging [L, R) in (via add_interval) so exactly
    one stored interval covers it, then removing that interval and
    re-adding the left/right remainders that fall outside [L, R).
    """
    # [L, R)
    assert L <= R
    if L == R:
        return
    added = add_interval(ss, L, R)
    r2 = added[1]
    ss.remove(added)
    if added[0] != L:
        ss.add((added[0], L))
    if R != r2:
        ss.add((R, r2))
def pad_grid(grid, ch=' '):
    """Right-pad every row of grid with ch up to the longest row's length.

    Mutates grid in place and returns it for convenience.
    """
    width = max(len(row) for row in grid)
    for i in range(len(grid)):
        shortfall = width - len(grid[i])
        if shortfall > 0:
            grid[i] += ch * shortfall
    return grid
| StarcoderdataPython |
8174653 | from setuptools import setup
from codecs import open
from os import path
# Directory containing this setup script.
here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on the package index page.
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="change-album",
    version="1.0",
    python_requires=">=3.8",
    description="Script to change the album's name of mp3 files",
    long_description=long_description,
    url="https://github.com/Raiytak/",
    author="<NAME>",
    author_email="<EMAIL>",
    install_requires="eyed3",
    packages=["change_album"],
    license="MIT",
)
| StarcoderdataPython |
import sip
# Force the PyQt v2 API for these classes; must happen before any Qt import
# (the Geon imports below) touches them.
for api in ["QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl", "QVariant"]:
    sip.setapi(api, 2)
from Geon.gui import GEditorMainWindow
from Geon.utils import GInit
from Geon.editor import GEditorController

# Bootstrap the application, wire the controller into the main window,
# show it, and enter the Qt event loop until the window closes.
app = GInit()
ctrl = GEditorController()
m = GEditorMainWindow(ctrl)
m.show()
app.exit(app.exec_())
| StarcoderdataPython |
6518766 | <filename>script_1_beech.py
from fanpy import Formind

# Cluster paths for the FORMIND forest simulation of the beech parameter set.
home_dir = '/p/project/hai_deep_c/project_data/forest-carbon-flux/'
model_path = home_dir
par_file_name='beech'
project_path= home_dir + 'formind_sim/sim_100ha_42_0/'
num_sim = 1
print(model_path)
print(project_path)
print(par_file_name)

# Build the model wrapper and run a single simulation tagged '1ha'.
model = Formind(model_path, project_path, par_file_name)
sim_id = '1ha'
model.run(sim_id=sim_id, num_sim=num_sim)
6613137 | '''
Author: <NAME>
Date: 2021-11-18 09:58:40
LastEditors: <NAME>
LastEditTime: 2021-11-26 12:13:14
Description: file content
FilePath: /CVMI_Sementic_Segmentation/utils/ddp/__init__.py
'''
from utils.ddp.dist_utils import get_dist_info, setup_distributed, convert_sync_bn, mkdirs
from utils.ddp.mmdistributed_ddp import MMDistributedDataParallel | StarcoderdataPython |
3362392 | import logging
from typing import Callable, Union, Any, TYPE_CHECKING, Type
from numbers import Number
import numpy as np
from scipy.interpolate import interp1d # type: ignore
from scipy.integrate import cumulative_trapezoid, trapz # type: ignore
from math_signals.defaults.base_structures import BaseXY, TypeFuncError, NotEqualError
if TYPE_CHECKING:
from math_signals.math_relation import Relation
def math_operation(
        x: np.ndarray,
        y1: np.ndarray,
        y2: Union[np.ndarray, Number],
        name_operation: str,
        ) -> BaseXY:
    """Apply the dunder operation named by name_operation to y1 and y2.

    '__pow__' is treated specially: the power is taken on the magnitude
    of y1 and the original sign is restored afterwards, so fractional
    powers of negative samples stay real.
    """
    if name_operation == '__pow__':
        result = getattr(np.abs(y1), name_operation)(y2) * np.sign(y1)
    else:
        result = getattr(y1, name_operation)(y2)
    return BaseXY(x, result)
def extract_input(x: Any, y: Any) -> BaseXY:
    """Normalize flexible (x, y) input into a BaseXY of numpy arrays.

    When y is None, x may be a (x, y) pair packed in a tuple/list, or a
    dict whose first two values are the arrays. Raises NotEqualError when
    the two arrays end up with different sizes.
    """
    if y is None:
        if isinstance(x, (tuple, list,)):
            x, y = x[0], x[1]
        if isinstance(x, dict):
            x = list(x.values())
            x, y = x[0], x[1]
    x, y = np.array(x), np.array(y)
    if x.size != y.size:
        raise NotEqualError(x.size, y.size)
    return BaseXY(np.array(x), np.array(y))
def one_integrate(y: np.ndarray, x: np.ndarray = None) -> float:
    '''Integrate y over x with the trapezoid rule, returning a scalar.

    When x is None, unit sample spacing is assumed (scipy trapz default).
    '''
    return trapz(y, x)


def integrate(x: np.ndarray, y: np.ndarray) -> BaseXY:
    '''Cumulative integration of y via scipy's cumulative_trapezoid.

    Assumes uniformly spaced x: the cumulative sum is scaled by the first
    step x[1]-x[0]. The result is one sample shorter than the input.
    '''
    return BaseXY(x[1:], cumulative_trapezoid(y)*(x[1]-x[0]))


def differentiate(x: np.ndarray, y: np.ndarray) -> BaseXY:
    '''Numerical derivative dy/dx via np.diff, assuming uniformly spaced x.

    Returned x samples sit at the midpoints of the original steps, so the
    result is one sample shorter than the input.
    '''
    return BaseXY(x[:-1]+(x[1]-x[0])/2, np.diff(y)/(x[1]-x[0]))
def interpolate_extrapolate(x: np.ndarray, y: np.ndarray, bounds_error=False,
                            fill_value=0.) -> Callable[[np.ndarray], np.ndarray]:
    """Build a linear interpolator of y over x via scipy's interp1d.

    Outside the sampled range the interpolator returns fill_value instead
    of raising, unless bounds_error is set.
    """
    interpolator = interp1d(x, y, bounds_error=bounds_error,
                            fill_value=fill_value)
    return interpolator
def get_common_x(x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
    """Build a common uniform grid covering the ranges of x1 and x2.

    The step is the finer of the two input steps (taken as x[1]-x[0] of
    each array). NOTE(review): the grid runs from x_start to
    (ceil(X/dx)+1)*dx measured from zero rather than from x_start --
    confirm this endpoint is intentional for non-zero x_start.
    """
    dx1 = x1[1]-x1[0]
    dx2 = x2[1]-x2[0]
    dx = dx1 if dx1 <= dx2 else dx2
    x_start = x1[0] if x1[0] <= x2[0] else x2[0]
    x_end = x1[-1] if x1[-1] >= x2[-1] else x2[-1]
    X = x_end-x_start
    return np.linspace(x_start, (np.ceil(X/dx)+1)*dx, int(np.ceil(X/dx))+1)


def correlate(cls: Type['Relation'], r1: 'Relation', r2: 'Relation') \
        -> BaseXY:
    '''Full cross-correlation of two Relations via numpy.correlate.

    Both relations are shifted so their x axes start at zero and are
    equalized onto a common grid first; the returned x axis is extended
    symmetrically to cover all lags of the 'full' correlation.
    '''
    r1 = r1.shift(-r1._x[0])
    r2 = r2.shift(-r2._x[0])
    r1, r2 = cls.equalize(r1, r2)
    x, y1 = r1.get_data()
    _, y2 = r2.get_data()
    return BaseXY(np.append(np.sort(-1*x)[:-1], x), np.correlate(y1, y2, 'full'))


def convolve(cls: Type['Relation'], r1: 'Relation', r2: 'Relation') \
        -> BaseXY:
    '''Full convolution of two Relations via numpy.convolve.

    Same preprocessing and output axis construction as correlate().
    '''
    r1 = r1.shift(-r1._x[0])
    r2 = r2.shift(-r2._x[0])
    r1, r2 = cls.equalize(r1, r2)
    x, y1 = r1.get_data()
    _, y2 = r2.get_data()
    return BaseXY(np.append(np.sort(-1*x)[:-1], x), np.convolve(y1, y2, 'full'))
#==============================================================================

def signal2spectrum(t: np.ndarray, a: np.ndarray,
                    is_start_zero=False) -> BaseXY:
    '''Forward Fourier transform (real FFT) of a sampled signal.

    Unless is_start_zero is set, the signal is first zero-padded so its
    time axis reaches t=0, then rolled so the samples at t>=0 come first
    (the layout numpy's FFT expects). Returns the frequency axis and the
    complex one-sided spectrum.
    '''
    if not is_start_zero:
        if t[0] > 0.:
            # Signal starts after zero: pad zeros on the left back to t=0.
            t = np.linspace(0., t[-1], int(t[-1]/(t[1]-t[0]))+1)
            a = np.append(np.zeros(t.size-a.size), a)
        elif t[-1] < 0.:
            # Signal ends before zero: pad zeros on the right up to t=0.
            t = np.linspace(t[0], 0., int(abs(t[0])/(t[1]-t[0]))+1)
            a = np.append(a, np.zeros(t.size-a.size))
        # Roll so non-negative times precede negative ones.
        a = np.append(a[t>=0.], a[t<0.])
    s_a = np.fft.rfft(a)
    f = np.fft.rfftfreq(a.size, d=(t[-1]-t[0])/(a.size))
    return BaseXY(f, s_a)


def spectrum2sigmal(f: np.ndarray, s_a: np.ndarray,
                    time: float = None) -> BaseXY:
    '''Inverse Fourier transform back to a sampled signal.

    NOTE: the name "sigmal" is a historical typo kept for API
    compatibility. When `time` is given, the time axis starts there and
    the samples are rolled so the t>=0 part comes first.
    '''
    a = np.fft.irfft(s_a)  # type: np.ndarray
    if time is None:
        t = np.linspace(0, (a.size-1)/(2*(f[-1]-f[0])), a.size)
    else:
        t = np.linspace(time, time+(a.size-1)/(2*(f[-1]-f[0])), a.size)
        a = np.append(a[t>=0.], a[t<0.])
    return BaseXY(t, a)
| StarcoderdataPython |
6690830 | <reponame>mathemaphysics/APGL
'''
Created on 6 Jul 2009
@author: charanpal
'''
from apgl.io.PajekWriter import PajekWriter
from apgl.graph.DenseGraph import DenseGraph
from apgl.graph.DictGraph import DictGraph
from apgl.graph.SparseGraph import SparseGraph
from apgl.graph.VertexList import VertexList
from apgl.generator.ErdosRenyiGenerator import ErdosRenyiGenerator
from apgl.generator.SmallWorldGenerator import SmallWorldGenerator
from apgl.util.PathDefaults import PathDefaults
import unittest
import os
class PajekWriterTest(unittest.TestCase):
    """Exercise PajekWriter against dense, sparse and dict graph types."""

    def setUp(self):
        """Build small undirected/directed graphs of every supported type."""
        #Let's set up a very simple graph
        numVertices = 5
        numFeatures = 1
        edges = []
        vList = VertexList(numVertices, numFeatures)
        #An undirected dense graph
        self.dGraph1 = DenseGraph(vList, True)
        self.dGraph1.addEdge(0, 1, 1)
        self.dGraph1.addEdge(0, 2, 1)
        self.dGraph1.addEdge(2, 4, 1)
        self.dGraph1.addEdge(2, 3, 1)
        self.dGraph1.addEdge(3, 4, 1)
        #A directed sparse graph
        self.dGraph2 = DenseGraph(vList, False)
        self.dGraph2.addEdge(0, 1, 1)
        self.dGraph2.addEdge(0, 2, 1)
        self.dGraph2.addEdge(2, 4, 1)
        self.dGraph2.addEdge(2, 3, 1)
        self.dGraph2.addEdge(3, 4, 1)
        #Now try sparse graphs
        vList = VertexList(numVertices, numFeatures)
        self.sGraph1 = SparseGraph(vList, True)
        self.sGraph1.addEdge(0, 1, 1)
        self.sGraph1.addEdge(0, 2, 1)
        self.sGraph1.addEdge(2, 4, 1)
        self.sGraph1.addEdge(2, 3, 1)
        self.sGraph1.addEdge(3, 4, 1)
        self.sGraph2 = SparseGraph(vList, False)
        self.sGraph2.addEdge(0, 1, 1)
        self.sGraph2.addEdge(0, 2, 1)
        self.sGraph2.addEdge(2, 4, 1)
        self.sGraph2.addEdge(2, 3, 1)
        self.sGraph2.addEdge(3, 4, 1)
        #Finally, try DictGraphs
        self.dctGraph1 = DictGraph(True)
        self.dctGraph1.addEdge(0, 1, 1)
        self.dctGraph1.addEdge(0, 2, 2)
        self.dctGraph1.addEdge(2, 4, 8)
        self.dctGraph1.addEdge(2, 3, 1)
        self.dctGraph1.addEdge(12, 4, 1)
        self.dctGraph2 = DictGraph(False)
        self.dctGraph2.addEdge(0, 1, 1)
        self.dctGraph2.addEdge(0, 2, 1)
        self.dctGraph2.addEdge(2, 4, 1)
        self.dctGraph2.addEdge(2, 3, 1)
        self.dctGraph2.addEdge(12, 4, 1)

    def tearDown(self):
        """Nothing to clean up; output files are left for inspection."""
        pass

    def testInit(self):
        """Placeholder: construction is covered implicitly by other tests."""
        pass

    def testWriteToFile(self):
        """Write every graph variant to a file; checked by inspection."""
        pw = PajekWriter()
        directory = PathDefaults.getOutputDir() + "test/"
        #Have to check the files
        fileName1 = directory + "denseTestUndirected"
        pw.writeToFile(fileName1, self.dGraph1)
        fileName2 = directory + "denseTestDirected"
        pw.writeToFile(fileName2, self.dGraph2)
        fileName3 = directory + "sparseTestUndirected"
        pw.writeToFile(fileName3, self.sGraph1)
        fileName4 = directory + "sparseTestDirected"
        pw.writeToFile(fileName4, self.sGraph2)
        fileName5 = directory + "dictTestUndirected"
        pw.writeToFile(fileName5, self.dctGraph1)
        fileName6 = directory + "dictTestDirected"
        pw.writeToFile(fileName6, self.dctGraph2)

    def testWriteToFile2(self):
        """Exercise the per-vertex/per-edge colour and size callbacks."""
        pw = PajekWriter()
        directory = PathDefaults.getOutputDir() + "test/"

        def setVertexColour(vertexIndex, graph):
            colours = ["grey05", "grey10", "grey15", "grey20", "grey25"]
            return colours[vertexIndex]

        def setVertexSize(vertexIndex, graph):
            return vertexIndex

        def setEdgeColour(vertexIndex1, vertexIndex2, graph):
            colours = ["grey05", "grey10", "grey15", "grey20", "grey25"]
            return colours[vertexIndex1]

        def setEdgeSize(vertexIndex1, vertexIndex2, graph):
            return vertexIndex1+vertexIndex2

        # Each callback is installed, used for one write, then removed.
        pw.setVertexColourFunction(setVertexColour)
        fileName1 = directory + "vertexColourTest"
        pw.writeToFile(fileName1, self.dGraph1)
        pw.setVertexColourFunction(None)
        pw.setVertexSizeFunction(setVertexSize)
        fileName1 = directory + "vertexSizeTest"
        pw.writeToFile(fileName1, self.dGraph1)
        pw.setVertexSizeFunction(None)
        pw.setEdgeColourFunction(setEdgeColour)
        fileName1 = directory + "edgeColourTest"
        pw.writeToFile(fileName1, self.dGraph1)
        pw.setEdgeColourFunction(None)
        pw.setEdgeSizeFunction(setEdgeSize)
        fileName1 = directory + "edgeSizeTest"
        pw.writeToFile(fileName1, self.dGraph1)
        pw.setEdgeColourFunction(None)

    def testWriteToFile3(self):
        """
        We will test out writing out some random graphs to Pajek
        """
        numVertices = 20
        numFeatures = 0
        vList = VertexList(numVertices, numFeatures)
        graph = SparseGraph(vList)
        p = 0.1
        generator = ErdosRenyiGenerator(p)
        graph = generator.generate(graph)
        pw = PajekWriter()
        directory = PathDefaults.getOutputDir() + "test/"
        pw.writeToFile(directory + "erdosRenyi20", graph)
        #Now write a small world graph
        p = 0.2
        k = 3
        graph.removeAllEdges()
        generator = SmallWorldGenerator(p, k)
        graph = generator.generate(graph)
        pw.writeToFile(directory + "smallWorld20", graph)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
3504427 | import param
import panel as pn
import pathlib
from pyhdx.panel.template import ExtendedGoldenTemplate
class ExtendedGoldenDefaultTheme(pn.template.golden.GoldenDefaultTheme):
    """Light theme for ExtendedGoldenTemplate, backed by a bundled CSS file."""
    css = param.Filename(default=pathlib.Path(__file__).parent / 'static' / 'extendedgoldentemplate' / 'default.css')
    _template = ExtendedGoldenTemplate
class ExtendedGoldenDarkTheme(pn.template.golden.GoldenDarkTheme):
    """Dark theme for ExtendedGoldenTemplate, backed by a bundled CSS file."""
    css = param.Filename(default=pathlib.Path(__file__).parent / 'static' / 'extendedgoldentemplate' / 'dark.css')
    _template = ExtendedGoldenTemplate
| StarcoderdataPython |
6502815 | from helper_data_plot import Plot as Plot
import os
import numpy as np
# Visualize pre-computed ScanNet validation results: for each scene, plot the
# point cloud plus predicted/ground-truth semantic and instance labels.
### nyu40 class
CLASS_LABELS = ['wall','floor','cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'blinds',
                'desk', 'shelves', 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', 'clothes', 'ceiling', 'books', 'refrigerator',
                'television', 'paper', 'towel', 'shower curtain', 'box', 'whiteboard', 'person', 'nightstand', 'toilet', 'sink', 'lamp',
                'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop']
### nyu40 id
CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,18,19,20,21,22,23, 24, 25,26,27, 28,
                      29, 30,31,32, 33, 34, 35, 36, 37,38, 39, 40])

# Each scene is a subdirectory containing the per-scene result text files.
results_scannet_validation = 'results_scannet_validation/'
scene_names = sorted(os.listdir(results_scannet_validation))
if len(scene_names)<=0:
    print('please download sample results first.')
    # https://drive.google.com/file/d/1cV07rP02Yi3Eu6GQxMR2buigNPJEvCq0/view?usp=sharing
    exit()
for scene in scene_names:
    print('scene:', scene)
    # Load xyz+rgb points and the four label arrays for this scene.
    pc = np.loadtxt(results_scannet_validation+scene+'/'+scene+'_pc_xyzrgb.txt', np.float32)
    sem_pred = np.loadtxt(results_scannet_validation+scene+'/'+scene+'_sem_pred.txt', np.int16)
    sem_gt = np.loadtxt(results_scannet_validation+scene+'/'+scene +'_sem_gt.txt', np.int16)
    ins_pred = np.loadtxt(results_scannet_validation + scene +'/'+scene +'_ins_pred.txt', np.int16)
    ins_gt = np.loadtxt(results_scannet_validation + scene +'/'+scene +'_ins_gt.txt', np.int16)
    ## plot
    Plot.draw_pc(pc_xyzrgb=pc[:, 0:6])
    Plot.draw_pc_semins(pc_xyz=pc[:, 0:3], pc_semins=sem_pred)
    Plot.draw_pc_semins(pc_xyz=pc[:, 0:3], pc_semins=sem_gt)
    Plot.draw_pc_semins(pc_xyz=pc[:, 0:3], pc_semins=ins_pred)
    Plot.draw_pc_semins(pc_xyz=pc[:, 0:3], pc_semins=ins_gt)
| StarcoderdataPython |
8015885 | <filename>common_tools/load_graph_layer_setting_dialog.py
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Loads graph from file
Purpose: Loads GUI for loading graphs from files and configuring them before computation
Original Author: <NAME> (<EMAIL>)
Contributors:
Last edited by: <NAME>
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2016-07-30
Updated: 2020-02-08
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from qgis.core import *
import qgis
from qgis.PyQt import QtWidgets, uic, QtCore, QtGui
from qgis.PyQt.QtGui import *
import sys
import os
from functools import partial
from ..common_tools.auxiliary_functions import *
from ..common_tools import GetOutputFileName
from ..common_tools.global_parameters import *
from aequilibrae.project import Project
try:
from aequilibrae.paths import Graph
no_binary = False
except:
no_binary = True
# Compile the Qt Designer .ui file at import time; FORM_CLASS is the
# generated form base class mixed into the dialog below.
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), "forms/ui_load_network_info.ui"))
class LoadGraphLayerSettingDialog(QtWidgets.QDialog, FORM_CLASS):
    """Dialog for picking a transport mode and minimizing field before
    building a graph from an AequilibraE project network."""

    def __init__(self, iface, project: Project):
        """Populate the mode and minimizing-field combo boxes from the
        project database and wire the load button."""
        # QtWidgets.QDialog.__init__(self)
        QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowStaysOnTopHint)
        self.iface = iface
        self.project = project
        self.setupUi(self)
        self.minimize_field = ''
        self.mode = ''
        self.link_layer = ''
        self.node_layer = ''
        self.error = []
        # Maps the combo-box display text 'name (id)' back to the mode id.
        self.all_modes = {}
        curr = self.project.network.conn.cursor()
        curr.execute("""select mode_name, mode_id from modes""")
        for x in curr.fetchall():
            self.cb_modes.addItem(f'{x[0]} ({x[1]})')
            self.all_modes[f'{x[0]} ({x[1]})'] = x[1]
        for field in self.project.network.skimmable_fields():
            self.cb_minimizing.addItem(field)
        self.do_load_graph.clicked.connect(self.exit_procedure)

    def exit_procedure(self):
        """Capture the user's selections onto the instance and close."""
        self.mode = self.all_modes[self.cb_modes.currentText()]
        self.minimize_field = self.cb_minimizing.currentText()
        self.block_connector = self.block_paths.isChecked()
        self.remove_chosen_links = self.chb_chosen_links.isChecked()
        self.close()
| StarcoderdataPython |
4925518 | <filename>blog/forms.py
from django import forms
from .models import Blog
class BlogAddForm(forms.ModelForm):
    """Form for creating a Blog post with title, description, thumbnail,
    body content and tags."""
    title = forms.CharField(max_length=256)
    description = forms.CharField(max_length=256, help_text='Briefly describe the content of your blog post (This will appear when looking at the list of blog posts).')
    thumbnail = forms.ImageField()
    content = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Blog
        fields = ['title', 'description', 'thumbnail', 'content', 'tags']
| StarcoderdataPython |
5186964 | <reponame>DevipriyaSarkar/SniFFile
import urllib2
import json
import os
import traceback
from bs4 import BeautifulSoup
'''
Source 3: https://medium.com/web-development-zone/a-complete-list-of-computer-programming-languages-1d8bc5a891f
Given a language, returns its paradigm if known, else returns "Not Known"
'''
# performed only once when first line is processed
# otherwise info taken from scraped data file
def get_data_from_source3(lang):
    """Scrape the paradigm list from source 3, cache it, and look up lang.

    Scrapes the Medium article (paradigms are h3 headings, languages the
    h4 headings beneath them), saves the full mapping to
    media/paradigm.json, and returns the paradigm for `lang` (expected
    lower-case) or "Not Known". Python 2 code: note str/unicode handling.
    """
    # source 3 url
    source_url = "https://medium.com/web-development-zone/a-complete-list-of-computer-programming-languages-1d8bc5a891f"
    headers = {'User-Agent': 'Mozilla/5.0'}
    req = urllib2.Request(source_url, headers=headers)
    # scrape the web page using BeautifulSoup
    page = urllib2.urlopen(req)
    soup = BeautifulSoup(page, "lxml")
    lang_dict = {}
    tag = soup.find('h3')
    cur_paradigm = str(tag.get_text())
    while tag.find_next_sibling('h3') is not None:
        while tag.next_sibling.name != "h3":
            tag = tag.next_sibling
            if tag.name == "h4":
                cur_lang = tag.get_text().encode('utf-8').lower()  # unicode to string
                cur_lang = cur_lang.replace("\xc2\xa0", " ")  # data source web page contains non-breaking space
                lang_dict[cur_lang] = cur_paradigm
        tag = tag.next_sibling
        cur_paradigm = str(tag.get_text())
    # Languages after the final h3 heading belong to the last paradigm.
    lang_list = tag.find_next_siblings("h4")
    for l in lang_list:
        cur_lang = l.get_text().encode('utf-8').lower()
        cur_lang = cur_lang.replace("\xc2\xa0", " ")
        lang_dict[cur_lang] = cur_paradigm
    try:
        # save the scraped dict to file "media/paradigm.json"
        directory = os.path.join(os.path.dirname(__file__), "media")
        if not os.path.exists(directory):
            os.makedirs(directory)
        paradigm_file = os.path.join(directory, "paradigm.json")
        with open(paradigm_file, 'w+') as outfile:
            json.dump(lang_dict, outfile)
    except Exception as e:
        print e
        traceback.print_exc()
    finally:
        return lang_dict.get(lang, "Not Known")  # return the required language paradigm if it exists
def get_paradigm(lang):
    """Return the programming paradigm of `lang`, or "Not Known".

    Serves from the cached media/paradigm.json when present; otherwise
    scrapes the source page (which also writes the cache).
    """
    lang = lang.lower()
    directory = os.path.join(os.path.dirname(__file__), "media")
    paradigm_file = os.path.join(directory, "paradigm.json")
    exists = os.path.exists(paradigm_file)
    # if file exists, return data from file
    # else fetch from source url after scraping
    if exists:
        with open(paradigm_file) as json_data:
            lang_dict = json.load(json_data)
            return lang_dict.get(lang, "Not Known")
    else:
        return get_data_from_source3(lang)
| StarcoderdataPython |
3574656 | import tensorflow as tf
import numpy as np
def tensorboard_scalar(writer, tag, value, step):
    """Write a single scalar summary under `tag` at `step`."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    writer.add_summary(summary, step)


def tensorboard_array(writer, tag, value, step):
    """Write mean/min/max/stddev scalar summaries of an array under `tag`."""
    # convert to a numpy array
    values = np.asarray(value)
    # create summaries
    _mean = tf.Summary(value=[tf.Summary.Value(tag=tag+'/mean', simple_value=np.mean(values))])
    _min = tf.Summary(value=[tf.Summary.Value(tag=tag+'/min', simple_value=np.min(values))])
    _max = tf.Summary(value=[tf.Summary.Value(tag=tag+'/max', simple_value=np.max(values))])
    _stddev = tf.Summary(value=[tf.Summary.Value(tag=tag+'/stddev', simple_value=np.std(values))])
    # write summaries
    writer.add_summary(_mean, step)
    writer.add_summary(_min, step)
    writer.add_summary(_max, step)
    writer.add_summary(_stddev, step)
    writer.flush()


def tensorboard_text(writer, tag, value, step=0):
    """Write a text summary via the TensorBoard text plugin.

    Note: `step` is accepted for API symmetry but not passed to
    add_summary here.
    """
    text_tensor = tf.make_tensor_proto(value, dtype=tf.string)
    meta = tf.SummaryMetadata()
    meta.plugin_data.plugin_name = "text"
    summary = tf.Summary()
    summary.value.add(tag=tag, metadata=meta, tensor=text_tensor)
    writer.add_summary(summary)
    writer.flush()


def tensorboard_histo(writer, tag, values, step, bins=1000):
    """Write a histogram summary of `values` built with numpy."""
    # convert to a numpy array
    values = np.array(values)
    # create histogram using numpy
    counts, bin_edges = np.histogram(values, bins=bins)
    # fill fields of histogram proto
    hist = tf.HistogramProto()
    hist.min = float(np.min(values))
    hist.max = float(np.max(values))
    hist.num = int(np.prod(values.shape))
    hist.sum = float(np.sum(values))
    hist.sum_squares = float(np.sum(values ** 2))
    # Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
    # See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
    # Thus, we drop the start of the first bin
    bin_edges = bin_edges[1:]
    # add bin edges and counts
    for edge in bin_edges:
        hist.bucket_limit.append(edge)
    for c in counts:
        hist.bucket.append(c)
    # create and write summary
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
    writer.add_summary(summary, step)
    writer.flush()
| StarcoderdataPython |
110868 | from django.apps import AppConfig
class StoreRssFeedsConfig(AppConfig):
    """Django application configuration for the store_rss_feeds app."""
    name = 'store_rss_feeds'
| StarcoderdataPython |
9654586 | <filename>distributedsocialnetwork/node/admin.py
from django.contrib import admin
from .models import Node
from author.models import Author
import random
import string
# Register your models here.
class NodeAdmin(admin.ModelAdmin):
    """Admin form for Node objects.

    Saving a Node also provisions a backing Author service account;
    deleting a Node removes that account and every Author whose host
    belongs to the node (their content cascades away with them).
    """
    model = Node
    list_display = ["server_username",
                    "server_password", "hostname", "api_url", "node_auth_username", "node_auth_password"]
    readonly_fields = ['auth_user']
    fields = [('auth_user'), ("server_username"),
              ("server_password"), ("hostname"), ("api_url"), ("node_auth_username"), ("node_auth_password")]

    def save_model(self, request, obj, form, change):
        """Create or reuse the Author account backing this node, then save."""
        displayName = obj.server_username
        # NOTE(review): this assignment was redacted in the original
        # source; the node's server password is the only plausible value.
        password = obj.server_password
        hostname = obj.hostname
        # Email has to be unique, so generate a random throwaway address.
        email = ''.join(random.choice(string.ascii_lowercase)
                        for i in range(10)) + "@node.com"
        # .count() avoids fetching every matching row just to measure it.
        if Author.objects.filter(displayName=displayName).count() == 1:
            # User exists: we are updating our node.
            new_author = Author.objects.get(displayName=displayName)
        else:
            new_author = Author.objects.create_user(
                displayName=displayName, password=password, first_name="NODE", last_name="NODE", email=email)
        new_author.host = hostname
        new_author.is_node = True
        new_author.is_active = True
        new_author.save()
        obj.auth_user = new_author
        super().save_model(request, obj, form, change)

    def delete_model(self, request, obj):
        """Delete the node's service account and all authors from its host."""
        # Deleting the backing Author cascades to anything it touched.
        node_user = Author.objects.get(displayName=obj.server_username)
        node_user.delete()
        # Delete all authors that belong to this host on deletion, which
        # will cascade anything else they have touched.
        authors = Author.objects.filter(host__contains=obj.hostname)
        for author in authors:
            author.delete()
        # Sometimes the hostname stored on an author lacks the trailing
        # slash, so match that form too. (Debug print removed.)
        authors = Author.objects.filter(host=obj.hostname[:-1])
        for author in authors:
            author.delete()
        super().delete_model(request, obj)
# Expose the Node model in the Django admin using the customized NodeAdmin.
admin.site.register(Node, NodeAdmin)
| StarcoderdataPython |
394476 | from django.conf.urls import url
def some_view(request):
    """Placeholder view that only serves as a target for the URL pattern."""
    pass

urlpatterns = [
    url(r'^some-url/$', some_view, name='some-view'),
]
| StarcoderdataPython |
1909110 | from django.conf.urls import url
from .views import google_verification_view
urlpatterns = [
    # Serve the Google site-verification response at this app's root URL.
    url(r'^$', google_verification_view, name='google_verification_view'),
]
| StarcoderdataPython |
4891296 | """
# JSON Tools
Safe JSON SerDe.
"""
import datetime
import decimal
import json
from typing import Any, Type
class SafeJSONEncoder(json.JSONEncoder):
    """
    Safe encoder for `json.dumps`. Handles `decimal.Decimal`
    values properly and uses `repr` for any non-serializeable object.

    - set is serialized to list
    - date is serialized to a string in "%Y-%m-%d" format
    - datetime is serialized to a string in "%Y-%m-%dT%H:%M:%SZ" format
    - integral Decimal is serialized to int
    - non-integral Decimal is serialized to float
    - Exception is serialized to string
    - Unknown type is serialized to string as a repr

    ```python
    data = {
        'string': 'test',
        'decimal': decimal.Decimal('3.14'),
        'datetime': datetime.datetime(2020, 1, 15, 14, 34, 56),
        'date': datetime.date(2020, 1, 15),
        'exception': ValueError('test'),
    }
    json.dumps(data, cls=SafeJSONEncoder)
    ```
    """

    # strftime formats used for datetime and date values respectively.
    iso_format = r"%Y-%m-%dT%H:%M:%SZ"
    simple_date_format = r"%Y-%m-%d"

    def default(self, o: Any) -> Any:  # pylint:disable=method-hidden
        """
        Override handling of non-JSON-serializeable objects.

        Supports `decimal.Decimal` and `set`.

        Arguments:
            o -- Object for serialization.

        Returns:
            `int` or `float` for decimal values, otherwise a string with object representation.
        """
        if isinstance(o, decimal.Decimal):
            # Integral decimals become ints so Decimal("3") is 3, not 3.0.
            if o == o.to_integral_value():
                return int(o)
            return float(o)
        if isinstance(o, set):
            return list(o)
        # datetime must be tested before date: datetime subclasses date.
        if isinstance(o, datetime.datetime):
            return o.strftime(self.iso_format)
        if isinstance(o, datetime.date):
            return o.strftime(self.simple_date_format)
        if isinstance(o, BaseException):
            return f"{o.__class__.__name__}('{o}')"
        return repr(o)
def dumps(
    data: Any, sort_keys: bool = True, cls: Type[json.JSONEncoder] = SafeJSONEncoder, **kwargs: Any,
) -> str:
    """Serialize `data` to a JSON string.

    Thin alias for `json.dumps` that sorts dictionary keys by default and
    uses `SafeJSONEncoder`, so Decimals, dates, sets and other awkward
    values serialize safely instead of raising.

    Arguments:
        data -- JSON-serializeable object.
        sort_keys -- Sort output of dictionaries by key.
        cls -- JSON encoder for Python data structures.
        kwargs -- Additional parameters forwarded to `json.dumps`.

    Returns:
        A string with serialized JSON.
    """
    return json.dumps(data, sort_keys=sort_keys, cls=cls, **kwargs)
def loads(data: str, **kwargs: Any) -> Any:
    """Deserialize a JSON string.

    Thin alias for `json.loads`.

    Arguments:
        data -- A string with valid JSON.
        kwargs -- Additional parameters forwarded to `json.loads`.

    Returns:
        The Python object encoded by the JSON data.
    """
    return json.loads(data, **kwargs)
| StarcoderdataPython |
11225745 | <filename>seekr2/tests/test_markov_chain_monte_carlo.py
"""
test_markov_chain_monte_carlo.py
Unit tests for the MCMC sampling algorithms to estimate error bars in
milestoning calculations.
"""
from collections import defaultdict
import pytest
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
import seekr2.modules.common_base as base
import seekr2.modules.mmvt_base as mmvt_base
import seekr2.modules.elber_base as elber_base
import seekr2.modules.common_analyze as common_analyze
import seekr2.modules.mmvt_analyze as mmvt_analyze
import seekr2.modules.elber_analyze as elber_analyze
import seekr2.analyze as analyze
import seekr2.modules.markov_chain_monte_carlo as markov_chain_monte_carlo
import seekr2.tests.create_model_input as create_model_input
def make_2d_test_plots(v1_data_points, v2_data_points, X, Y, Z, max_x, max_y):
    """
    Make the plots showing similarities between MCMC procedure and
    analytical distribution.

    v1_data_points/v2_data_points are the sampled q01/q10 chains; X, Y, Z
    describe the analytical distribution on a meshgrid; max_x/max_y bound
    the histogram axes. Opens a matplotlib window (blocking plt.show()).
    """
    fig, axs = plt.subplots(nrows=1, ncols=2)
    #im = axs[0].imshow(Z, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1],
    #           cmap=plt.cm.jet)
    #im.set_interpolation('bilinear')
    p = axs[0].pcolor(X, Y, Z, cmap=plt.cm.jet, vmin=abs(Z).min(),
                      vmax=abs(Z).max())
    axs[0].set_title("Analytical distribution")
    axs[0].set_xlabel("$q_{0,1}$")
    axs[0].set_ylabel("$q_{1,0}$")
    axs[1].hist2d(v1_data_points, v2_data_points, bins=50,
                  range=[[0.0, max_x],[0.0, max_y]],
                  cmap=plt.cm.jet)
    axs[1].set_title("Sampled using MCMC procedure")
    axs[1].set_xlabel("$q_{0,1}$")
    axs[1].set_ylabel("$q_{1,0}$")
    plt.show()
    return
def compute_2d_distribution_avg_std(bounds_x, bounds_y, Z):
    """
    Given a 2d probability distribution, compute its average and standard
    deviation along each axis.

    Replaces the previous O(n^2) pure-Python double loops with vectorized
    numpy reductions.

    Parameters
    ----------
    bounds_x, bounds_y : array-like
        Grid values along x and y.
    Z : array-like, shape (len(bounds_y), len(bounds_x))
        Unnormalized weights; Z[j, i] is the weight at
        (bounds_x[i], bounds_y[j]).

    Returns
    -------
    avg_x, avg_y, std_x, std_y : float
        Weighted means and standard deviations along each axis.
    """
    bounds_x = np.asarray(bounds_x, dtype=float)
    bounds_y = np.asarray(bounds_y, dtype=float)
    Z = np.asarray(Z, dtype=float)
    n = Z.sum()
    # Collapse the distribution onto each axis, then take weighted moments.
    weights_x = Z.sum(axis=0)
    weights_y = Z.sum(axis=1)
    avg_x = float(np.dot(weights_x, bounds_x) / n)
    avg_y = float(np.dot(weights_y, bounds_y) / n)
    std_x = float(np.sqrt(np.dot(weights_x, (bounds_x - avg_x) ** 2) / n))
    std_y = float(np.sqrt(np.dot(weights_y, (bounds_y - avg_y) ** 2) / n))
    return avg_x, avg_y, std_x, std_y
def mcmc_algorithm_any_2x2_common(algorithm, Q, N, R, max_x, max_y,
make_plot=False):
"""
"""
num = 100000
stride = 4
skip = 40
q1_distribution = []
q2_distribution = []
for counter in range(num * (stride) + skip):
Qnew = algorithm(Q, N, R)
Q = Qnew
q1_distribution.append(Q[0,1])
q2_distribution.append(Q[1,0])
n = 100
bounds_x = np.linspace(0, max_x, n)
bounds_y = np.linspace(0, max_y, n)
X,Y = np.meshgrid(bounds_x, bounds_y)
distribution_2d_histogram = np.zeros((n, n))
for i, x in enumerate(bounds_x):
for j, y in enumerate(bounds_y):
# fill out distribution here
value = x**N[0,1] * np.exp(-x*R[0,0] - Q[0,1]) \
* y**N[1,0] * np.exp(-y*R[1,0] - Q[1,0])
distribution_2d_histogram[j,i] = value
# TODO: check std. dev's
avg_q1, avg_q2, std_q1, std_q2 = compute_2d_distribution_avg_std(
bounds_x, bounds_y, distribution_2d_histogram)
expected_avg_q1 = np.mean(q1_distribution)
expected_avg_q2 = np.mean(q2_distribution)
expected_std_q1 = np.std(q1_distribution)
expected_std_q2 = np.std(q2_distribution)
assert np.isclose(avg_q1, expected_avg_q1, rtol=0.5, atol=0.01)
assert np.isclose(avg_q2, expected_avg_q2, rtol=0.5, atol=0.01)
assert np.isclose(std_q1, expected_std_q1, rtol=0.5, atol=0.01)
assert np.isclose(std_q2, expected_std_q2, rtol=0.5, atol=0.01)
# make plots
if make_plot:
make_2d_test_plots(q1_distribution, q2_distribution, X, Y,
distribution_2d_histogram, max_x, max_y)
return
def test_mcmc_algorithm_1_2x2_elber():
    """
    Sanity-check the irreversible stochastic matrix MCMC sampler on a 2x2
    Elber-milestoning rate matrix built from hand-picked statistics.
    """
    # Grid bounds: three times the maximum-likelihood rates N/R.
    max_x = 3.0 * 12.0 / 500.0
    max_y = 3.0 * 30.0 / 150.0
    N = np.array([[0, 12], [30, 0]])
    t = np.array([[500],[150]])
    R = np.array([[500], [150]])
    model = base.Model()
    model.num_milestones = 2
    data_sample = common_analyze.Data_sample(model)
    data_sample.N_ij = N
    data_sample.R_i = t
    data_sample.compute_rate_matrix()
    Q = data_sample.Q
    # The maximum-likelihood rate matrix must satisfy Q_ij = N_ij / R_i.
    assert Q[0,0] == -12.0 / 500.0
    assert Q[0,1] == 12.0 / 500.0
    assert Q[1,0] == 30.0 / 150.0
    assert Q[1,1] == -30.0 / 150.0
    algorithm = markov_chain_monte_carlo\
        .irreversible_stochastic_matrix_algorithm_sample
    mcmc_algorithm_any_2x2_common(algorithm, Q, N, R, max_x, max_y,
        make_plot=False)
    return
def make_k_N_T_matrices(n_anchors, k_alpha_beta, N_alpha_beta, T_alpha_total):
    """Assemble dense matrices from sparse dict-based anchor statistics.

    k_alpha_beta and N_alpha_beta map (alpha, beta) anchor pairs to rates
    and counts; T_alpha_total is indexable by anchor.  Returns the dense
    rate matrix (diagonal set so each row sums to zero), the dense count
    matrix, and the total-time column vector.
    """
    rate_matrix = np.zeros((n_anchors, n_anchors))
    count_matrix = np.zeros((n_anchors, n_anchors))
    time_column = np.zeros((n_anchors, 1))
    for alpha in range(n_anchors):
        time_column[alpha, 0] = T_alpha_total[alpha]
        for beta in range(n_anchors):
            if beta == alpha:
                continue
            rate = k_alpha_beta.get((alpha, beta))
            if rate is None:
                continue
            rate_matrix[alpha, beta] = rate
            # Keep each row summing to zero.
            rate_matrix[alpha, alpha] -= rate
            count_matrix[alpha, beta] = N_alpha_beta[(alpha, beta)]
    return rate_matrix, count_matrix, time_column
def make_N_R_T_alpha_matrices(
        n_anchors, n_milestones, pi_alpha, N_i_j_alpha, R_i_alpha_total,
        T_alpha_total):
    """
    Convert per-anchor MMVT statistics from dict form into dense per-anchor
    matrices.

    Returns (mmvt_Nij_alpha, mmvt_Ri_alpha, mmvt_Qij_alpha, T): for each
    anchor, the milestone transition-count matrix, incubation-time column,
    and rate matrix Q_ij = N_ij / R_i (diagonal chosen so rows sum to
    zero), plus the total time T = 1 / sum_alpha(pi_alpha / T_alpha).
    """
    invT = 0.0
    mmvt_Nij_alpha = []
    mmvt_Ri_alpha = []
    mmvt_Qij_alpha = []
    for alpha in range(n_anchors):
        # Accumulate the inverse-time weighted by anchor probability.
        invT += pi_alpha[alpha] / T_alpha_total[alpha]
        this_mmvt_Nij_alpha = np.zeros((n_milestones, n_milestones))
        this_mmvt_Ri_alpha = np.zeros((n_milestones, 1))
        this_mmvt_Qij_alpha = np.zeros((n_milestones, n_milestones))
        for i in range(n_milestones):
            for j in range(n_milestones):
                key = (i,j)
                if key in N_i_j_alpha[alpha]:
                    this_mmvt_Nij_alpha[i,j] = N_i_j_alpha[alpha][key]
            if i in R_i_alpha_total[alpha]:
                this_mmvt_Ri_alpha[i,0] = R_i_alpha_total[alpha][i]
        # Second pass: build Q = N / R now that N and R are complete.
        for i in range(n_milestones):
            for j in range(n_milestones):
                if i == j: continue
                # Rows with zero incubation time are left at zero.
                if this_mmvt_Ri_alpha[i,0] > 0.0:
                    this_mmvt_Qij_alpha[i,j] = this_mmvt_Nij_alpha[i,j] \
                        / this_mmvt_Ri_alpha[i,0]
                    this_mmvt_Qij_alpha[i,i] -= this_mmvt_Nij_alpha[i,j] \
                        / this_mmvt_Ri_alpha[i,0]
        mmvt_Nij_alpha.append(this_mmvt_Nij_alpha)
        mmvt_Ri_alpha.append(this_mmvt_Ri_alpha)
        mmvt_Qij_alpha.append(this_mmvt_Qij_alpha)
    T = 1.0 / invT
    return mmvt_Nij_alpha, mmvt_Ri_alpha, mmvt_Qij_alpha, T
def pi_alpha_from_K_alpha_beta(k_alpha_beta, n_anchors):
    """
    Solve for the stationary anchor probabilities pi_alpha of the dense
    anchor rate matrix k_alpha_beta.

    The system is augmented with an extra "sink" state: each anchor has a
    unit outflow into the sink, and a large rate (1e5) from the sink into
    the last anchor pins the normalization of the solution.
    NOTE(review): ``la`` is a linear-algebra module imported elsewhere in
    this file (presumably scipy.linalg or numpy.linalg) -- confirm.
    """
    flux_matrix = np.zeros((n_anchors+1, n_anchors+1))
    for i in range(n_anchors):
        for j in range(n_anchors):
            flux_matrix[i,j] = k_alpha_beta[i,j]
        flux_matrix[i, -1] = 1.0
    flux_matrix[-1,-1] = 1.0
    flux_matrix[-1,-2] = 1.0e5
    prob_equil = np.zeros((n_anchors+1,1), dtype=np.longdouble)
    prob_equil[-1] = 1.0
    # abs() guards against tiny negative components from the linear solve.
    pi_alpha = abs(la.solve(flux_matrix.T, prob_equil))
    return pi_alpha
def test_mcmc_3x3_mmvt(tmpdir_factory):
    """
    Check that MCMC sampling of an MMVT model and of the equivalent Elber
    milestoning model produce rate-matrix element distributions with
    matching means and standard deviations, since both parametrizations
    describe the same underlying kinetics.
    """
    num = 10000 #100000
    stride = 4
    skip = 90
    n_anchors = 4
    n_milestones = 3
    # generate data to feed directly into MMVT_data_sample()
    model = base.Model()
    model.num_milestones = n_milestones
    model.num_anchors = n_anchors
    # MMVT stats
    N_alpha_beta = {(0,1):12, (1,0):12,
                    (1,2):12, (2,1):12,
                    (2,3):6, (3,2):6}
    k_alpha_beta = {(0,1):20.0, (1,0):10.0,
                    (1,2):10.0, (2,1):(40.0/3.0),
                    (2,3):(20.0/3.0), (3,2):20.0}
    N_i_j_alpha = [{},
                   {(0,1):4, (1,0):4},
                   {(1,2):2, (2,1):2},
                   {}]
    R_i_alpha_total = [{0: 1.2},
                       {0: 1.2, 1:1.2},
                       {1: 1.2, 2:0.6},
                       {2: 0.6}]
    T_alpha_total = [1.2,
                     2.4,
                     1.8,
                     0.6]
    # Dense matrices and stationary anchor probabilities for the MMVT side.
    k_alpha_beta_matrix, N_alpha_beta_matrix, T_alpha_matrix \
        = make_k_N_T_matrices(
            n_anchors, k_alpha_beta, N_alpha_beta, T_alpha_total)
    pi_alpha = pi_alpha_from_K_alpha_beta(
        k_alpha_beta_matrix, n_anchors)
    mmvt_Nij_alpha, mmvt_Ri_alpha, mmvt_Qij_alpha, T \
        = make_N_R_T_alpha_matrices(
            n_anchors, n_milestones, pi_alpha, N_i_j_alpha, R_i_alpha_total,
            T_alpha_total)
    mmvt_Nij, mmvt_Ri, mmvt_Q = mmvt_analyze.mmvt_Q_N_R(
        n_milestones, n_anchors, mmvt_Nij_alpha, mmvt_Ri_alpha,
        T_alpha_total, T, pi_alpha)
    # Elber stats
    N_i_j = {(0,1): 4, (1,0): 4, (1,2): 2, (2,1): 2}
    R_i = {0: 2.4, 1: 2.4, 2: 1.2}
    elber_N = np.array([[0, 4, 0],
                        [4, 0, 2],
                        [0, 2, 0]])
    elber_R = np.array([[2.4],
                        [2.4],
                        [1.2]])
    # Build the Elber rate matrix Q_ij = N_ij / R_i with zero row sums.
    elber_Q = np.zeros((n_milestones, n_milestones))
    for i in range(n_milestones):
        for j in range(n_milestones):
            key = (i,j)
            if key in N_i_j:
                elber_Q[i,j] = N_i_j[key] / R_i[i]
    for i in range(n_milestones):
        elber_Q[i,i] = -np.sum(elber_Q[i,:])
    # Make sure the two models make identical matrices
    assert np.isclose(mmvt_Q, elber_Q).all()
    # Now compare the distributions of both of them
    # MMVT matrix sampler
    mmvt_q1_distribution = []
    mmvt_q2_distribution = []
    pi_alpha_dist = []
    for counter in range(num * (stride) + skip):
        #if verbose: print("MCMC stepnum: ", counter)
        # Sample a new anchor-level rate matrix, then re-derive pi_alpha.
        k_alpha_beta_matrix_new = markov_chain_monte_carlo\
            .irreversible_stochastic_matrix_algorithm_sample(
                k_alpha_beta_matrix, N_alpha_beta_matrix, T_alpha_matrix)
        pi_alpha_new = pi_alpha_from_K_alpha_beta(k_alpha_beta_matrix_new, n_anchors)
        pi_alpha_dist.append(pi_alpha_new)
        mmvt_Qnew_list = []
        mmvt_Nij_list = []
        # Sample each anchor's milestone-level rate matrix independently.
        for alpha in range(n_anchors):
            mmvt_Qnew_alpha = markov_chain_monte_carlo\
                .irreversible_stochastic_matrix_algorithm_sample(
                    mmvt_Qij_alpha[alpha], mmvt_Nij_alpha[alpha],
                    mmvt_Ri_alpha[alpha])
            new_mmvt_Nij_alpha = mmvt_analyze.make_new_Nij_alpha(
                mmvt_Qij_alpha[alpha], mmvt_Ri_alpha[alpha])
            mmvt_Qnew_list.append(mmvt_Qnew_alpha)
            mmvt_Nij_list.append(new_mmvt_Nij_alpha)
        # Record only thinned, post-burn-in samples.
        if counter > skip and counter % stride == 0:
            new_mmvt_Nij, new_mmvt_Ri, new_mmvt_Q = mmvt_analyze.mmvt_Q_N_R(
                n_milestones, n_anchors, mmvt_Nij_list, mmvt_Ri_alpha,
                T_alpha_total, T, pi_alpha_new)
            mmvt_q1_distribution.append(new_mmvt_Q[0,1])
            mmvt_q2_distribution.append(new_mmvt_Q[1,0])
        k_alpha_beta_matrix = k_alpha_beta_matrix_new
        mmvt_Qij_alpha = mmvt_Qnew_list
    # Elber matrix sampler
    elber_q1_distribution = []
    elber_q2_distribution = []
    for counter in range(num * (stride) + skip):
        #if verbose: print("MCMC stepnum: ", counter)
        elber_Qnew = markov_chain_monte_carlo\
            .irreversible_stochastic_matrix_algorithm_sample(
                elber_Q, elber_N, elber_R)
        if counter > skip and counter % stride == 0:
            elber_q1_distribution.append(elber_Q[0,1])
            elber_q2_distribution.append(elber_Q[1,0])
        elber_Q = elber_Qnew
    # The two samplers' moments must agree within loose MCMC tolerances.
    assert np.isclose(np.mean(elber_q1_distribution), np.mean(mmvt_q1_distribution), rtol=0.5, atol=0.01)
    assert np.isclose(np.std(elber_q1_distribution), np.std(mmvt_q1_distribution), rtol=0.5, atol=0.01)
    assert np.isclose(np.mean(elber_q2_distribution), np.mean(mmvt_q2_distribution), rtol=0.5, atol=0.01)
    assert np.isclose(np.std(elber_q2_distribution), np.std(mmvt_q2_distribution), rtol=0.5, atol=0.01)
    return
| StarcoderdataPython |
4937708 | #!/usr/bin/env python3
import sys
import requests
# Read run IDs (one per line) from the file given on the command line and
# trigger a submit request for each one against the local service.
# Fix: the original leaked the file handle (never closed) and read the whole
# file into memory with readlines(); a ``with`` block and line iteration
# handle both.
with open(sys.argv[1], "r") as f:
    for line in f:
        url = "http://localhost:4000/submit?run_id=" + line.strip()
        print(url)
        r = requests.get(url)
        print(r.text)
| StarcoderdataPython |
9744471 | """Main class, holding information about models and training/testing routines."""
import torch
import warnings
import time
import pickle
from ..utils import cw_loss, reverse_xent, reverse_xent_avg
from ..consts import NON_BLOCKING, BENCHMARK
torch.backends.cudnn.benchmark = BENCHMARK
class _Forgemaster():
    """Brew poison with given arguments.

    Base class.

    This class implements _forge(), which is the main loop for iterative poisoning.
    New iterative poisoning methods overwrite the _define_objective method.

    Noniterative poison methods overwrite the _forge() method itself.

    “Double, double toil and trouble;
    Fire burn, and cauldron bubble....

    Round about the cauldron go;
    In the poison'd entrails throw.”
    """

    def __init__(self, args, setup=dict(device=torch.device('cpu'), dtype=torch.float)):
        """Initialize a model with given specs...

        args:  argparse-style namespace holding all attack hyperparameters.
        setup: dict with the torch device and dtype used for all tensors.
        """
        self.args, self.setup = args, setup
        # Retain graphs only for multi-model ensembles run in a single process.
        self.retain = True if self.args.ensemble > 1 and self.args.local_rank is None else False
        self.stat_optimal_loss = None

    """ BREWING RECIPES """

    def forge(self, client, furnace):
        """Recipe interface: optionally resume a previous run, then forge."""
        if self.args.resume != '':
            # Restore the poison ids and construction index of a prior run.
            resume_info = pickle.load(open(f'{self.args.resume}/info.pkl', 'rb'))
            global_poison_ids, idx = resume_info[0], resume_info[1] + 1
            if self.args.resume_idx is not None:
                idx = self.args.resume_idx
            # poison_ids, idx
            furnace.batched_construction_reset(global_poison_ids, idx)
        poison_delta = self._forge(client, furnace)
        return poison_delta

    def _forge(self, client, furnace):
        """Run generalized iterative routine over ``args.restarts`` trials,
        returning the poison perturbation with the lowest target loss."""
        print(f'Starting forgeing procedure ...')
        self._initialize_forge(client, furnace)
        poisons, scores = [], torch.ones(self.args.restarts) * 10_000
        for trial in range(self.args.restarts):
            poison_delta, target_losses = self._run_trial(client, furnace)
            scores[trial] = target_losses
            poisons.append(poison_delta.detach())
            if self.args.dryrun:
                break
        optimal_score = torch.argmin(scores)
        self.stat_optimal_loss = scores[optimal_score].item()
        print(f'Poisons with minimal target loss {self.stat_optimal_loss:6.4e} selected.')
        poison_delta = poisons[optimal_score]
        return poison_delta

    def _initialize_forge(self, client, furnace):
        """Implement common initialization operations for forgeing."""
        client.eval(dropout=True)
        # The PGD tau that will actually be used:
        # This is not super-relevant for the adam variants
        # but the PGD variants are especially sensitive
        # E.G: 92% for PGD with rule 1 and 20% for rule 2
        if self.args.attackoptim in ['PGD', 'GD']:
            # Rule 1
            # self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau * (self.args.pbatch / 512) / self.args.ensemble
            self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau
        elif self.args.attackoptim in ['momSGD', 'momPGD']:
            # Rule 1a
            self.tau0 = self.args.eps / 255 / furnace.ds * self.args.tau * (self.args.pbatch / 512) / self.args.ensemble
            self.tau0 = self.tau0.mean()
        else:
            # Rule 2
            self.tau0 = self.args.tau * (self.args.pbatch / 512) / self.args.ensemble

    def _run_trial(self, client, furnace):
        """Run a single trial: iterate the attack optimizer over the poison
        batches for ``args.attackiter`` steps and return the final delta and
        the last per-epoch average target loss."""
        poison_delta = furnace.initialize_poison()
        if self.args.full_data:
            dataloader = furnace.trainloader
        else:
            dataloader = furnace.poisonloader

        if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:
            # poison_delta.requires_grad_()
            if self.args.attackoptim in ['Adam', 'signAdam']:
                att_optimizer = torch.optim.Adam([poison_delta], lr=self.tau0, weight_decay=0)
            else:
                att_optimizer = torch.optim.SGD([poison_delta], lr=self.tau0, momentum=0.9, weight_decay=0)
            if self.args.scheduling:
                scheduler = torch.optim.lr_scheduler.MultiStepLR(
                    att_optimizer,
                    milestones=[self.args.attackiter // 2.667, self.args.attackiter // 1.6,
                                self.args.attackiter // 1.142], gamma=0.1)
            poison_delta.grad = torch.zeros_like(poison_delta)
            dm, ds = furnace.dm.to(device=torch.device('cpu')), furnace.ds.to(device=torch.device('cpu'))
            poison_bounds = torch.zeros_like(poison_delta)
        else:
            poison_bounds = None

        for step in range(self.args.attackiter):
            if step % 10 == 0:
                print(f'Step {step}')
            target_losses = 0
            poison_correct = 0
            for batch, example in enumerate(dataloader):
                if batch == 0:
                    start = time.time()
                elif batch % 100 == 0:
                    end = time.time()
                    avg = (end - start) / 100
                    start = end
                    print(f'average time per epoch: {len(dataloader) * avg}')
                loss, prediction = self._batched_step(poison_delta, poison_bounds, example, client, furnace)
                target_losses += loss
                poison_correct += prediction
                if self.args.dryrun:
                    break

            # Note that these steps are handled batch-wise for PGD in _batched_step
            # For the momentum optimizers, we only accumulate gradients for all poisons
            # and then use optimizer.step() for the update. This is math. equivalent
            # and makes it easier to let pytorch track momentum.
            if self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:
                if self.args.attackoptim in ['momPGD', 'signAdam']:
                    poison_delta.grad.sign_()
                att_optimizer.step()
                if self.args.scheduling:
                    scheduler.step()
                att_optimizer.zero_grad()
                with torch.no_grad():
                    # Projection Step: keep delta inside the eps-ball and the
                    # valid (normalized) image range.
                    poison_delta.data = torch.max(torch.min(poison_delta, self.args.eps /
                                                            ds / 255), -self.args.eps / ds / 255)
                    poison_delta.data = torch.max(torch.min(poison_delta, (1 - dm) / ds -
                                                            poison_bounds), -dm / ds - poison_bounds)

            target_losses = target_losses / (batch + 1)
            poison_acc = poison_correct / len(dataloader.dataset)
            if step % (self.args.attackiter // 5) == 0 or step == (self.args.attackiter - 1):
                print(f'Iteration {step}: Target loss is {target_losses:2.4f}, '
                      f'Poison clean acc is {poison_acc * 100:2.2f}%')

            if self.args.step:
                # NOTE(review): self.targets/self.true_classes are only set in
                # _batched_step for the 'poison-frogs' recipe (and true_classes
                # is never set in this file) -- confirm subclasses define them.
                if self.args.clean_grad:
                    client.step(furnace, None, self.targets, self.true_classes)
                else:
                    client.step(furnace, poison_delta, self.targets, self.true_classes)

            if self.args.dryrun:
                break

        return poison_delta, target_losses

    def _batched_step(self, poison_delta, poison_bounds, example, client, furnace):
        """Take a step toward minmizing the current target loss."""
        inputs, labels, ids = example
        inputs = inputs.to(**self.setup)
        labels = labels.to(dtype=torch.long, device=self.setup['device'], non_blocking=NON_BLOCKING)

        # Add adversarial pattern: find which images in this batch are poisons.
        poison_slices, batch_positions = [], []
        for batch_id, image_id in enumerate(ids.tolist()):
            lookup = furnace.poison_lookup.get(image_id)
            if lookup is not None:
                poison_slices.append(lookup)
                batch_positions.append(batch_id)
        if len(batch_positions) > 0:
            delta_slice = poison_delta[poison_slices].detach().to(**self.setup)
            if self.args.clean_grad:
                delta_slice = torch.zeros_like(delta_slice)
            delta_slice.requires_grad_()
            poison_images = inputs[batch_positions]
            if self.args.recipe == 'poison-frogs':
                self.targets = inputs.clone().detach()
            inputs[batch_positions] += delta_slice

            # Perform differentiable data augmentation
            if self.args.paugment:
                inputs = furnace.augment(inputs, randgen=None)

            # Define the loss objective and compute gradients
            closure = self._define_objective(inputs, labels)
            loss, prediction = client.compute(closure)
            delta_slice = client.sync_gradients(delta_slice)

            if self.args.clean_grad:
                delta_slice.data = poison_delta[poison_slices].detach().to(**self.setup)

            # Update Step
            if self.args.attackoptim in ['PGD', 'GD']:
                delta_slice = self._pgd_step(delta_slice, poison_images, self.tau0, furnace.dm, furnace.ds)
                # Return slice to CPU:
                poison_delta[poison_slices] = delta_slice.detach().to(device=torch.device('cpu'))
            elif self.args.attackoptim in ['Adam', 'signAdam', 'momSGD', 'momPGD']:
                poison_delta.grad[poison_slices] = delta_slice.grad.detach().to(device=torch.device('cpu'))
                poison_bounds[poison_slices] = poison_images.detach().to(device=torch.device('cpu'))
            else:
                raise NotImplementedError('Unknown attack optimizer.')
        else:
            loss, prediction = torch.tensor(0), torch.tensor(0)

        return loss.item(), prediction.item()

    def _define_objective(self, inputs, labels):
        """Return a closure evaluating the attack objective on one batch.

        Subclasses must override this; the closure is executed via
        ``client.compute`` and must return ``(loss, prediction)``.
        """
        # Fix: the original stub took no arguments (so calling it via
        # self._define_objective(inputs, labels) raised a TypeError) and
        # contained an unreachable return referencing undefined names
        # (target_loss / prediction).  Raise NotImplementedError instead.
        raise NotImplementedError()

    def _pgd_step(self, delta_slice, poison_imgs, tau, dm, ds):
        """PGD step: gradient (sign) descent followed by projection onto the
        eps-ball and the valid normalized image range."""
        with torch.no_grad():
            # Gradient Step
            if self.args.attackoptim == 'GD':
                delta_slice.data -= delta_slice.grad * tau
            else:
                delta_slice.data -= delta_slice.grad.sign() * tau
            # Projection Step
            delta_slice.data = torch.max(torch.min(delta_slice, self.args.eps /
                                                   ds / 255), -self.args.eps / ds / 255)
            delta_slice.data = torch.max(torch.min(delta_slice, (1 - dm) / ds -
                                                   poison_imgs), -dm / ds - poison_imgs)
        return delta_slice
| StarcoderdataPython |
6689092 | from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import re
from collections import OrderedDict
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields.related import ForeignObjectRel
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .compat import remote_field, remote_queryset
from .filters import (Filter, CharFilter, BooleanFilter, BaseInFilter, BaseRangeFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter, UUIDFilter,
DurationFilter)
from .utils import try_dbfield, get_all_model_fields, get_model_field, resolve_field, deprecate
# Name of the form field that carries the requested ordering (see
# BaseFilterSet.order_by_field below).
ORDER_BY_FIELD = 'o'
class STRICTNESS(object):
    """
    Strictness options for handling invalid filter input.

    Values of False & True chosen for backward compatibility reasons.
    Originally, these were the only options.
    """
    # Invalid filter input is silently ignored.
    IGNORE = False
    # Invalid filter input yields an empty queryset.
    RETURN_NO_RESULTS = True
    # Invalid filter input raises forms.ValidationError.
    RAISE_VALIDATION_ERROR = "RAISE"
def get_declared_filters(bases, attrs, with_base_filters=True):
    """Collect the Filter instances declared in ``attrs``, in declaration
    order, prepended by filters inherited from ``bases``.

    Declared filters are popped out of ``attrs``; a filter with no
    explicit ``name`` gets the attribute name.  ``with_base_filters``
    selects which attribute of the bases is consulted.
    """
    declared = []
    for attr_name, value in list(attrs.items()):
        if not isinstance(value, Filter):
            continue
        value = attrs.pop(attr_name)
        if getattr(value, 'name', None) is None:
            value.name = attr_name
        declared.append((attr_name, value))
    # Preserve the order in which the filters were instantiated.
    declared.sort(key=lambda pair: pair[1].creation_counter)

    inherited = []
    base_attr = 'base_filters' if with_base_filters else 'declared_filters'
    # Walk the MRO back-to-front so earlier bases end up first.
    for base in reversed(bases):
        if hasattr(base, base_attr):
            inherited = list(getattr(base, base_attr).items()) + inherited

    return OrderedDict(inherited + declared)
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
                      filter_for_reverse_field=None):
    """
    Build an OrderedDict mapping filter names to Filter instances for
    ``model``.

    ``fields`` may be ``'__all__'``, a list of field names, or a dict
    mapping field names to lists of lookup expressions.  Field names that
    cannot be resolved on the model are mapped to ``None`` so the caller
    can report them.
    """
    field_dict = OrderedDict()
    # Setting exclude with no fields implies all other fields.
    if exclude is not None and fields is None:
        fields = '__all__'
    # All implies all db fields associated with a filter_class.
    if fields == '__all__':
        fields = get_all_model_fields(model)
    # Loop through the list of fields.
    for f in fields:
        # Skip the field if excluded.
        if exclude is not None and f in exclude:
            continue
        field = get_model_field(model, f)
        # Do nothing if the field doesn't exist.
        if field is None:
            field_dict[f] = None
            continue
        if isinstance(field, ForeignObjectRel):
            filter_ = filter_for_reverse_field(field, f)
            if filter_:
                field_dict[f] = filter_
        # If fields is a dictionary, it must contain lists.
        elif isinstance(fields, dict):
            # Create a filter for each lookup type.
            for lookup_expr in fields[f]:
                filter_ = filter_for_field(field, f, lookup_expr)
                if filter_:
                    filter_name = LOOKUP_SEP.join([f, lookup_expr])
                    # Don't add "exact" to filter names
                    _exact = LOOKUP_SEP + 'exact'
                    if filter_name.endswith(_exact):
                        filter_name = filter_name[:-len(_exact)]
                    field_dict[filter_name] = filter_
        # If fields is a list, it contains strings.
        else:
            filter_ = filter_for_field(field, f)
            if filter_:
                field_dict[f] = filter_
    return field_dict
def get_full_clean_override(together):
    """
    Build a ``full_clean`` replacement that enforces Meta.together: the
    fields in a group must either all be filled in or all left empty.
    ``together`` is a list/tuple of field names, or a list of such groups.
    """
    def full_clean(form):
        def add_error(message):
            try:
                form.add_error(None, message)
            except AttributeError:
                # Older Django without Form.add_error.
                form._errors[NON_FIELD_ERRORS] = message
        def all_valid(fieldset):
            # NOTE: despite the name, this returns True when the group is
            # only PARTIALLY filled (some but not all values present) --
            # i.e. exactly the invalid case that must produce the error.
            cleaned_data = form.cleaned_data
            count = len([i for i in fieldset if cleaned_data.get(i)])
            return 0 < count < len(fieldset)
        super(form.__class__, form).full_clean()
        message = 'Following fields must be together: %s'
        if isinstance(together[0], (list, tuple)):
            for each in together:
                if all_valid(each):
                    return add_error(message % ','.join(each))
        elif all_valid(together):
            return add_error(message % ','.join(together))
    return full_clean
class FilterSetOptions(object):
    """Parsed ``Meta`` options of a FilterSet (model, fields, exclude,
    ordering, form class and 'together' groups)."""
    def __init__(self, options=None):
        model = getattr(options, 'model', None)
        # Warn about deprecated/undocumented Meta configurations.
        if model is not None:
            if not hasattr(options, 'fields') and not hasattr(options, 'exclude'):
                deprecate(
                    "Not setting Meta.fields with Meta.model is undocumented behavior "
                    "and may result in unintentionally exposing filter fields. This has "
                    "been deprecated in favor of setting Meta.fields = '__all__' or by "
                    "setting the Meta.exclude attribute.", 1)
            elif getattr(options, 'fields', -1) is None:
                deprecate(
                    "Setting 'Meta.fields = None' is undocumented behavior and has been "
                    "deprecated in favor of Meta.fields = '__all__'.", 1)

        self.model = model
        self.fields = getattr(options, 'fields', None)
        self.exclude = getattr(options, 'exclude', None)
        self.order_by = getattr(options, 'order_by', False)
        self.form = getattr(options, 'form', forms.Form)
        self.together = getattr(options, 'together', None)
class FilterSetMetaclass(type):
    """
    Metaclass that collects declared Filter attributes and, when a
    Meta.model is given, generates filters for the model's fields.
    """
    def __new__(cls, name, bases, attrs):
        try:
            parents = [b for b in bases if issubclass(b, FilterSet)]
        except NameError:
            # We are defining FilterSet itself here
            parents = None
        declared_filters = get_declared_filters(bases, attrs, False)
        new_class = super(
            FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
        if not parents:
            return new_class
        opts = new_class._meta = FilterSetOptions(
            getattr(new_class, 'Meta', None))
        # TODO: replace with deprecations
        # if opts.model and opts.fields:
        if opts.model:
            filters = new_class.filters_for_model(opts.model, opts)
            # Explicitly declared filters override the generated ones.
            filters.update(declared_filters)
        else:
            filters = declared_filters
        # filters_for_model maps unresolvable Meta.fields entries to None;
        # unless a declared filter covers them, that is a user error.
        not_defined = next((k for k, v in filters.items() if v is None), False)
        if not_defined:
            raise TypeError("Meta.fields contains a field that isn't defined "
                            "on this FilterSet: {}".format(not_defined))
        new_class.declared_filters = declared_filters
        new_class.base_filters = filters
        return new_class
# Maps each Django model field class to the Filter class used for it by
# default, plus optional 'extra' constructor kwargs built lazily from the
# model field (e.g. the related queryset for relational fields).
FILTER_FOR_DBFIELD_DEFAULTS = {
    models.AutoField: {
        'filter_class': NumberFilter
    },
    models.CharField: {
        'filter_class': CharFilter
    },
    models.TextField: {
        'filter_class': CharFilter
    },
    models.BooleanField: {
        'filter_class': BooleanFilter
    },
    models.DateField: {
        'filter_class': DateFilter
    },
    models.DateTimeField: {
        'filter_class': DateTimeFilter
    },
    models.TimeField: {
        'filter_class': TimeFilter
    },
    models.DurationField: {
        'filter_class': DurationFilter
    },
    models.OneToOneField: {
        'filter_class': ModelChoiceFilter,
        'extra': lambda f: {
            'queryset': remote_queryset(f),
            'to_field_name': remote_field(f).field_name,
        }
    },
    models.ForeignKey: {
        'filter_class': ModelChoiceFilter,
        'extra': lambda f: {
            'queryset': remote_queryset(f),
            'to_field_name': remote_field(f).field_name,
        }
    },
    models.ManyToManyField: {
        'filter_class': ModelMultipleChoiceFilter,
        'extra': lambda f: {
            'queryset': remote_queryset(f),
        }
    },
    models.DecimalField: {
        'filter_class': NumberFilter,
    },
    models.SmallIntegerField: {
        'filter_class': NumberFilter,
    },
    models.IntegerField: {
        'filter_class': NumberFilter,
    },
    models.PositiveIntegerField: {
        'filter_class': NumberFilter,
    },
    models.PositiveSmallIntegerField: {
        'filter_class': NumberFilter,
    },
    models.FloatField: {
        'filter_class': NumberFilter,
    },
    models.NullBooleanField: {
        'filter_class': BooleanFilter,
    },
    models.SlugField: {
        'filter_class': CharFilter,
    },
    models.EmailField: {
        'filter_class': CharFilter,
    },
    models.FilePathField: {
        'filter_class': CharFilter,
    },
    models.URLField: {
        'filter_class': CharFilter,
    },
    models.GenericIPAddressField: {
        'filter_class': CharFilter,
    },
    models.CommaSeparatedIntegerField: {
        'filter_class': CharFilter,
    },
    models.UUIDField: {
        'filter_class': UUIDFilter,
    },
}
class BaseFilterSet(object):
    """
    Core FilterSet implementation: holds the bound data, the filter
    instances, the generated form and the lazily evaluated filtered
    queryset (``qs``).
    """
    filter_overrides = {}
    order_by_field = ORDER_BY_FIELD
    # What to do on on validation errors
    strict = STRICTNESS.RETURN_NO_RESULTS
    def __init__(self, data=None, queryset=None, prefix=None, strict=None):
        """data: mapping of GET parameters; queryset defaults to the model's
        default manager; prefix is the form prefix; strict overrides the
        class-level STRICTNESS setting."""
        self.is_bound = data is not None
        self.data = data or {}
        if queryset is None:
            queryset = self._meta.model._default_manager.all()
        self.queryset = queryset
        self.form_prefix = prefix
        if strict is not None:
            self.strict = strict
        # Deep-copy so per-instance state on filters never leaks between
        # FilterSet instances.
        self.filters = copy.deepcopy(self.base_filters)
        # propagate the model being used through the filters
        for filter_ in self.filters.values():
            filter_.model = self._meta.model
        # Apply the parent to the filters, this will allow the filters to access the filterset
        for filter_key, filter_ in six.iteritems(self.filters):
            filter_.parent = self
    def __iter__(self):
        # Deprecated queryset proxy; iterate .qs directly instead.
        deprecate('QuerySet methods are no longer proxied.')
        for obj in self.qs:
            yield obj
    def __len__(self):
        # Deprecated queryset proxy.
        deprecate('QuerySet methods are no longer proxied.')
        return self.qs.count()
    def __getitem__(self, key):
        # Deprecated queryset proxy.
        deprecate('QuerySet methods are no longer proxied.')
        return self.qs[key]
    def count(self):
        # Deprecated queryset proxy.
        deprecate('QuerySet methods are no longer proxied.')
        return self.qs.count()
    @property
    def qs(self):
        """The filtered queryset, computed once and cached on the instance.

        Invalid input is handled according to ``self.strict``: raise,
        return an empty queryset, or ignore the offending filter.
        """
        if not hasattr(self, '_qs'):
            valid = self.is_bound and self.form.is_valid()
            if self.is_bound and not valid:
                if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
                    raise forms.ValidationError(self.form.errors)
                elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
                    self._qs = self.queryset.none()
                    return self._qs
                # else STRICTNESS.IGNORE... ignoring
            # start with all the results and filter from there
            qs = self.queryset.all()
            for name, filter_ in six.iteritems(self.filters):
                value = None
                if valid:
                    value = self.form.cleaned_data[name]
                else:
                    # Clean each field individually so valid filters can
                    # still be applied under STRICTNESS.IGNORE.
                    raw_value = self.form[name].value()
                    try:
                        value = self.form.fields[name].clean(raw_value)
                    except forms.ValidationError:
                        if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
                            raise
                        elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
                            self._qs = self.queryset.none()
                            return self._qs
                        # else STRICTNESS.IGNORE... ignoring
                if value is not None:  # valid & clean data
                    qs = filter_.filter(qs, value)
            if self._meta.order_by:
                order_field = self.form.fields[self.order_by_field]
                data = self.form[self.order_by_field].data
                ordered_value = None
                try:
                    ordered_value = order_field.clean(data)
                except forms.ValidationError:
                    pass
                # With a None-queryset, ordering must be enforced (#84).
                if (ordered_value in EMPTY_VALUES and
                        self.strict == STRICTNESS.RETURN_NO_RESULTS):
                    ordered_value = self.form.fields[self.order_by_field].choices[0][0]
                if ordered_value:
                    qs = qs.order_by(*self.get_order_by(ordered_value))
            self._qs = qs
        return self._qs
    @property
    def form(self):
        """A Form subclass built from the filters' fields (plus the ordering
        field), instantiated with the bound data; cached per instance."""
        if not hasattr(self, '_form'):
            fields = OrderedDict([
                (name, filter_.field)
                for name, filter_ in six.iteritems(self.filters)])
            fields[self.order_by_field] = self.ordering_field
            Form = type(str('%sForm' % self.__class__.__name__),
                        (self._meta.form,), fields)
            if self._meta.together:
                Form.full_clean = get_full_clean_override(self._meta.together)
            if self.is_bound:
                self._form = Form(self.data, prefix=self.form_prefix)
            else:
                self._form = Form(prefix=self.form_prefix)
        return self._form
    def get_ordering_field(self):
        """Build the ChoiceField offering the available orderings, derived
        from Meta.order_by (explicit choices) or from the filters."""
        if self._meta.order_by:
            if isinstance(self._meta.order_by, (list, tuple)):
                if isinstance(self._meta.order_by[0], (list, tuple)):
                    # e.g. (('field', 'Display name'), ...)
                    choices = [(f[0], f[1]) for f in self._meta.order_by]
                else:
                    choices = []
                    for f in self._meta.order_by:
                        if f[0] == '-':
                            label = _('%s (descending)' % capfirst(f[1:]))
                        else:
                            label = capfirst(f)
                        choices.append((f, label))
            else:
                # add asc and desc field names
                # use the filter's label if provided
                choices = []
                for f, fltr in self.filters.items():
                    choices.extend([
                        (f, fltr.label or capfirst(f)),
                        ("-%s" % (f), _('%s (descending)' % (fltr.label or capfirst(f))))
                    ])
            return forms.ChoiceField(label=_("Ordering"), required=False,
                                     choices=choices)
    @property
    def ordering_field(self):
        # Cached accessor for get_ordering_field().
        if not hasattr(self, '_ordering_field'):
            self._ordering_field = self.get_ordering_field()
        return self._ordering_field
    def get_order_by(self, order_choice):
        """Translate an ordering choice (possibly '-'-prefixed, possibly a
        filter name differing from the model field name) into the list of
        arguments for QuerySet.order_by()."""
        re_ordering_field = re.compile(r'(?P<inverse>\-?)(?P<field>.*)')
        m = re.match(re_ordering_field, order_choice)
        inverted = m.group('inverse')
        filter_api_name = m.group('field')
        _filter = self.filters.get(filter_api_name, None)
        # When the filter's name differs from its API name, order by the
        # underlying model field name.
        if _filter and filter_api_name != _filter.name:
            return [inverted + _filter.name]
        return [order_choice]
    @classmethod
    def filters_for_model(cls, model, opts):
        # TODO: remove with deprecations - this emulates the old behavior
        fields = opts.fields
        if fields is None:
            DEFAULTS = dict(FILTER_FOR_DBFIELD_DEFAULTS)
            DEFAULTS.update(cls.filter_overrides)
            fields = get_all_model_fields(model, field_types=DEFAULTS.keys())
        return filters_for_model(
            model, fields, opts.exclude,
            cls.filter_for_field,
            cls.filter_for_reverse_field
        )
    @classmethod
    def filter_for_field(cls, f, name, lookup_expr='exact'):
        """Create a Filter instance for model field ``f`` / lookup, or raise
        if the field type cannot be resolved to a filter class."""
        f, lookup_type = resolve_field(f, lookup_expr)
        default = {
            'name': name,
            'label': capfirst(f.verbose_name),
            'lookup_expr': lookup_expr
        }
        filter_class, params = cls.filter_for_lookup(f, lookup_type)
        default.update(params)
        assert filter_class is not None, (
            "%s resolved field '%s' with '%s' lookup to an unrecognized field "
            "type %s. Try adding an override to 'filter_overrides'. See: "
            "https://django-filter.readthedocs.io/en/latest/usage.html#overriding-default-filters"
        ) % (cls.__name__, name, lookup_expr, f.__class__.__name__)
        return filter_class(**default)
    @classmethod
    def filter_for_reverse_field(cls, f, name):
        """Create a ModelChoiceFilter/ModelMultipleChoiceFilter for a reverse
        relation ``f``."""
        rel = remote_field(f.field)
        queryset = f.field.model._default_manager.all()
        default = {
            'name': name,
            'label': capfirst(rel.related_name),
            'queryset': queryset,
        }
        if rel.multiple:
            return ModelMultipleChoiceFilter(**default)
        else:
            return ModelChoiceFilter(**default)
    @classmethod
    def filter_for_lookup(cls, f, lookup_type):
        """Return (filter_class, extra_params) for field ``f`` and the given
        lookup, consulting FILTER_FOR_DBFIELD_DEFAULTS and overrides and
        special-casing choices, isnull, in and range lookups."""
        DEFAULTS = dict(FILTER_FOR_DBFIELD_DEFAULTS)
        DEFAULTS.update(cls.filter_overrides)
        data = try_dbfield(DEFAULTS.get, f.__class__) or {}
        filter_class = data.get('filter_class')
        params = data.get('extra', lambda f: {})(f)
        # if there is no filter class, exit early
        if not filter_class:
            return None, {}
        # perform lookup specific checks
        if lookup_type == 'exact' and f.choices:
            return ChoiceFilter, {'choices': f.choices}
        if lookup_type == 'isnull':
            data = try_dbfield(DEFAULTS.get, models.BooleanField)
            filter_class = data.get('filter_class')
            params = data.get('extra', lambda f: {})(f)
            return filter_class, params
        if lookup_type == 'in':
            # Build a concrete CSV 'in' filter on the fly.
            class ConcreteInFilter(BaseInFilter, filter_class):
                pass
            ConcreteInFilter.__name__ = cls._csv_filter_class_name(
                filter_class, lookup_type
            )
            return ConcreteInFilter, params
        if lookup_type == 'range':
            # Build a concrete CSV 'range' filter on the fly.
            class ConcreteRangeFilter(BaseRangeFilter, filter_class):
                pass
            ConcreteRangeFilter.__name__ = cls._csv_filter_class_name(
                filter_class, lookup_type
            )
            return ConcreteRangeFilter, params
        return filter_class, params
    @classmethod
    def _csv_filter_class_name(cls, filter_class, lookup_type):
        """
        Generate a suitable class name for a concrete filter class. This is not
        completely reliable, as not all filter class names are of the format
        <Type>Filter.
        ex::
            FilterSet._csv_filter_class_name(DateTimeFilter, 'in')
        returns 'DateTimeInFilter'
        """
        # DateTimeFilter => DateTime
        type_name = filter_class.__name__
        if type_name.endswith('Filter'):
            type_name = type_name[:-6]
        # in => In
        lookup_name = lookup_type.capitalize()
        # DateTimeInFilter
        return str('%s%sFilter' % (type_name, lookup_name))
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
    """Concrete FilterSet: BaseFilterSet behavior combined with the
    declarative FilterSetMetaclass."""
    pass
def filterset_factory(model):
    """Return a FilterSet subclass exposing all filterable fields of
    ``model`` (equivalent to declaring Meta.fields = '__all__')."""
    meta_cls = type(str('Meta'), (object,), {'model': model, 'fields': '__all__'})
    class_name = str('%sFilterSet' % model._meta.object_name)
    return type(class_name, (FilterSet,), {'Meta': meta_cls})
| StarcoderdataPython |
8043003 | from sys import argv
from math import sqrt, trunc
from numpy import zeros, sum, ndarray
import time
from numba import jit, float32, boolean, int32, float64, int64
compile_start = time.time()


@jit([boolean(int32)])
def check_prime(num):
    """Return True if ``num`` is prime, via trial division by odd factors
    up to sqrt(num) (numba-compiled)."""
    # Fix: numbers below 2 (0, 1 and negatives) are not prime; the original
    # version wrongly reported 1 (and odd negatives) as prime.
    if num < 2:
        return False
    has_divisor = False
    if num == 2:
        return True
    # Check 2 specially
    if num % 2 == 0:
        # Don't need to check any further - return right now
        return False
    for i in range(3, trunc(sqrt(num))+1, 2):
        if num % i == 0:
            has_divisor = True
            # Don't need to check any further - at least one divisor exists
            break
    # Either we found a divisor, or there are none
    return not has_divisor


compile_end = time.time()
print("Compilation of check_prime took {} seconds".format(compile_end-compile_start))
def check_prime_nj(num):
    """Return True if ``num`` is prime, via trial division by odd factors
    up to sqrt(num) (pure-Python twin of the jitted check_prime)."""
    # Fix: numbers below 2 (0, 1 and negatives) are not prime; the original
    # version wrongly reported 1 (and odd negatives) as prime.
    if num < 2:
        return False
    has_divisor = False
    if num == 2:
        return True
    # Check 2 specially
    if num % 2 == 0:
        # Don't need to check any further - return right now
        return False
    for i in range(3, trunc(sqrt(num))+1, 2):
        if num % i == 0:
            has_divisor = True
            # Don't need to check any further - at least one divisor exists
            break
    # Either we found a divisor, or there are none
    return not has_divisor
compile_start = time.time()
@jit((int64, int64, float64[:]))
def main_loop(lower, length, flags):
    """Set flags[i] = 1 for each prime lower+i (numba-compiled)."""
    # NOTE(review): the range starts at 1, so ``lower`` itself (index 0) is
    # never tested and flags[0] stays 0 -- confirm the lower bound is meant
    # to be exclusive.
    for i in range(1, length):
        flags[i] = check_prime(lower+i)
compile_end = time.time()
print("Compilation of main_loop took {} seconds".format(compile_end-compile_start))
def main_loop_nj(lower, length, flags):
    """Set flags[i] = 1 for each prime lower+i (pure-Python twin of
    main_loop)."""
    # NOTE(review): mirrors main_loop -- index 0 (``lower`` itself) is
    # skipped; confirm the lower bound is meant to be exclusive.
    for i in range(1, length):
        flags[i] = check_prime_nj(lower+i)
def main(lower, upper, do_jit=True):
length = upper - lower
flags = zeros(length)
start_time = time.time()
if(do_jit):
main_loop(lower, length, flags)
else:
main_loop_nj(lower, length, flags)
end_time = time.time()
print("Found ", int(sum(flags)), " primes in", end_time-start_time, ' s')
if __name__ == "__main__":
try:
lower = int(input('Enter lower bnd: '))
except:
print("I didn't understand. I'll try 100000")
lower = 100000
try:
upper = int(input('Enter upper bnd: '))
except:
print("I didn't understand. I'll try 200000")
upper = 200000
main(lower, upper)
| StarcoderdataPython |
4934461 | #!env python3
#-*- coding: utf-8 -*-
"""
feasibility research No.0 for 'editor'
1. get current tty device path and write this path to a file.
2. execute vim to editor this file, restart if user exit vim
"""
import logging
import os
import sys
def main():
"""main function"""
# get tty path name, write to a file
file_name = 'test.tmp'
file_obj = open(file_name, 'w')
tty_name = os.ttyname(sys.stdout.fileno())
file_obj.write(tty_name)
file_obj.close()
logging.info("tty name is %s", tty_name)
# start vim, and reboot vim when user quit
while True:
os.system('vim '+file_name)
answer = input('pricess x to exit, any other key to restart vim: ')
if answer == 'x':
break
return 0
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # exit() raises SystemExit, so the original trailing `exit(-1)` was
    # unreachable and has been removed.
    exit(main())
| StarcoderdataPython |
5193110 | <reponame>Felicia56/flavio
r"""Module for Higgs production and decay.
Based on arXiv:1911.07866."""
from . import production
from . import decay
from . import width
from . import signalstrength
| StarcoderdataPython |
6614889 | <gh_stars>1-10
"""Accesses the Google Analytics API to spit out a CSV of aircraft usage"""
from __future__ import division, print_function
import argparse
import collections
import logging
from ga_library import *
from utils import *
from collections import defaultdict, OrderedDict
SHOW_ABSOLUTE_NUMBERS = False
_out = ''
def _log(s, end='\n'):
    """Append *s* followed by *end* to the module-level CSV output buffer."""
    global _out
    _out = _out + s + end
# Filename suffix "_<version>_<group>_<date>"; set by write_hardware_analysis_files().
file_name_suffix = ''
def main():
argparser = argparse.ArgumentParser(description='Dumps hardware stats from X-Plane Desktop; you probably want to pipe the output to a CSV file')
argparser.add_argument('--version', type=int, default=11, help='The major version of X-Plane you want data on (10 or 11)')
args = argparser.parse_args()
write_hardware_analysis_files(Version.v11 if args.version == 11 else Version.v10, UserGroup.PaidOnly)
def write_hardware_analysis_files(version: Union[int, Version], user_group: UserGroup, csv_path=None):
"""
:type csv_path: Union[str,None]
"""
global file_name_suffix
file_name_suffix = "_%s_%s_%s" % (version, user_group.name, today_file_suffix())
qm = SimpleQueryMgr(GaService.desktop(), version, Metric.Users, user_group)
perform_cpu_analysis(qm.query(CustomDimension.Cpu))
perform_flight_controls_analysis(qm.query(CustomDimension.FlightControls))
stats = HardwareStats(GaService.desktop(), version, Metric.Users, user_group)
grapher = HardwareGrapher(stats)
perform_ram_analysis(stats)
perform_gpu_analysis(stats)
perform_os_analysis(stats, grapher)
perform_vr_analysis(stats, grapher)
if not csv_path:
csv_path = "hardware_analysis%s.csv" % file_name_suffix
with open(csv_path, 'w') as out_file:
out_file.write(_out)
out_file.write('\n')
class HardwareStats:
def __init__(self, service: GaService, version: Union[int, Version]=Version.v11, user_group: UserGroup=UserGroup.PaidOnly):
self.qm = SimpleQueryMgr(service, version, Metric.Users, user_group)
def operating_systems(self) -> Dict[str, int]:
platform_count = defaultdict(int)
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
platform_count[os_name] += val
return counts_to_percents(platform_count)
def operating_system_versions(self) -> Dict[str, Dict[str, int]]:
version_count = defaultdict(lambda: defaultdict(int))
for row in self.qm.query(CustomDimension.Os):
val = str_to_int(row[1])
os_name = classify_platform(row[0])
version = get_os_version(row[0])
if version:
version_count[os_name][version] += val
return version_count
def ram_amounts(self) -> Dict[str, int]:
users_with_at_least_this_much_ram = collections.defaultdict(int)
total_users = 0
for row in self.qm.query(CustomDimension.Ram):
val = str_to_int(row[1])
total_users += val
ram_class = int(row[0])
if ram_class >= 2:
users_with_at_least_this_much_ram["2GB"] += val
if ram_class >= 4:
users_with_at_least_this_much_ram["4GB"] += val
if ram_class >= 8:
users_with_at_least_this_much_ram["8GB"] += val
if ram_class >= 16:
users_with_at_least_this_much_ram["16GB"] += val
if ram_class >= 32:
users_with_at_least_this_much_ram["32GB"] += val
return counts_to_percents(users_with_at_least_this_much_ram, total_users)
def gpu_manufacturers(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_manufacturer(row[0])] += str_to_int(row[1])
out = counts_to_percents(out)
with suppress(KeyError):
if out['Unknown'] < 0.3:
del out['Unknown']
return out
def gpu_generation(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_gpu_generation(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def gpu_platform(self) -> Dict[str, int]:
out = defaultdict(int)
for row in self.qm.query(CustomDimension.Gpu):
out[get_mobile_versus_desktop(row[0])] += str_to_int(row[1])
return counts_to_percents(out)
def vr_headsets(self):
known_headsets = {
'rift': 'Oculus Rift',
'oculus': 'Oculus Rift',
'pimax 5k': 'Pimax 5K',
'psvr': 'PSVR Headset',
'windows': 'Windows Mixed Reality',
'lighthouse': 'OpenVR (like HTC Vive)',
'vive': 'OpenVR (like HTC Vive)',
'aapvr': 'Phone',
'vridge': 'Phone',
'ivry': 'Phone',
'phonevr': 'Phone',
}
headset_count = collections.defaultdict(int)
for row in self.qm.query(CustomDimension.VrHeadset):
label = row[0]
for search_term, deduped_name in known_headsets.items():
if search_term in label.lower():
label = deduped_name
break
else:
logging.debug('unknown headset: ' + label)
headset_count[label] += str_to_int(row[1])
return counts_to_percents(headset_count, smush_into_other_below_percent=1)
def vr_usage(self):
vr_start_date = Version.v1120r4.value.start_date
total_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.Ram, override_start_date=vr_start_date))
vr_users = sum(str_to_int(row[1]) for row in self.qm.query(CustomDimension.VrHeadset, override_start_date=vr_start_date))
vr_pct = round((vr_users / total_users) * 100, 2)
return {
'Have Used VR': vr_pct,
'2-D Monitor Only': 100 - vr_pct
}
@property
def total_users(self):
ram_data = self.qm.query(CustomDimension.Ram)
return sum(str_to_int(row[1]) for row in ram_data)
class HardwareGrapher:
def __init__(self, stats: HardwareStats):
self.stats = stats
def operating_systems(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.operating_systems())
def ram_amounts(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.ram_amounts(), 'Users with at Least <em>x</em> GB RAM', make_x_label=lambda l: str(l) + '+')
def gpu_mobile_vs_desktop(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.gpu_platform())
def gpu_manufacturers(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.gpu_manufacturers(), 'GPU Manufacturers')
def vr_headsets(self) -> plotly.graph_objs.Figure:
return make_bar_chart_figure(self.stats.vr_headsets(), 'VR Headsets', already_sorted=True, y_label='% VR Users')
def vr_usage(self) -> plotly.graph_objs.Figure:
return make_pie_chart_figure(self.stats.vr_usage(), top_pad_px=40)
def perform_os_analysis(stats: HardwareStats, grapher: HardwareGrapher):
# Overall platform breakdown
platform_count = stats.operating_systems()
_log("PLATFORM BREAKDOWN")
dump_generic_count_dict(platform_count, "Operating System", "Machines")
plotly.offline.plot(grapher.operating_systems(), image='png', image_filename='os_breakdown' + file_name_suffix, image_width=1024, output_type='file')
version_count = stats.operating_system_versions()
_log("OS VERSIONS")
dump_generic_count_dict(version_count["Windows"], "OS Version", "Windows Machines")
dump_generic_count_dict(version_count["Mac"], "OS Version", "Macs")
dump_generic_count_dict(version_count["Linux"], "OS Version", "Linux Machines")
def clean_up_string_formatting(string):
return str(string).strip()
def perform_cpu_analysis(results_rows):
def get_cpu_core_count(cpu_line):
stats = cpu_line.split(" - ")
for stat in stats:
if stat.startswith("Cores:"):
label_and_cores = stat.split(" ")
return int(label_and_cores[1])
return 0
cpu_cores = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
core_count = get_cpu_core_count(row[0])
cpu_cores[core_count] += val
_log("NUMBER OF CPU CORES")
dump_generic_count_dict(cpu_cores, "CPU Cores", "Machines")
def perform_vr_analysis(stats: HardwareStats, grapher: HardwareGrapher):
_log("VR USAGE")
dump_generic_count_dict(stats.vr_usage(), "VR Status", "Users")
_log("VR HEADSETS")
dump_generic_count_dict(stats.vr_headsets(), "Headset Type", "Users")
plotly.offline.plot(grapher.vr_usage(), image='png', image_filename='vr_usage' + file_name_suffix, image_width=1024, output_type='file')
plotly.offline.plot(grapher.vr_headsets(), image='png', image_filename='vr_headsets' + file_name_suffix, image_width=1024, output_type='file')
def get_gpu_manufacturer(gpu_string):
if lower_contains(gpu_string, ('firepro', 'firegl', 'radeon', 'amd ')) or gpu_string.startswith(('67EF', '67DF', 'ASUS EAH', 'ASUS R')):
return "AMD/ATI"
elif lower_contains(gpu_string, ('Quadro', 'GeForce', 'TITAN')) or gpu_string.startswith(('NVS ', 'NV1')):
return "Nvidia"
elif "Intel" in gpu_string:
return "Intel"
return "Unknown"
def get_gpu_generation(gpu_string):
gpu = gpu_string.lower()
if "quadro" in gpu:
return "Nvidia Quadro (All Generations)"
elif "firepro" in gpu or "firegl" in gpu:
return "AMD FirePro (All Generations)"
if "radeon" in gpu or "asus" in gpu:
for gen in [2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
if "R" + gen + " M" in gpu_string:
return "Radeon R" + gen + "M"
elif "R" + gen + " " in gpu_string:
return "Radeon R" + gen
elif re.search(gen + "\d\d\dM", gpu_string) or ("Mobility" in gpu_string and re.search(gen + "\d\d\d", gpu_string)):
return "Radeon " + gen + "xxxM"
elif re.search(gen + "\d\d\d", gpu_string):
return "Radeon " + gen + "xxxM"
else:
return "Radeon (Other)"
elif "titan x" in gpu:
return "GeForce 9xx"
elif "titan" in gpu:
return "GeForce 7xx"
elif "geforce" in gpu:
for gen in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
gen = str(gen)
base_radeon_re = "GeForce (G|GT|GTX|GTS)?\s*"
if re.search(base_radeon_re + gen + "\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xx"
elif re.search(base_radeon_re + gen + "\d\dM", gpu_string):
return "GeForce " + gen + "xxM"
elif re.search(base_radeon_re + gen + "\d\d\d\s*(Ti)?(\s|/)", gpu_string):
return "GeForce " + gen + "xxx"
elif re.search(base_radeon_re + gen + "\d\d\dM", gpu_string):
return "GeForce " + gen + "xxxM"
else:
return "GeForce (Other)"
elif "intel" in gpu:
if any(ident in gpu for ident in ["gma", "gm45", "g41", "g45", "q45", "eaglelake", "4 series"]):
return "Intel Integrated (GMA or earlier)"
elif "hd" in gpu or "iris" in gpu:
if any(ident in gpu for ident in ["2000", "3000"]):
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif any(ident in gpu for ident in ["4000", "4200", "4400", "4600", "4700", "5000", "5100", "5200"]):
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif any(ident in gpu_string for ident in ["5300", "5500", "5600", "5700", "6000", "6100", "6200", "6300"]):
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif any(ident in gpu_string for ident in ["500", "505", "510", "515", "520", "530", "540", "550", "580"]):
return "Intel Integrated (9th Generation; HD 5xx)"
else:
return "Intel Integrated (5th Generation; HD)"
elif "sandybridge" in gpu:
return "Intel Integrated (6th Generation; HD 2000/3000)"
elif "haswell" in gpu or "ivybridge" in gpu or "bay trail" in gpu:
return "Intel Integrated (7th Generation; HD 2500/4x00/5x00)"
elif "broadwell" in gpu:
return "Intel Integrated (8th Generation; HD 5x00/6x00)"
elif "skylake" in gpu:
return "Intel Integrated (9th Generation; HD 5xx)"
elif "ironlake" in gpu:
return "Intel Integrated (5th Generation; HD)"
else:
return gpu_string
return "Other"
def get_mobile_versus_desktop(gpu_string):
gen = get_gpu_generation(gpu_string)
if gen.startswith("Intel"):
return "Intel"
elif gen.endswith("M"):
return "Mobile"
else:
return "Desktop"
def perform_gpu_analysis(stats: HardwareStats):
gpu_manufacturer = stats.gpu_manufacturers()
_log("GPU PLATFORM")
dump_generic_count_dict(stats.gpu_platform(), "GPU Platform", "Machines")
_log("GPU MANUFACTURER")
dump_generic_count_dict(gpu_manufacturer, "GPU Manufacturer", "Machines")
_log("GPU GENERATION")
dump_generic_count_dict(stats.gpu_generation(), "GPU Generation", "Machines")
with suppress(KeyError):
del gpu_manufacturer['Unknown']
make_bar_chart(gpu_manufacturer, 'gpu_manufacturer' + file_name_suffix, 'Manufacturer', needs_conversion_to_percents=False, height_scaling_factor=0.7)
def perform_ram_analysis(stats: HardwareStats):
users_with_at_least_this_much_ram = stats.ram_amounts()
_log("USERS WITH AT LEAST THIS MUCH RAM")
for ram_amount, value in users_with_at_least_this_much_ram.items():
_log(','.join([str(ram_amount), str(value)]))
_log("\n" * 3)
make_bar_chart(users_with_at_least_this_much_ram, 'ram_amounts' + file_name_suffix, 'RAM Amount', make_x_label=lambda l: str(l) + '+', height_scaling_factor=0.7)
def perform_flight_controls_analysis(results_rows):
known_yokes = [
"Saitek Pro Flight Yoke",
"Saitek X52",
"CH FLIGHT SIM YOKE",
"CH ECLIPSE YOKE",
"Pro Flight Cessna Yoke",
"PFC Cirrus Yoke",
"CH 3-Axis 10-Button POV USB Yoke",
]
known_sticks = [
"Logitech 3D Pro",
"T.Flight Hotas",
"T.Flight Stick X",
"Logitech Attack 3",
"Mad Catz F.L.Y.5 Stick",
"SideWinder Precision 2",
"T.16000M",
"SideWinder Force Feedback 2",
"Saitek Pro Flight X-55 Rhino Stick",
"Cyborg",
"Saitek Cyborg USB Stick",
"AV8R",
"Logitech Freedom 2.4",
"SideWinder Joystick",
"Mad Catz V.1 Stick",
"SideWinder Precision Pro",
"SideWinder 3D Pro",
"Logitech Force 3D Pro",
"WingMan Force 3D",
"Joystick - HOTAS Warthog",
"WingMan Extreme Digital 3D",
"WingMan Extreme 3D",
"Top Gun Afterburner",
"CH FLIGHTSTICK PRO",
"CH FIGHTERSTICK",
"CH COMBATSTICK",
"Saitek ST290",
"Saitek ST90",
"Top Gun Fox 2",
"Aviator for Playstation 3",
"Dark Tornado Joystick",
"Saitek X45",
"Saitek X36",
"USB Joystick",
"Pro Flight X65",
"G940",
"HOTAS Cougar Joystick",
"MetalStrik 3D",
"WingMan Attack 2"
]
known_controllers = [
"XBOX",
"Playstation(R)3 Controller",
"WingMan Cordless Gamepad",
"WingMan RumblePad",
"Logitech Dual Action",
"RumblePad 2",
"ASUS Gamepad",
"USB WirelessGamepad",
"Betop Controller",
"Logitech(R) Precision(TM) Gamepad",
"Wireless Gamepad F710"
]
known_rc_controllers = [
"InterLink Elite",
"RealFlight Interface"
]
def canonicalize_stick_or_yoke_name(flight_control_row):
flight_control_row = clean_up_string_formatting(flight_control_row)
if "Mouse" in flight_control_row:
return "Mouse"
elif "VID:1133PID:49685" in flight_control_row:
return "Logitech Extreme 3D"
elif "WingMan Ext Digital 3D" in flight_control_row:
return "WingMan Extreme Digital 3D"
elif "VID:1699PID:1890" in flight_control_row:
return "Saitek X52"
elif "Wireless 360 Controller" in flight_control_row:
return "XBOX"
elif "VID:121PID:6" in flight_control_row:
return "Generic USB Joystick"
elif "VID:1678PID:49402" in flight_control_row:
return "CH Products (Unknown)"
for control in known_yokes + known_sticks + known_controllers:
if control.lower() in flight_control_row.lower():
return control
if "," in flight_control_row:
return flight_control_row.replace(",", ";")
return flight_control_row
def classify_stick_or_yoke(flight_control_row):
flight_control_row = canonicalize_stick_or_yoke_name(flight_control_row)
if flight_control_row == "Mouse":
return "Mouse"
elif flight_control_row in known_yokes:
return "Yoke"
elif flight_control_row in known_sticks:
return "Joystick"
elif flight_control_row in known_controllers:
return "Gamepad"
elif flight_control_row in known_rc_controllers:
return "RC Controller"
elif "yoke" in flight_control_row.lower():
return "Yoke"
elif "stick" in flight_control_row.lower():
return "Joystick"
elif "pad" in flight_control_row.lower():
return "Gamepad"
else:
return "Unknown"
flight_controls = collections.defaultdict(int)
flight_control_type = collections.defaultdict(int)
has_rudder_pedals = collections.defaultdict(int)
for row in results_rows:
val = str_to_int(row[1])
flight_controls[canonicalize_stick_or_yoke_name(row[0])] += val
flight_control_type[classify_stick_or_yoke(row[0])] += val
row = clean_up_string_formatting(row[0])
if "rudder" in row.lower() or "pedals" in row.lower():
has_rudder_pedals[True] += val
else:
has_rudder_pedals[False] += val
nuke_these_keys = []
for controls, count in flight_controls.items():
if count < 5:
nuke_these_keys.append(controls)
for key in nuke_these_keys:
flight_controls["Other"] += flight_controls[key]
del flight_controls[key]
_log("PRIMARY FLIGHT CONTROLS TYPE")
dump_generic_count_dict(flight_control_type, "Flight Controls Type", "Users")
_log("PRIMARY FLIGHT CONTROLS MODEL (for non-mouse users)")
del flight_controls["Mouse"]
dump_generic_count_dict(flight_controls, "Flight Controls Model", "Users")
_log("USERS FLYING WITH PEDALS")
dump_generic_count_dict(has_rudder_pedals, "Has Pedals?", "Users")
def dump_generic_count_dict(dictionary, label, metric_category):
if SHOW_ABSOLUTE_NUMBERS:
_log(label + ",Num " + metric_category + ",% of All " + metric_category)
else:
_log(label + ",% of All " + metric_category)
total = total_entries_in_dict(dictionary)
sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1), reverse=True)
for i, label_and_count in enumerate(sorted_dict):
if SHOW_ABSOLUTE_NUMBERS:
_log(','.join([str(label_and_count[0]), str(label_and_count[1]), str((label_and_count[1] / total) * 100) + "%"]))
else:
# Coerce to ASCII
label = clean_up_string_formatting(label_and_count[0])
percent_str = clean_up_string_formatting(str((label_and_count[1] / total) * 100) + u"%")
_log(label, end="")
_log(",", end="")
_log(percent_str)
_log("\n" * 3)
def lower_contains(s: str, check: Iterable[str]) -> bool:
return any(sub.lower() in s.lower() for sub in check)
if __name__ == '__main__':
    # CLI entry point; see main() for the --version flag.
    main()
| StarcoderdataPython |
39211 | <filename>016 3Sum Closest.py<gh_stars>100-1000
"""
Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target. Return
the sum of the three integers. You may assume that each input would have exactly one solution.
For example, given array S = {-1 2 1 -4}, and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
"""
__author__ = 'Danyang'


class Solution:
    def threeSumClosest(self, num, target):
        """Return the sum of the three elements of *num* closest to *target*.

        Sort-then-two-pointer scan: for every anchor element, a low and a
        high pointer sweep the remainder, tracking the smallest distance
        seen so far (ties keep the earlier find). An exact hit returns
        immediately.

        :param num: list of integers (sorted in place as a side effect)
        :param target: target sum
        :return: the closest achievable three-element sum
        """
        best_gap = 1 << 32
        best_sum = 0
        num.sort()
        for left, anchor in enumerate(num):
            mid = left + 1
            right = len(num) - 1
            while mid < right:
                current = anchor + num[mid] + num[right]
                gap = abs(target - current)
                if gap < best_gap:
                    best_sum = current
                    if current == target:
                        return best_sum
                    best_gap = gap
                elif current > target:
                    right -= 1
                else:
                    mid += 1
        return best_sum
if __name__=="__main__":
print Solution().threeSumClosest([1, 1, 1, 1], 0)
| StarcoderdataPython |
1750200 | <gh_stars>0
import rospy
import tf
import numpy as np
from matplotlib import pyplot as plt
class VSCaleCalibrator(object):
def __init__(self):
rospy.init_node('vscale_calibrator')
self._tfl = tf.TransformListener()
self._data = [] # (timestamp, distance)
self._t0 = rospy.Time.now()
def step(self):
try:
t, q = self._tfl.lookupTransform('map', 'camera_link', rospy.Time(0))
x, y = t[0], t[1]
d = np.sqrt(x**2 + y**2)
time = rospy.Time.now()
print (time - self._t0).to_sec()
self._data.append( ((time - self._t0).to_sec(), d) )
except Exception:
#print 'life is terrible'
pass
def show(self):
data = np.asarray(self._data, dtype=np.float32)
#print data[:,0]
plt.plot(data[:,0] - data[0,0], data[:,1])
plt.show()
#print self._data
#print 'shutdown'
def run(self):
rate = rospy.Rate(100)
rospy.on_shutdown(self.show)
while self._t0.to_sec() == 0:
self._t0 = rospy.Time.now()
while not rospy.is_shutdown():
self.step() # << where stuff happens
rate.sleep()
def main():
app = VSCaleCalibrator()
app.run()
if __name__ == "__main__":
main()
| StarcoderdataPython |
4936820 | # -*- coding: utf-8 -*-
"""
This is for database upgrades, you can ignore it and preferably don't change anything.
"""
from sys import stderr
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import BaseUserManager
from django.db import migrations
def create_admin(apps, schema_editor):
Learner = apps.get_model('learners', 'Learner')
email = BaseUserManager.normalize_email('admin<EMAIL>')
admin = Learner(
email = email,
is_staff = True,
is_superuser = True,
name = 'Admin',
need_active_update = True,
minimum_delay = 3,
new_count = 5
)
admin.password = make_password('<PASSWORD>')
admin.save()
stderr.write('\n*********************************************************************\nA user with email "admin<EMAIL>" and password "<PASSWORD>" was created.\nYou should update the password!\n*********************************************************************\n')
class Migration(migrations.Migration):
dependencies = [
('learners', '0009_current_active'),
]
operations = [
migrations.RunPython(create_admin)
]
| StarcoderdataPython |
5176101 | n,k,x = list(map(int,input().split()))
l = list(map(int,input().split()))
l=sorted(l)
ans = []
for i in range(n):
if l[i]-l[i-1]>x:
ans.append((l[i]-l[i-1]-1)//x)
ans = sorted(ans)[::-1]
t = len(ans)
while t:
if ans[t-1] <= k:
k-=ans[t-1]
t-=1
else:
break
final=t+1
print(final) | StarcoderdataPython |
9614305 | <filename>frappe-bench/apps/erpnext/erpnext/config/setup.py
from __future__ import unicode_literals
from frappe import _
from frappe.desk.moduleview import add_setup_section
def get_data():
data = [
{
"label": _("Settings"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "Global Defaults",
"label": _("Global Settings"),
"description": _("Set Default Values like Company, Currency, Current Fiscal Year, etc."),
"hide_count": True
}
]
},
{
"label": _("Printing"),
"icon": "fa fa-print",
"items": [
{
"type": "doctype",
"name": "Letter Head",
"description": _("Letter Heads for print templates.")
},
{
"type": "doctype",
"name": "Print Heading",
"description": _("Titles for print templates e.g. Proforma Invoice.")
},
{
"type": "doctype",
"name": "Address Template",
"description": _("Country wise default Address Templates")
},
{
"type": "doctype",
"name": "Terms and Conditions",
"description": _("Standard contract terms for Sales or Purchase.")
},
]
},
{
"label": _("Help"),
"items": [
{
"type": "help",
"name": _("Data Import and Export"),
"youtube_id": "6wiriRKPhmg"
},
{
"type": "help",
"label": _("Setting up Email"),
"youtube_id": "YFYe0DrB95o"
},
{
"type": "help",
"label": _("Printing and Branding"),
"youtube_id": "cKZHcx1znMc"
},
{
"type": "help",
"label": _("Users and Permissions"),
"youtube_id": "fnBoRhBrwR4"
},
{
"type": "help",
"label": _("Workflow"),
"youtube_id": "yObJUg9FxFs"
},
]
},
{
"label": _("Customize"),
"icon": "fa fa-glass",
"items": [
{
"type": "doctype",
"name": "Authorization Rule",
"description": _("Create rules to restrict transactions based on values.")
},
{
"type": "doctype",
"name": "Notification Control",
"label": _("Email Notifications"),
"description": _("Automatically compose message on submission of transactions.")
}
]
},
{
"label": _("Email"),
"icon": "fa fa-envelope",
"items": [
{
"type": "doctype",
"name": "Feedback Trigger",
"label": _("Feedback Trigger"),
"description": _("Automatically triggers the feedback request based on conditions.")
},
{
"type": "doctype",
"name": "Email Digest",
"description": _("Create and manage daily, weekly and monthly email digests.")
},
{
"type": "doctype",
"name": "SMS Settings",
"description": _("Setup SMS gateway settings")
},
]
}
]
for module, label, icon in (
("accounts", _("Accounts"), "fa fa-money"),
("stock", _("Stock"), "fa fa-truck"),
("selling", _("Selling"), "fa fa-tag"),
("buying", _("Buying"), "fa fa-shopping-cart"),
("hr", _("Human Resources"), "fa fa-group"),
("support", _("Support"), "fa fa-phone")):
add_setup_section(data, "erpnext", module, label, icon)
return data
| StarcoderdataPython |
225148 | <reponame>peombwa/Sample-Graph-Python-Client<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphloggedOnUser(Model):
"""loggedOnUser.
:param user_id: User id
:type user_id: str
:param last_log_on_date_time: Date time when user logs on
:type last_log_on_date_time: datetime
"""
_attribute_map = {
'user_id': {'key': 'userId', 'type': 'str'},
'last_log_on_date_time': {'key': 'lastLogOnDateTime', 'type': 'iso-8601'},
}
def __init__(self, user_id=None, last_log_on_date_time=None):
super(MicrosoftgraphloggedOnUser, self).__init__()
self.user_id = user_id
self.last_log_on_date_time = last_log_on_date_time
| StarcoderdataPython |
1857567 | <gh_stars>0
import os
import json
import logging
from pipeline_model import TensorFlowServingModel
from pipeline_monitor import prometheus_monitor as monitor
from pipeline_logger import log
import tensorflow as tf
import requests
from PIL import Image
from io import StringIO, BytesIO
# Module-level logger writing INFO-and-above to stderr via a stream handler.
_logger = logging.getLogger('pipeline-logger')
_logger.setLevel(logging.INFO)
_logger_stream_handler = logging.StreamHandler()
_logger_stream_handler.setLevel(logging.INFO)
_logger.addHandler(_logger_stream_handler)
# predict() is the only public entry point of this module.
__all__ = ['predict']
# Static labels attached to every log line and prometheus metric below.
_labels= {'model_runtime': 'tfserving',
'model_type': 'tensorflow',
'model_name': 'inception',
'model_tag': 'cpu',
'model_chip': 'cpu',
}
def _initialize_upon_import() -> TensorFlowServingModel:
''' Initialize / Restore Model Object.
'''
return TensorFlowServingModel(host='localhost',
port=9000,
model_name='inception',
model_signature_name='predict_images',
timeout_seconds=10.0)
# This is called unconditionally at *module import time*...
_model = _initialize_upon_import()
# https://www.tensorflow.org/serving/serving_inception
@log(labels=_labels, logger=_logger)
def predict(request: bytes) -> bytes:
'''Where the magic happens...'''
with monitor(labels=_labels, name="transform_request"):
transformed_request = _transform_request(request)
with monitor(labels=_labels, name="predict"):
predictions = _model.predict(transformed_request)
with monitor(labels=_labels, name="transform_response"):
transformed_response = _transform_response(predictions)
return transformed_response
def _transform_request(request: bytes) -> dict:
# Convert from bytes to tf.tensor, np.array, etc.
# This needs to be a JPEG
request_str = request.decode('utf-8')
request_json = json.loads(request_str)
# image_url = request_json['image_url']
# image = Image.open(requests.get(image_url, stream=True).raw)
image_response = requests.get(request_json['image_url'])
# with BytesIO(image_response.content) as f:
# with Image.open(f) as img:
# print(img)
# image = img
# image_file_path = '%s/images/fregly_avatar.jpg' % os.environ['PIPELINE_INPUT_PATH']
from datetime import datetime
version = int(datetime.now().strftime("%s"))
image_file_path = 'blah-%s.jpg' % version
with open(image_file_path, 'wb') as f:
f.write(image_response.content)
with open(image_file_path, 'rb') as f:
image = f.read()
# TODO: https://towardsdatascience.com/tensorflow-serving-client-make-it-slimmer-and-faster-b3e5f71208fb
# https://github.com/Vetal1977/tf_serving_example/tree/master/tensorflow/core/framework
#image_tensor = tf.make_tensor_proto(image)
# shape=[1])
# NEW STUFF - pipeline_model==1.10+
# Replacement for tf.make_tensor_proto(image, shape=[1])
# Create TensorProto object for a request
#
#from tensorflow.core.framework import tensor_pb2
#from tensorflow.core.framework import tensor_shape_pb2
#from tensorflow.core.framework import types_pb2
#
#dims = [tensor_shape_pb2.TensorShapeProto.Dim(size=1)]
#tensor_shape_proto = tensor_shape_pb2.TensorShapeProto(dim=dims)
#image_tensor_proto = tensor_pb2.TensorProto(dtype=types_pb2.DT_STRING,
# tensor_shape=tensor_shape_proto,
# string_val=[image])
#
image_tensor_proto = tf.make_tensor_proto(image,
shape=[1])
return {"images": image_tensor_proto}
def _transform_response(response: dict) -> json:
# Convert from tf.tensor, np.array, etc. to bytes
# TODO: Optimize this to avoid tf.make_ndarray similar to _transform_request() above
class_list = tf.make_ndarray(response['classes']).tolist()[0]
class_list_str = [clazz.decode('utf-8') for clazz in class_list]
score_list = tf.make_ndarray(response['scores']).tolist()[0]
return {"classes": class_list_str,
"scores": score_list}
#predict(b'{"image_url": "https://avatars1.githubusercontent.com/u/1438064?s=460&v=4"}')
| StarcoderdataPython |
1848785 | import re
from exceptions.availability_checker_exception import AvailabilityCheckerException
class Client:
def __init__(self, name, mobile, email, pincode):
self.name = name
self.mobile = mobile
self.email = email
self.pincode = pincode
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def mobile(self):
return self._mobile
@mobile.setter
def mobile(self, mobile_number):
mobile_pattern = re.compile('[6-9]\d{9}')
if mobile_pattern.match(mobile_number):
self._mobile = mobile_number
else:
raise AvailabilityCheckerException("Invalid mobile number.")
@property
def email(self):
return self._email
@email.setter
def email(self, email_id):
email_pattern = re.compile(
'^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$')
if email_pattern.match(email_id):
self._email = email_id
else:
raise AvailabilityCheckerException("Invalid Email ID.")
@property
def pincode(self):
return self._pincode
@pincode.setter
def pincode(self, pincode_candidate):
pincode_pattern = re.compile('\d{6}')
if re.match(pincode_pattern, pincode_candidate):
self._pincode = pincode_candidate
else:
raise AvailabilityCheckerException("Invalid pincode.")
| StarcoderdataPython |
3502614 | <reponame>mitsuhiko/pip
#! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). It is a base64 encoded bz2 stream that was stored using the
# pickle module.
#
# Pip is a thing that installs packages, pip itself is a package that someone
# might want to install, especially if they're looking to run this get-pip.py
# script. Pip has a lot of code to deal with the security of installing
# packages, various edge cases on various platforms, and other such sort of
# "tribal knowledge" that has been encoded in it's code base. Because of this
# we basically include an entire copy of pip inside this blob. We do this
# because the alternatives are attempt to implement a "minipip" that probably
# doesn't do things correctly and has weird edge cases, or compress pip itself
# down into a single file.
#
# If you're wondering how this is created, the secret is
# "contrib/build-installer" from the pip repository.
sources = """
@SOURCES@"""
import codecs
import os
import sys
import base64
import bz2
import tempfile
import shutil
def unpack(sources):
    """Write the base64-encoded *sources* mapping into a fresh temp directory.

    *sources* maps slash-separated relative paths to base64-encoded file
    contents.  Returns the temporary directory path; the caller is
    responsible for removing it when done.
    """
    temp_dir = tempfile.mkdtemp('-scratchdir', 'unpacker-')
    for package, content in sources.items():
        filepath = package.split("/")
        dirpath = os.sep.join(filepath[:-1])
        packagedir = os.path.join(temp_dir, dirpath)
        if not os.path.isdir(packagedir):
            os.makedirs(packagedir)
        # Context manager replaces the manual try/finally close and still
        # guarantees the handle is closed if write() raises.
        with open(os.path.join(packagedir, filepath[-1]), 'wb') as mod:
            mod.write(base64.b64decode(content))
    return temp_dir
if __name__ == "__main__":
    # `exec` is a function on Python 3 but a statement on Python 2, so each
    # form of the do_exec helper is compiled lazily with exec() itself.
    if sys.version_info >= (3, 0):
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii")  # ensure bytes
        sources = pickle.loads(bz2.decompress(base64.decodebytes(sources)))
    else:
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(bz2.decompress(base64.decodestring(sources)))

    # Unpack the embedded pip tree, put it on sys.path, and run the @ENTRY@
    # bootstrap code (substituted at build time) against it.
    # NOTE(review): if unpack() raises, temp_dir is unbound in the finally
    # block and a NameError masks the original error — confirm acceptable.
    try:
        temp_dir = unpack(sources)
        sys.path.insert(0, temp_dir)
        entry = """@ENTRY@"""
        do_exec(entry, locals())
    finally:
        shutil.rmtree(temp_dir)
| StarcoderdataPython |
1731800 | import scrapy
from OnlineParticipationDataset import items
from OnlineParticipationDataset.spiders.Bonn2017Spider import Bonn2017Spider
from datetime import datetime
import re
import locale
class Bonn2019Spider(Bonn2017Spider):
    """Spider for the 2019 Bonn participation round.

    Reuses all parsing logic from Bonn2017Spider and only overrides the
    spider name and the entry URL.
    """

    name = "bonn2019"
    start_urls = ['https://www.bonn-macht-mit.de/node/2900']

    def __init__(self, *args, **kwargs):
        # No extra state; delegate straight to the 2017 spider.
        super(Bonn2019Spider, self).__init__(*args, **kwargs)
| StarcoderdataPython |
296420 | from django.conf.urls import include, url
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.test import TestCase
from django.views.generic import View
from ralph.lib.permissions.views import PermissionViewMetaClass
class SimplePermissionView(View, metaclass=PermissionViewMetaClass):
    """Simple Django generic views class for permission view test."""

    def get(self, request):
        # NOTE(review): the view body has no access control of its own —
        # enforcement appears to come from PermissionViewMetaClass (the test
        # below expects 403 without the generated permission); confirm.
        return HttpResponse('ok')
# Minimal URLconf used only by this test module; the test case resolves
# 'test-view-permissions' against it.
urls = [url(
    r'^test-view-permissions/',
    SimplePermissionView.as_view(),
    name='test-view-permissions'
)]
class PermissionsByFieldTestCase(TestCase):
    """Checks that SimplePermissionView enforces its auto-generated permission.

    Three users are prepared: one without the permission (expects 403), one
    holding it explicitly (expects 200), and a superuser (expects 200).
    """

    def setUp(self):
        self.codename = 'can_view_extra_simplepermissionview'
        self.user_not_perm = get_user_model().objects.create_user(
            username='user_not_perm',
            password='password',
        )
        self.user_with_perm = get_user_model().objects.create_user(
            username='user_with_perm',
            password='password',
        )
        self.user_with_perm.user_permissions.add(
            Permission.objects.get(codename=self.codename)
        )
        self.root = get_user_model().objects.create_superuser(
            username='root',
            password='password',
            email='<EMAIL>'
        )
        self.url = reverse('test-view-permissions')

    def _get_status(self, username):
        """Log in as *username* (shared test password) and GET the view.

        Returns the HTTP status code of the response.
        """
        self.client.login(username=username, password='password')
        return self.client.get(self.url).status_code

    def test_codename(self):
        # The metaclass derives the permission codename from the class name.
        self.assertEqual(
            SimplePermissionView.permision_codename,
            self.codename
        )

    def test_user_not_perm(self):
        # Without the permission the view must be forbidden.
        self.assertEqual(self._get_status(self.user_not_perm.username), 403)

    def test_user_with_perm(self):
        self.assertEqual(self._get_status(self.user_with_perm.username), 200)

    def test_user_root(self):
        # Superusers implicitly hold every permission.
        self.assertEqual(self._get_status(self.root.username), 200)
| StarcoderdataPython |
5198051 | <reponame>Tenebrar/codebase
# Generated by Django 2.0.6 on 2018-08-21 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds movement-speed fields to the Race model."""

    dependencies = [
        ('charsheet', '0022_character_experience'),
    ]

    operations = [
        # All new fields have defaults, so existing rows need no data migration.
        migrations.AddField(
            model_name='race',
            name='burrow_speed',
            field=models.PositiveIntegerField(default=0, help_text='expressed in feet'),
        ),
        migrations.AddField(
            model_name='race',
            name='fly_maneuverability',
            field=models.CharField(default='', max_length=32),
        ),
        migrations.AddField(
            model_name='race',
            name='fly_speed',
            field=models.PositiveIntegerField(default=0, help_text='expressed in feet'),
        ),
        migrations.AddField(
            model_name='race',
            name='swim_speed',
            field=models.PositiveIntegerField(default=0, help_text='expressed in feet'),
        ),
    ]
| StarcoderdataPython |
366643 | import setuptools
# Read the long description with an explicit encoding so installation does
# not depend on the platform's default codec (e.g. cp1252 on Windows, which
# fails on non-ASCII characters in the README).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="torchtrain",
    version="0.4.13",
    author="HQ",
    author_email="<EMAIL>",
    description="A small tool for PyTorch training",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/idorce/torchtrain",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    python_requires=">=3.6",
    keywords="pytorch machine learning train",
    install_requires=["tqdm", "torch", "numpy", "tensorboard"],
)
| StarcoderdataPython |
1693062 | <gh_stars>0
# coding: utf-8
"""
FINBOURNE Insights API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.0.238
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from finbourne_insights.configuration import Configuration
class CreateAuditEntry(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually — regenerate from the OpenAPI spec instead.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Python attribute name -> declared OpenAPI model type.
    openapi_types = {
        'process': 'AuditProcess',
        'data': 'AuditData'
    }

    # Python attribute name -> JSON key in the serialized payload.
    attribute_map = {
        'process': 'process',
        'data': 'data'
    }

    # Python attribute name -> 'required' / 'optional' per the API schema.
    required_map = {
        'process': 'required',
        'data': 'required'
    }

    def __init__(self, process=None, data=None, local_vars_configuration=None):  # noqa: E501
        """CreateAuditEntry - a model defined in OpenAPI.

        :param process:  (required)
        :type process: finbourne_insights.AuditProcess
        :param data:  (required)
        :type data: finbourne_insights.AuditData

        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._process = None
        self._data = None
        self.discriminator = None

        # Assign through the properties so client-side validation runs.
        self.process = process
        self.data = data

    @property
    def process(self):
        """Gets the process of this CreateAuditEntry.  # noqa: E501

        :return: The process of this CreateAuditEntry.  # noqa: E501
        :rtype: finbourne_insights.AuditProcess
        """
        return self._process

    @process.setter
    def process(self, process):
        """Sets the process of this CreateAuditEntry.

        :param process: The process of this CreateAuditEntry.  # noqa: E501
        :type process: finbourne_insights.AuditProcess
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and process is None:  # noqa: E501
            raise ValueError("Invalid value for `process`, must not be `None`")  # noqa: E501

        self._process = process

    @property
    def data(self):
        """Gets the data of this CreateAuditEntry.  # noqa: E501

        :return: The data of this CreateAuditEntry.  # noqa: E501
        :rtype: finbourne_insights.AuditData
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this CreateAuditEntry.

        :param data: The data of this CreateAuditEntry.  # noqa: E501
        :type data: finbourne_insights.AuditData
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and data is None:  # noqa: E501
            raise ValueError("Invalid value for `data`, must not be `None`")  # noqa: E501

        self._data = data

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict.

        When *serialize* is true, keys are the JSON names from
        ``attribute_map``; otherwise the Python attribute names are used.
        """
        result = {}

        def convert(x):
            # Recursively serialize nested models that expose to_dict().
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateAuditEntry):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CreateAuditEntry):
            return True

        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
11258059 | <reponame>71unxv/Python_DTG
def doublePower(x):
    """Return *x* raised to its own power, i.e. ``x ** x``."""
    return x ** x


# Exercise the helper with a few sample bases; prints 4, 27, 256.
for base in (2, 3, 4):
    result = doublePower(base)
    print(result)
| StarcoderdataPython |
1741455 | <filename>Exercise05/5-31.py
'''
@Date: 2019-11-02 10:28:29
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-10 00:22:14
'''
# Print a 12-month calendar for a given year.  The user supplies the year
# and the weekday index (0 = Sunday) of January 1st.
# int() replaces the original eval() — the inputs are plain integers, and
# eval() would execute arbitrary expressions typed by the user.
year = int(input("Enter the year: "))
day = int(input("Enter the day of the week: "))

# Gregorian leap-year rule: divisible by 400, or by 4 but not by 100.
is_leap = year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)

# Month table replaces the original 30-branch elif chain.
# ("Augest" in the original was a typo for "August".)
month_days = [
    ("January", 31), ("February", 29 if is_leap else 28), ("March", 31),
    ("April", 30), ("May", 31), ("June", 30), ("July", 31),
    ("August", 31), ("September", 30), ("October", 31),
    ("November", 30), ("December", 31),
]

first_weekday = day  # weekday slot (0-6) on which the current month starts
for month, num_days in month_days:
    print("            ", month)
    print("-----------------------------")
    print(" Sun Mon Tue Wed Thu Fri Sat")
    # Blank 4-character cells up to the month's first weekday.
    print("    " * first_weekday, end="")
    for d in range(1, num_days + 1):
        # %4d right-aligns each day in a 4-wide cell, matching the header.
        print("%4d" % d, end="")
        if (d + first_weekday) % 7 == 0:
            print()
    # Carry the weekday offset into the next month.
    first_weekday = (first_weekday + num_days) % 7
    print('\n')
| StarcoderdataPython |
11222943 | <reponame>chandrakant1991/python0123
#Assignment
#create dictionary of user data
# Bug fix: the anonymized <PASSWORD> placeholders were bare identifiers,
# which is a SyntaxError; they must be string literals for the file to parse.
dict_03 = {
    'Name': '<NAME>',
    'User name': '<EMAIL>',
    'Password': '<PASSWORD>',
    'Address': 'maji sainik nagar,Yerwada,pune-6,',
    'Mobile No': 9637646900,
    'Security Question': 'What is your Favourite Game'
}
print(dict_03)

dic_04 = {
    'Name': '<NAME>',
    'User name': '<EMAIL>',
    'Password': '<PASSWORD>',
    'Address': 'vishrantwadi pune',
    'Mobile No': 9036678400,
}
print(dic_04)
6419383 | # https://github.com/lucidrains/vit-pytorch/blob/main/vit_pytorch/vit_pytorch.py
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
MIN_NUM_PATCHES = 16
# Per-depth pruned channel configuration for the slim ViT: one
# [attention_width, mlp_hidden_width] pair per encoder layer.
# The commented entries are earlier (unpruned / 4-value) configurations
# kept for reference.
defaultcfg = {
    # 6 : [512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512]
    # 6 : [[510, 375, 512, 443], [512, 399, 479, 286], [511, 367, 370, 196], [512, 404, 111, 95], [512, 425, 60, 66], [512, 365, 356, 223]]
    6 : [[360, 512], [408, 479], [360, 370], [408, 111], [432, 60], [360, 356]]
}
class channel_selection(nn.Module):
    """Learnable per-channel gate used during pruning.

    Holds one scalar per channel, all initialised to 1.  Pruning code sets
    the entries of discarded channels to 0, so the forward pass simply
    masks those channels out of the input.
    """

    def __init__(self, num_channels):
        super(channel_selection, self).__init__()
        # One gate per channel; everything enabled at initialisation.
        self.indexes = nn.Parameter(torch.ones(num_channels))

    def forward(self, input_tensor):
        """Gate *input_tensor* (shape (B, num_patches + 1, dim)) channel-wise."""
        return input_tensor * self.indexes
class Residual(nn.Module):
    """Skip connection wrapper: ``x -> fn(x) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        # Keyword arguments (e.g. an attention mask) pass straight through.
        out = self.fn(x, **kwargs)
        return out + x
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to ``fn`` (pre-norm block)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class FeedForward(nn.Module):
    """Transformer MLP block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    The block is deliberately split into two sub-sequentials (``net1`` for
    the expansion, ``net2`` for the projection) so pruning code can hook in
    between them; the attribute names are part of the checkpoint layout.
    """

    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        self.net1 = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
        )
        self.net2 = nn.Sequential(
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        hidden = self.net1(x)
        return self.net2(hidden)
class Attention(nn.Module):
    """Multi-head self-attention with a (possibly pruned) projection width.

    ``dim`` is the token embedding size; ``dim1`` is the total width of the
    Q/K/V projections, which in the slimmed model may differ per layer.
    ``dim1`` is assumed to be divisible by ``heads`` — TODO confirm callers
    guarantee this.
    """

    def __init__(self, dim, dim1, heads=8, dropout=0.):
        super().__init__()
        self.heads = heads
        # NOTE(review): scales by the *total* projection width dim1 ** -0.5,
        # not the usual per-head width — presumably intentional for the
        # pruned model; confirm.
        self.scale = dim1 ** -0.5

        # self.to_qkv = nn.Linear(dim, dim1 * 3, bias = False)
        self.to_q = nn.Linear(dim, dim1, bias=False)
        self.to_k = nn.Linear(dim, dim1, bias=False)
        self.to_v = nn.Linear(dim, dim1, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(dim1, dim),
            nn.Dropout(dropout)
        )
        # self.select1 = channel_selection(dim1)
        # self.select2 = channel_selection(dim2)

    def forward(self, x, mask=None):
        # x: (batch, tokens, dim)
        b, n, _, h = *x.shape, self.heads
        # pruning torch.Size([4, 65, 512])
        # x = self.select1(x)
        q = self.to_q(x)
        # q = self.select1(q)
        q = rearrange(q, 'b n (h d) -> b h n d', h=h)
        k = self.to_k(x)
        # k = self.select1(k)
        k = rearrange(k, 'b n (h d) -> b h n d', h=h)
        v = self.to_v(x)
        # v = self.select1(v)
        v = rearrange(v, 'b n (h d) -> b h n d', h=h)
        # qkv = self.to_qkv(x).chunk(3, dim = -1)
        # q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        # Scaled dot-product scores: (batch, heads, tokens, tokens).
        dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale

        if mask is not None:
            # Pad the mask for the CLS token, expand to pairwise form, and
            # block masked positions with -inf before the softmax.
            mask = F.pad(mask.flatten(1), (1, 0), value=True)
            assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
            mask = mask[:, None, :] * mask[:, :, None]
            dots.masked_fill_(~mask, float('-inf'))
            del mask

        attn = dots.softmax(dim=-1)

        # Weighted sum of values, then merge the heads back together.
        out = torch.einsum('bhij,bhjd->bhid', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        # pruning torch.Size([4, 65, 512])
        # out = self.select2(out)
        out = self.to_out(out)
        return out
class Transformer(nn.Module):
    """Stack of pre-norm attention + feed-forward blocks with residuals.

    When ``cfg`` is given it lists one ``[attn_width, mlp_width]`` pair per
    layer (a pruned configuration); otherwise ``depth`` identical layers of
    full width ``dim`` are built.
    """

    def __init__(self, dim, depth, heads, mlp_dim, dropout, cfg):
        super().__init__()
        self.layers = nn.ModuleList([])
        # Fall back to `depth` full-width layers when no pruned config is given.
        layer_sizes = cfg if cfg is not None else [[dim, dim]] * depth
        for attn_width, ff_width in layer_sizes:
            self.layers.append(nn.ModuleList([
                Residual(PreNorm(dim, Attention(dim, attn_width, heads=heads, dropout=dropout))),
                Residual(PreNorm(dim, FeedForward(dim, ff_width, dropout=dropout))),
            ]))

    def forward(self, x, mask=None):
        for attn, ff in self.layers:
            x = ff(attn(x, mask=mask))
        return x
class ViT_slim(nn.Module):
    """Vision Transformer whose encoder can be built from a pruned ``cfg``."""

    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads,
                 mlp_dim, cfg=None, channels=3, dropout=0., emb_dropout=0.):
        super().__init__()
        assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
        num_patches = (image_size // patch_size) ** 2
        patch_dim = channels * patch_size ** 2
        assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective. try decreasing your patch size'
        self.patch_size = patch_size

        # Learned positional embedding: one slot per patch plus the CLS token.
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        self.patch_to_embedding = nn.Linear(patch_dim, dim)
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        self.dropout = nn.Dropout(emb_dropout)

        self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout, cfg)

        self.to_cls_token = nn.Identity()
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, mlp_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(mlp_dim, num_classes)
        )

    def forward(self, img, mask=None):
        patch = self.patch_size

        # Cut the image into non-overlapping patches and flatten each one.
        tokens = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=patch, p2=patch)
        tokens = self.patch_to_embedding(tokens)
        batch, n_tokens, _ = tokens.shape

        # Prepend the CLS token, add positions, then regularise.
        cls_tokens = self.cls_token.expand(batch, -1, -1)
        tokens = torch.cat((cls_tokens, tokens), dim=1)
        tokens += self.pos_embedding[:, :(n_tokens + 1)]
        tokens = self.dropout(tokens)

        encoded = self.transformer(tokens, mask)

        # Classify from the CLS token only.
        cls_out = self.to_cls_token(encoded[:, 0])
        return self.mlp_head(cls_out)
def setup_seed(seed):
    """Seed torch's RNG and force deterministic cuDNN kernels for repeatability."""
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
    # Smoke test: run one random CIFAR-sized batch through the model.
    # setup_seed(200)
    b, c, h, w = 4, 3, 32, 32
    x = torch.randn(b, c, h, w)
    # Bug fix: this module defines ViT_slim — there is no `ViT` in scope,
    # so the original smoke test raised NameError.
    net = ViT_slim(
        image_size=32,
        patch_size=4,
        num_classes=10,
        dim=512,
        depth=6,
        heads=8,
        mlp_dim=512,
        dropout=0.1,
        emb_dropout=0.1
    )
    y = net(x)
    # print(y)
    print(y.size())
class UserNotInGroupError(Exception):
    """Raised when the user doesn't have access to the related group."""

    def __init__(self, user=None, group=None, *args, **kwargs):
        # Use the detailed message only when both parties are known.
        if user and group:
            message = f'User {user} doesn\'t belong to group {group}.'
        else:
            message = 'The user doesn\'t belong to the group'
        super().__init__(message, *args, **kwargs)
class GroupDoesNotExist(Exception):
    """Raised when trying to get a group that does not exist."""
class ApplicationDoesNotExist(Exception):
    """Raised when trying to get an application that does not exist."""
class InstanceTypeAlreadyRegistered(Exception):
    """
    Raised when the instance model instance is already registered in the registry.
    """
class InstanceTypeDoesNotExist(Exception):
    """
    Raised when a requested instance model instance isn't registered in the registry.
    """
class ApplicationTypeAlreadyRegistered(InstanceTypeAlreadyRegistered):
    """Raised when the application type is already registered in the registry."""
    pass
class ApplicationTypeDoesNotExist(InstanceTypeDoesNotExist):
    """Raised when a requested application type isn't registered in the registry."""
    pass
| StarcoderdataPython |
6404036 | <reponame>theandygross/CancerData
__author__ = 'agross'
| StarcoderdataPython |
4852122 | <gh_stars>10-100
# Copyright (c) 2016 iXsystems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for iXsystems iSCSI storage systems.
This driver requires iXsystems storage systems with installed iSCSI licenses.
"""
import simplejson as json
from cinder.volume import driver
from cinder.volume.drivers.ixsystems import common
from cinder.volume.drivers.ixsystems.options import ixsystems_basicauth_opts
from cinder.volume.drivers.ixsystems.options import ixsystems_connection_opts
from cinder.volume.drivers.ixsystems.options import ixsystems_provisioning_opts
from cinder.volume.drivers.ixsystems.options import ixsystems_transport_opts
from cinder.volume.drivers.ixsystems import utils as ix_utils
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(ixsystems_connection_opts)
CONF.register_opts(ixsystems_transport_opts)
CONF.register_opts(ixsystems_basicauth_opts)
CONF.register_opts(ixsystems_provisioning_opts)
class FreeNASISCSIDriver(driver.ISCSIDriver):
    """FREENAS iSCSI volume driver.

    Thin Cinder driver that delegates all array interaction to
    ``common.TrueNASCommon`` and translates between Cinder volume/snapshot
    names and the corresponding FreeNAS zvol / iSCSI target names.
    """

    VERSION = "2.0.0"
    IGROUP_PREFIX = 'openstack-'
    # Configuration options that must be present for the driver to start
    # (validated by common._check_flags()).
    required_flags = ['ixsystems_transport_type', 'ixsystems_login',
                      'ixsystems_password', 'ixsystems_server_hostname',
                      'ixsystems_server_port', 'ixsystems_server_iscsi_port',
                      'ixsystems_volume_backend_name', 'ixsystems_vendor_name',
                      'ixsystems_storage_protocol', 'ixsystems_datastore_pool',
                      'ixsystems_dataset_path', 'ixsystems_iqn_prefix', ]

    def __init__(self, *args, **kwargs):
        """Initialize FreeNASISCSIDriver Class."""
        LOG.info('iXsystems: Init Cinder Driver')
        super(FreeNASISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(ixsystems_connection_opts)
        self.configuration.append_config_values(ixsystems_basicauth_opts)
        self.configuration.append_config_values(ixsystems_transport_opts)
        self.configuration.append_config_values(ixsystems_provisioning_opts)
        # IQNs are built as '<prefix>:<name>', so a trailing ':' is appended.
        self.configuration.ixsystems_iqn_prefix += ':'
        self.common = common.TrueNASCommon(configuration=self.configuration)
        self.stats = {}

    def check_for_setup_error(self):
        """Check for iXsystems FREENAS configuration parameters."""
        LOG.info('iXSystems: Check For Setup Error')
        self.common._check_flags()

    def do_setup(self, context):
        """Setup iXsystems FREENAS driver.

        Check for configuration flags and setup iXsystems FREENAS client
        """
        LOG.info('iXsystems Do Setup')
        # TODO:add check to see if volume exist, able to connect
        # truenas array
        self.check_for_setup_error()
        self.common._do_custom_setup()

    def create_volume(self, volume):
        """Creates a volume of specified size and export it as iscsi target."""
        LOG.info('iXsystems Create Volume')
        LOG.debug('create_volume : volume name :: %s', volume['name'])

        # Translate the Cinder name into the FreeNAS zvol/target/iqn names.
        freenas_volume = ix_utils.generate_freenas_volume_name(
            volume['name'],
            self.configuration.ixsystems_iqn_prefix)
        LOG.debug('volume name after freenas generate : %s',
                  json.dumps(freenas_volume))

        freenas_volume['size'] = volume['size']
        freenas_volume['target_size'] = volume['size']

        self.common._create_volume(freenas_volume['name'],
                                   freenas_volume['size'])
        # Remove LUN Creation from here,check at initi
        self.common._create_iscsitarget(freenas_volume['target'],
                                        freenas_volume['name'])

    def delete_volume(self, volume):
        """Deletes volume and corresponding iscsi target."""
        LOG.info('iXsystems Delete Volume')
        LOG.debug('delete_volume %s', volume['name'])

        freenas_volume = ix_utils.generate_freenas_volume_name(
            volume['name'],
            self.configuration.ixsystems_iqn_prefix)

        # Tear down the target first, then the backing zvol.
        if freenas_volume['target']:
            self.common._delete_iscsitarget(freenas_volume['target'])
        if freenas_volume['name']:
            self.common._delete_volume(freenas_volume['name'])

    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume."""
        LOG.info('iXsystems Create Export')
        LOG.debug('create_export %s', volume['name'])

        handle = self.common._create_export(volume['name'])
        LOG.info('provider_location: %s', handle)
        return {'provider_location': handle}

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        LOG.info('iXsystems Ensure Export')
        LOG.debug('ensure_export %s', volume['name'])

        # Same path as create_export: _create_export is expected to be
        # idempotent for an already-exported volume.
        handle = self.common._create_export(volume['name'])
        LOG.info('provider_location: %s', handle)
        return {'provider_location': handle}

    def remove_export(self, context, volume):
        """Driver exntry point to remove an export for a volume.

        we have nothing to do for unexporting.
        """
        pass

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance."""
        LOG.info('iXsystems Initialise Connection')
        freenas_volume = ix_utils.generate_freenas_volume_name(
            volume['name'],
            self.configuration.ixsystems_iqn_prefix)
        if not freenas_volume['name']:
            # is this snapshot?
            freenas_volume = ix_utils.generate_freenas_snapshot_name(
                volume['name'],
                self.configuration.ixsystems_iqn_prefix)

        # Hand the iSCSI portal + IQN back to Nova for the initiator login.
        properties = {}
        properties['target_discovered'] = False
        properties['target_portal'] = ix_utils.get_iscsi_portal(
            self.configuration.ixsystems_server_hostname,
            self.configuration.ixsystems_server_iscsi_port)
        properties['target_iqn'] = freenas_volume['iqn']
        properties['volume_id'] = volume['id']
        LOG.debug('initialize_connection data: %s', properties)
        return {'driver_volume_type': 'iscsi', 'data': properties}

    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to detach a volume from an instance."""
        pass

    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot."""
        LOG.info('iXsystems Create Snapshot')
        LOG.debug('create_snapshot %s', snapshot['name'])

        freenas_snapshot = ix_utils.generate_freenas_snapshot_name(
            snapshot['name'], self.configuration.ixsystems_iqn_prefix)
        freenas_volume = ix_utils.generate_freenas_volume_name(
            snapshot['volume_name'], self.configuration.ixsystems_iqn_prefix)

        self.common._create_snapshot(freenas_snapshot['name'],
                                     freenas_volume['name'])

    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        LOG.info('iXsystems Delete Snapshot')
        LOG.debug('delete_snapshot %s', snapshot['name'])

        freenas_snapshot = ix_utils.generate_freenas_snapshot_name(
            snapshot['name'],
            self.configuration.ixsystems_iqn_prefix)
        freenas_volume = ix_utils.generate_freenas_volume_name(
            snapshot['volume_name'],
            self.configuration.ixsystems_iqn_prefix)

        self.common._delete_snapshot(freenas_snapshot['name'],
                                     freenas_volume['name'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from snapshot."""
        LOG.info('iXsystems Create Volume From Snapshot')
        LOG.info('create_volume_from_snapshot %s', snapshot['name'])

        existing_vol = ix_utils.generate_freenas_volume_name(
            snapshot['volume_name'], self.configuration.ixsystems_iqn_prefix)
        freenas_snapshot = ix_utils.generate_freenas_snapshot_name(
            snapshot['name'], self.configuration.ixsystems_iqn_prefix)
        freenas_volume = ix_utils.generate_freenas_volume_name(
            volume['name'], self.configuration.ixsystems_iqn_prefix)
        freenas_volume['size'] = volume['size']
        freenas_volume['target_size'] = volume['size']

        # Clone the snapshot into a new zvol, then export it.
        self.common._create_volume_from_snapshot(freenas_volume['name'],
                                                 freenas_snapshot['name'],
                                                 existing_vol['name'])
        self.common._create_iscsitarget(freenas_volume['target'],
                                        freenas_volume['name'])

    def get_volume_stats(self, refresh=False):
        """Get stats info from volume group / pool."""
        LOG.info('iXsystems Get Volume Status')
        # Stats are cached; only hit the array when a refresh is requested.
        if refresh:
            self.stats = self.common._update_volume_stats()
        LOG.info('get_volume_stats: %s', self.stats)
        return self.stats

    def create_cloned_volume(self, volume, src_vref):
        """Creates a volume from source volume."""
        LOG.info('iXsystems Create Cloned Volume')
        LOG.info('create_cloned_volume: %s', src_vref['id'])

        # Clone via an intermediate snapshot of the source volume.
        temp_snapshot = {'volume_name': src_vref['name'],
                         'name': 'name-c%s' % src_vref['id']}
        self.create_snapshot(temp_snapshot)
        self.create_volume_from_snapshot(volume, temp_snapshot)
        # self.delete_snapshot(temp_snapshot)
        # with API v2.0 this causes FreeNAS error
        # "snapshot has dependent clones". Cannot delete while volume is
        # active. Instead, added check and deletion of orphaned dependent
        # clones in common._delete_volume()

    def extend_volume(self, volume, new_size):
        """Driver entry point to extend an existing volumes size."""
        LOG.info('iXsystems Extent Volume')
        LOG.info('extend_volume %s', volume['name'])

        freenas_volume = ix_utils.generate_freenas_volume_name(
            volume['name'], self.configuration.ixsystems_iqn_prefix)
        freenas_new_size = new_size

        if volume['size'] != freenas_new_size:
            self.common._extend_volume(freenas_volume['name'],
                                       freenas_new_size)
| StarcoderdataPython |
3334153 | from __future__ import print_function
import torch.utils.data as data
import os
import sys
import numpy as np
import h5py
class MLMLoader(data.Dataset):
    """Dataset over a pre-built HDF5 file of (image, summary, class) groups.

    During training, with probability ``mismatch`` the images are drawn from
    a *different* instance, producing a negative pair for image-retrieval
    training (``target_ir`` = -1); otherwise the pair is positive (+1).
    """

    def __init__(self, data_path, partition, mismatch=0.5):
        if data_path == None:
            raise Exception('No data path specified.')

        if partition is None or partition not in ['train', 'val', 'test']:
            raise Exception('Unknown partition type %s.' % partition)
        else:
            self.partition = partition

        # The HDF5 handle stays open for the lifetime of the dataset.
        self.h5f = h5py.File(os.path.join(data_path, f'{partition}.h5'), 'r')
        self.ids = self.h5f['ids']
        self.mismatch = mismatch
        # Round-robin index over the summary languages (en/de/fr) used in eval.
        self.smr_counter = 0

    def __getitem__(self, index):
        instanceId = self.ids[index]

        # we force a mismatch give the probability
        match = np.random.uniform() > self.mismatch if self.partition == 'train' else True
        target = match and 1 or -1

        if target == 1:  # load positive samples
            all_img = self.h5f[f'{instanceId}_images'][()]
        else:
            # Negative samples are generated by picking random images
            all_idx = range(len(self.ids))
            rndindex = np.random.choice(all_idx)
            # random index to pick image ids at random
            while rndindex == index:
                rndindex = np.random.choice(all_idx)  # pick a random index

            # load negative samples of images
            rndId = self.ids[rndindex]
            # NOTE(review): unlike the positive branch, this keeps a lazy
            # h5py dataset (no [()] materialisation); indexing below still
            # works — confirm whether the inconsistency is intentional.
            all_img = self.h5f[f'{rndId}_images']

        # Other modalities remain unchanged
        all_smr = self.h5f[f'{instanceId}_summaries'][()]
        all_cls = self.h5f[f'{instanceId}_classes'][()]
        cell_id = self.h5f[f'{instanceId}_onehot'][()]

        if self.partition == 'train':
            # for training we sample random image, summary and classes for the given id
            image = all_img[np.random.choice(range(all_img.shape[0]))]
            summary = all_smr[np.random.choice(range(all_smr.shape[0]))]
            classes = all_cls[np.random.choice(range(all_cls.shape[0]))]
        else:
            # for test and validation select the first example for images and triples
            # for summaries we set a counter since we have 3 languages (en, de, fr)
            image = all_img[0]
            summary = all_smr[self.smr_counter] if self.smr_counter < all_smr.shape[0] else all_smr[0]
            classes = all_cls[0]
            # update summary counter (wraps after the third language)
            self.smr_counter = 0 if self.smr_counter == 2 else self.smr_counter + 1

        return {
            'id': instanceId,
            'image': image,
            'summary': summary,
            'classes': classes,
            'target_ir': target,
            'target_le': np.argmax(cell_id)
        }

    def __len__(self):
        return len(self.ids)
9738209 | <reponame>safiza-web/ru-gpts<filename>src/gpt3_data_loader.py
# coding=utf-8
# Copyright (c) 2020, Sber. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import BatchSampler, DataLoader
from src import mpu
from src.dataset_rugpt3 import RuGpt3TextDataset, RuGpt3DatasetArguments
from src.utils import print_rank_0
from transformers import GPT2Tokenizer
class InfiniteDataLoader(DataLoader):
    """DataLoader that never raises StopIteration.

    One underlying epoch iterator is kept alive; whenever it is exhausted a
    fresh one is created transparently, so callers can simply keep calling
    ``next()`` forever.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Prime the first epoch iterator up front.
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # Epoch finished — start a new pass over the dataset.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)
class ResumableBatchSampler(BatchSampler):
    """BatchSampler that can skip the first ``start_iter`` full batches.

    Used to resume training mid-epoch: the skipped batches are still formed
    (so the iteration order matches a fresh run) but not yielded.  A final
    partial batch, if any, is always yielded unless ``drop_last`` is set.
    """

    start_iter = 0

    def __iter__(self):
        batch = []
        produced = 0
        for idx in self.sampler:
            batch.append(idx)
            if len(batch) == self.batch_size:
                if produced >= self.start_iter:
                    yield batch
                batch = []
                produced += 1
        if batch and not self.drop_last:
            yield batch
def make_gpt3_dataloaders(args):
    """Build infinite train/valid/test data loaders for GPT-3 training.

    Returns ``((train, valid, test), num_tokens, eod_token, tokenizer)`` and
    sets ``args.do_train`` / ``args.do_valid`` / ``args.do_test`` according to
    which data paths were provided. Loaders are ``None`` for missing splits.
    """
    # Data-parallel topology: each rank loads its own shard of the files.
    world_size = mpu.get_data_parallel_world_size()
    rank = mpu.get_data_parallel_rank()
    num_workers = args.num_workers

    # Tokenizer location: an explicit HuggingFace checkpoint wins, then an
    # explicit tokenizer path, then a '_tokenizer/' dir next to the train data.
    if args.load_huggingface:
        tokenizer_path = args.load_huggingface
    elif args.tokenizer_path:
        tokenizer_path = args.tokenizer_path
    else:
        tokenizer_path = os.path.join(os.path.dirname(args.train_data_path), '_tokenizer/')
    print_rank_0('Load tokenizer from ' + tokenizer_path)
    tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_path)
    eod_token = tokenizer.encoder['<pad>']
    num_tokens = len(tokenizer)

    # Train shows no per-file progress bar; eval splits do.
    train_dataset_args = RuGpt3DatasetArguments(
        block_size=args.seq_length, max_files_load=args.max_files_per_process,
        overwrite_cache=args.overwrite_cache, tqdm=False)
    eval_dataset_args = RuGpt3DatasetArguments(
        block_size=args.seq_length, max_files_load=args.max_files_per_process,
        overwrite_cache=args.overwrite_cache, tqdm=True)

    def make_data_loader_(data_path, dataset_args):
        # One loader = sharded text dataset + sequential resumable batching.
        print_rank_0(f'Load RuGPT3 Dataset from {data_path}, {dataset_args.max_files_load} files per process')
        dataset = RuGpt3TextDataset(
            tokenizer=tokenizer,
            args=dataset_args,
            rank=rank,
            world_size=world_size,
            file_path=data_path,
        )
        sampler = torch.utils.data.SequentialSampler(dataset)
        batch_sampler = ResumableBatchSampler(sampler=sampler,
                                              batch_size=args.batch_size,
                                              drop_last=True)
        return InfiniteDataLoader(dataset, batch_sampler=batch_sampler,
                                  num_workers=num_workers, pin_memory=True)

    train = make_data_loader_(args.train_data_path, train_dataset_args) if args.train_data_path else None
    valid = make_data_loader_(args.val_data_path, eval_dataset_args) if args.val_data_path else None
    test = make_data_loader_(args.test_data_path, eval_dataset_args) if args.test_data_path else None

    args.do_train = train is not None
    args.do_valid = valid is not None
    args.do_test = test is not None

    return (train, valid, test), num_tokens, eod_token, tokenizer
| StarcoderdataPython |
8011388 | <gh_stars>0
# -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from api.serializers import ImagePutSerializer, TaskSerializer
# Explicit public API: only the viewset is exported via ``from ... import *``.
__all__ = (
    'TaskViewSet',
)
class TaskViewSet(ViewSet):
    """Stub task endpoint: accepts an image upload and reports task status.

    ``create`` validates the uploaded files and answers with a hard-coded
    task id; ``retrieve`` only knows about that single stub task.
    """

    serializer_class = ImagePutSerializer

    def create(self, request, *args, **kwargs):
        # Validate the multipart upload; invalid input raises a 400 response.
        serializer = ImagePutSerializer(data=request.FILES)
        serializer.is_valid(raise_exception=True)
        payload = {'id': 1234, 'status': 'processing'}
        return Response(payload, status=status.HTTP_201_CREATED)

    def retrieve(self, request, pk=None):
        # Validate the task id format before looking it up.
        serializer = TaskSerializer(data={'task_id': pk})
        serializer.is_valid(raise_exception=True)
        if pk != '1234':
            # Only the stub task id exists.
            return Response({}, status=status.HTTP_404_NOT_FOUND)
        return Response({'id': 1234, 'status': 'success'},
                        status=status.HTTP_200_OK)
| StarcoderdataPython |
6566559 | <filename>cdlib/algorithms/__init__.py
from .edge_clustering import *
from .crisp_partition import *
from .overlapping_partition import *
from .attribute_clustering import *
from .bipartite_clustering import *
from .temporal_partition import *
| StarcoderdataPython |
11217193 | <gh_stars>100-1000
import logging
from shlex import quote
from subprocess import run, PIPE, DEVNULL
from typing import Sequence, List
from aurman.own_exceptions import InvalidInput
def split_query_helper(max_length: int, base_length_of_query: int, length_per_append: int, to_append: Sequence[str]) -> \
        List[List[str]]:
    """Split a long query's parameters into chunks that fit ``max_length``.

    :param max_length: The max length of a query
    :param base_length_of_query: Fixed length of the query skeleton,
                                 e.g. the length of ``expac -S '...'``
    :param length_per_append: Constant added per appended parameter,
                              e.g. 1 for a separating space
    :param to_append: Sequence of parameter strings to distribute
    :return: A list of lists, where each inner list holds the parameters of
             one query; ``len(return_value)`` is the number of queries needed
    :raises InvalidInput: if a single parameter alone exceeds ``max_length``
    """
    query_length = base_length_of_query
    chunk = []  # parameters of the query currently being filled
    chunks = [chunk]

    for param in to_append:
        # Lengths are measured in UTF-8 bytes, not characters.
        cost = len(param.encode("utf8")) + length_per_append
        if query_length + cost <= max_length:
            chunk.append(param)
            query_length += cost
        else:
            # Start a new query with this parameter.
            chunk = [param]
            chunks.append(chunk)
            query_length = base_length_of_query + cost
            if query_length > max_length:
                # Even a fresh query cannot hold this parameter.
                logging.error("Query too long because of '{}'".format(param))
                raise InvalidInput("Query too long because of '{}'".format(param))

    return chunks
def expac(option: str, formatting: Sequence[str], targets: Sequence[str]) -> List[str]:
    """Wrapper around expac. see: https://github.com/falconindy/expac

    Example call: ``expac("-S", ("n", "v"), ("package1", "package2"))``.
    The format specifiers are joined by '?!', so ('n', 'v') yields
    ``%n?!%v`` per output line.

    :param option: option as in https://github.com/falconindy/expac
    :param formatting: formatting as in https://github.com/falconindy/expac
    :param targets: target names, fed to expac via stdin
    :return: the lines of the expac output, one list item per line
    :raises InvalidInput: if expac exits with a non-zero return code
    """
    format_string = "?!".join("%{}".format(formatter) for formatter in formatting)
    cmd = ["expac", option, format_string]
    if targets:
        # '-' tells expac to read target names from stdin.
        cmd += ["-"]

    expac_return = run(cmd, input='\n'.join(targets), stdout=PIPE,
                       stderr=DEVNULL, universal_newlines=True)

    if expac_return.returncode != 0:
        query_stringified = ' '.join(quote(i) for i in cmd[1:])
        logging.error("expac query {} for targets {} failed".format(query_stringified, targets))
        raise InvalidInput("expac query {} for targets {} failed".format(query_stringified, targets))

    return expac_return.stdout.strip().splitlines()
def pacman(options_as_list: List[str], fetch_output: bool, dir_to_execute: str = None, sudo: bool = True,
           use_ask: bool = False, log_error: bool = True) -> List[str]:
    """Wrapper around pacman. see: https://www.archlinux.org/pacman/pacman.8.html

    :param options_as_list: pacman options, e.g. ``["-Syu", "package1"]``
    :param dir_to_execute: directory to run pacman in, or None for the cwd
    :param sudo: True to prefix the command with sudo
    :param use_ask: pass --ask=4, see:
        https://git.archlinux.org/pacman.git/commit/?id=90e3e026d1236ad89c142b427d7eeb842bbb7ff4
    :param log_error: whether to log before raising on failure
    :param fetch_output: True to capture and return pacman's stdout
    :return: the captured output lines if ``fetch_output``, otherwise ``[]``
    :raises InvalidInput: if pacman exits with a non-zero return code
    """
    pacman_query = ["sudo", "pacman"] if sudo else ["pacman"]
    if use_ask:
        pacman_query.append("--ask=4")
    pacman_query.extend(options_as_list)

    # Capture output only on demand; otherwise pacman inherits the terminal.
    kwargs = {'cwd': dir_to_execute}
    if fetch_output:
        kwargs.update(stdout=PIPE, stderr=DEVNULL, universal_newlines=True)

    pacman_return = run(pacman_query, **kwargs)

    if pacman_return.returncode != 0:
        if log_error:
            logging.error("pacman query {} failed".format(pacman_query))
        raise InvalidInput("pacman query {} failed".format(pacman_query))

    return pacman_return.stdout.strip().splitlines() if fetch_output else []
def makepkg(options_as_list: List[str], fetch_output: bool, dir_to_execute: str) -> List[str]:
    """Wrapper around makepkg. see: https://www.archlinux.org/pacman/makepkg.8.html

    :param options_as_list: makepkg options, e.g. ``["--printsrcinfo"]``
    :param fetch_output: True to capture and return makepkg's stdout
    :param dir_to_execute: directory in which to run makepkg
    :return: the captured output lines if ``fetch_output``, otherwise ``[]``
    :raises InvalidInput: if makepkg exits with a non-zero return code
    """
    makepkg_query = ["makepkg"] + options_as_list

    # Build the run() kwargs the same way pacman() does, for consistency:
    # capture stdout only on demand, always run in the requested directory.
    kwargs = {'cwd': dir_to_execute}
    if fetch_output:
        kwargs.update(stdout=PIPE, universal_newlines=True)

    makepkg_return = run(makepkg_query, **kwargs)

    if makepkg_return.returncode != 0:
        logging.error("makepkg query {} failed in directory {}".format(makepkg_query, dir_to_execute))
        raise InvalidInput("makepkg query {} failed in directory {}".format(makepkg_query, dir_to_execute))

    if fetch_output:
        return makepkg_return.stdout.strip().splitlines()

    return []
| StarcoderdataPython |
1958858 | <reponame>YimengYang/wol<filename>code/utils/tests/test_tree.py
#!/usr/bin/env python3
from unittest import TestCase, main
from shutil import rmtree
from tempfile import mkdtemp
from os.path import join, dirname, realpath
from skbio import TreeNode
from skbio.tree import MissingNodeError
from utils.tree import (
support, unpack, has_duplicates, compare_topology, intersect_trees,
unpack_by_func, read_taxdump, build_taxdump_tree, order_nodes,
is_ordered, lca2, cladistic, check_monophyly, _compare_length,
compare_branch_lengths, assign_taxa, assign_supports, support_to_label,
walk_copy, root_above, unroot_at, _exact_compare, calc_split_metrics,
calc_length_metrics, format_newick, root_by_outgroup, restore_rooting,
restore_node_labels, restore_node_order, get_base, calc_bidi_minlevels,
calc_bidi_mindepths)
class TreeTests(TestCase):
    def setUp(self):
        """Create a scratch working directory and locate the test data files."""
        # test output can be written to this directory
        self.working_dir = mkdtemp()
        # test data directory (a 'data' folder next to this test module)
        datadir = join(dirname(realpath(__file__)), 'data')
        # test data files: NCBI-taxdump-style nodes and names tables
        self.nodes_fp = join(datadir, 'nodes.dmp')
        self.names_fp = join(datadir, 'names.dmp')
def tearDown(self):
# there isn't any file to remove at the moment
# but in the future there will be
rmtree(self.working_dir)
    def test_support(self):
        """Test getting support value of a node.

        Covers bare numeric labels, support-plus-branch-length, quoted
        'support:name' labels, and nodes with no parsable support.
        """
        # test nodes with support alone as label
        tree = TreeNode.read(['((a,b)75,(c,d)90);'])
        node1, node2 = tree.children
        self.assertEqual(support(node1), 75.0)
        self.assertEqual(support(node2), 90.0)
        # test nodes with support and branch length
        tree = TreeNode.read(['((a,b)0.85:1.23,(c,d)0.95:4.56);'])
        node1, node2 = tree.children
        self.assertEqual(support(node1), 0.85)
        self.assertEqual(support(node2), 0.95)
        # test nodes with support and extra label (not a common scenario but
        # can happen)
        tree = TreeNode.read(['((a,b)\'80:X\',(c,d)\'60:Y\');'])
        node1, node2 = tree.children
        self.assertEqual(support(node1), 80.0)
        self.assertEqual(support(node2), 60.0)
        # test nodes without label, with non-numeric label, and with branch
        # length only
        tree = TreeNode.read(['((a,b),(c,d)x,(e,f):1.0);'])
        for node in tree.children:
            self.assertIsNone(support(node))
    def test_unpack(self):
        """Test unpacking an internal node.

        Unpacking promotes a node's children to its parent; when branch
        lengths are present, the unpacked node's length is added to each
        child's length (expected strings below encode this).
        """
        # test unpacking a node without branch length
        tree = TreeNode.read(['((c,d)a,(e,f)b);'])
        unpack(tree.find('b'))
        exp = '((c,d)a,e,f);\n'
        self.assertEqual(str(tree), exp)
        # test unpacking a node with branch length
        # (e: 2.0 + 2.0 = 4.0, f: 1.0 + 2.0 = 3.0)
        tree = TreeNode.read(['((c:2.0,d:3.0)a:1.0,(e:2.0,f:1.0)b:2.0);'])
        unpack(tree.find('b'))
        exp = '((c:2.0,d:3.0)a:1.0,e:4.0,f:3.0);'
        self.assertEqual(str(tree).rstrip(), exp)
        # test attempting to unpack root
        tree = TreeNode.read(['((d,e)b,(f,g)c)a;'])
        msg = 'Cannot unpack root.'
        with self.assertRaisesRegex(ValueError, msg):
            unpack(tree.find('a'))
def test_has_duplicates(self):
"""Test checking for duplicated taxa."""
# test tree without duplicates
tree = TreeNode.read(['((a,b),(c,d));'])
obs = has_duplicates(tree)
self.assertFalse(obs)
# test tree with duplicates
tree = TreeNode.read(['((a,a),(c,a));'])
obs = has_duplicates(tree)
self.assertTrue(obs)
tree = TreeNode.read(['((1,(2,x)),4,(5,(6,x,8)));'])
obs = has_duplicates(tree)
self.assertTrue(obs)
# test tree with empty taxon names (not a common scenario but can
# happen)
tree = TreeNode.read(['((1,(2,,)),4,(5,(6,,8)));'])
msg = 'Empty taxon name\(s\) found.'
with self.assertRaisesRegex(ValueError, msg):
has_duplicates(tree)
    def test_compare_topology(self):
        """Test comparing topologies of two trees.

        Topology comparison ignores branch lengths and child order; it is
        sensitive to node names and parent-child relations.
        """
        # test identical Newick strings
        tree1 = TreeNode.read(['(a,b)c;'])
        tree2 = TreeNode.read(['(a,b)c;'])
        obs = compare_topology(tree1, tree2)
        self.assertTrue(obs)
        # test identical topologies with different branch lengths
        tree1 = TreeNode.read(['(a:1,b:2)c:3;'])
        tree2 = TreeNode.read(['(a:3,b:2)c:1;'])
        obs = compare_topology(tree1, tree2)
        self.assertTrue(obs)
        # test identical topologies with flipped child nodes
        tree1 = TreeNode.read(['(a,b)c;'])
        tree2 = TreeNode.read(['(b,a)c;'])
        obs = compare_topology(tree1, tree2)
        self.assertTrue(obs)
        tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
        tree2 = TreeNode.read(['((8,7,6)3,(5,4)2)1;'])
        obs = compare_topology(tree1, tree2)
        self.assertTrue(obs)
        tree1 = TreeNode.read(['(((9,10)4,(11,12,13)5)2,((14)6,(15,16,17,18)7,'
                               '(19,20)8)3)1;'])
        tree2 = TreeNode.read(['(((15,16,17,18)7,(14)6,(20,19)8)3,((12,13,11)5'
                               ',(10,9)4)2)1;'])
        obs = compare_topology(tree1, tree2)
        self.assertTrue(obs)
        # test different topologies
        tree1 = TreeNode.read(['(a,b)c;'])
        tree2 = TreeNode.read(['(a,c)b;'])
        obs = compare_topology(tree1, tree2)
        self.assertFalse(obs)
        tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
        tree2 = TreeNode.read(['((4,5)3,(6,7,8)2)1;'])
        obs = compare_topology(tree1, tree2)
        self.assertFalse(obs)
        tree1 = TreeNode.read(['((4,5)2,(6,7,8)3)1;'])
        tree2 = TreeNode.read(['(((4,1)8)7,(6,3)2)5;'])
        obs = compare_topology(tree1, tree2)
        self.assertFalse(obs)
    def test_intersect_trees(self):
        """Test intersecting two trees.

        Intersection shrinks each tree to the taxa shared by both;
        compare_subsets == 0.0 means identical subset structure.
        """
        # test trees with identical taxa
        tree1 = TreeNode.read(['((a,b),(c,d));'])
        tree2 = TreeNode.read(['(a,(b,c,d));'])
        obs = intersect_trees(tree1, tree2)
        exp = (tree1, tree2)
        for i in range(2):
            self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
        # test trees with partially different taxa
        tree1 = TreeNode.read(['((a,b),(c,d));'])
        tree2 = TreeNode.read(['((a,b),(c,e));'])
        obs = intersect_trees(tree1, tree2)
        tree1_lap = TreeNode.read(['((a,b),c);'])
        tree2_lap = TreeNode.read(['((a,b),e);'])
        exp = (tree1_lap, tree2_lap)
        for i in range(2):
            self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
        tree1 = TreeNode.read(['(((a,b),(c,d)),((e,f,g),h));'])
        tree2 = TreeNode.read(['(a,((b,x),(d,y,(f,g,h))));'])
        obs = intersect_trees(tree1, tree2)
        tree1_lap = TreeNode.read(['(((a,b),d),((f,g),h));'])
        tree2_lap = TreeNode.read(['(a,(b,(d,(f,g,h))));'])
        exp = (tree1_lap, tree2_lap)
        for i in range(2):
            self.assertEqual(obs[i].compare_subsets(exp[i]), 0.0)
        # test trees with completely different taxa
        tree1 = TreeNode.read(['((a,b),(c,d));'])
        tree2 = TreeNode.read(['((e,f),(g,h));'])
        msg = 'Trees have no overlapping taxa.'
        with self.assertRaisesRegex(ValueError, msg):
            intersect_trees(tree1, tree2)
        # test trees with duplicated taxa
        tree1 = TreeNode.read(['((a,b),(c,d));'])
        tree2 = TreeNode.read(['((a,a),(b,c));'])
        msg = 'Either tree has duplicated taxa.'
        with self.assertRaisesRegex(ValueError, msg):
            intersect_trees(tree1, tree2)
    def test_unpack_by_func(self):
        """Test unpacking nodes by function.

        A predicate decides which internal nodes are unpacked; both
        branch-length-based and support-based predicates are exercised.
        """
        # unpack internal nodes with branch length <= 1.0
        def func(x):
            return x.length <= 1.0
        # will unpack node 'a', but not tip 'e'
        # will add the branch length of 'a' to its child nodes 'c' and 'd'
        tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
        obs = str(unpack_by_func(tree, func)).rstrip()
        exp = '((e:1.0,f:2.0)b:2.0,c:3.0,d:4.0);'
        self.assertEqual(obs, exp)
        # unpack internal nodes with branch length < 2.01
        # will unpack both 'a' and 'b'
        obs = str(unpack_by_func(tree, lambda x: x.length <= 2.0)).rstrip()
        exp = '(c:3.0,d:4.0,e:3.0,f:4.0);'
        self.assertEqual(obs, exp)
        # unpack two nested nodes 'a' and 'c' simultaneously
        tree = TreeNode.read(['(((e:3,f:2)c:1,d:3)a:1,b:4);'])
        obs = str(unpack_by_func(tree, lambda x: x.length <= 2.0)).rstrip()
        exp = '(b:4.0,d:4.0,e:5.0,f:4.0);'
        self.assertEqual(obs, exp)
        # test a complicated scenario (unpacking nodes 'g', 'h' and 'm')
        def func(x):
            return x.length < 2.0
        tree = TreeNode.read(['(((a:1.04,b:2.32,c:1.44)d:3.20,'
                              '(e:3.91,f:2.47)g:1.21)h:1.75,'
                              '(i:4.14,(j:2.06,k:1.58)l:3.32)m:0.77);'])
        obs = str(unpack_by_func(tree, func)).rstrip()
        exp = ('((a:1.04,b:2.32,c:1.44)d:4.95,e:6.87,f:5.43,i:4.91,'
               '(j:2.06,k:1.58)l:4.09);')
        self.assertEqual(obs, exp)
        # unpack nodes with support < 75
        def func(x):
            return support(x) < 75
        tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
        obs = str(unpack_by_func(tree, func)).rstrip()
        exp = '(((a,b)85,(c,d)78)75,(e,f,g)80);'
        self.assertEqual(obs, exp)
        # unpack nodes with support < 85
        obs = str(unpack_by_func(tree, lambda x: support(x) < 85)).rstrip()
        exp = '((a,b)85,c,d,e,f,g);'
        self.assertEqual(obs, exp)
        # unpack nodes with support < 0.95
        tree = TreeNode.read(['(((a,b)0.97,(c,d)0.98)1.0,(e,(f,g)0.88)0.96);'])
        obs = str(unpack_by_func(tree, lambda x: support(x) < 0.95)).rstrip()
        exp = '(((a,b)0.97,(c,d)0.98)1.0,(e,f,g)0.96);'
        self.assertEqual(obs, exp)
        # test a case where there are branch lengths, no support values and
        # node labels
        def func(x):
            sup = support(x)
            return sup is not None and sup < 75
        tree = TreeNode.read(['(((a:1.02,b:0.33)85:0.12,(c:0.86,d:2.23)'
                              '70:3.02)75:0.95,(e:1.43,(f:1.69,g:1.92)64:0.20)'
                              'node:0.35)root;'])
        obs = str(unpack_by_func(tree, func)).rstrip()
        exp = ('(((a:1.02,b:0.33)85:0.12,c:3.88,d:5.25)75:0.95,'
               '(e:1.43,f:1.89,g:2.12)node:0.35)root;')
        self.assertEqual(obs, exp)
    def test_read_taxdump(self):
        """Test reading NCBI taxdump.

        Parses nodes.dmp (and optionally names.dmp) into a dict of
        {taxid: {'parent', 'rank', 'children', 'name'}} records.
        """
        # without names.dmp, every taxon name defaults to an empty string
        obs = read_taxdump(self.nodes_fp)
        exp = {
            '1': {'parent': '1', 'rank': 'order',
                  'children': set(['2', '3'])},
            '2': {'parent': '1', 'rank': 'family',
                  'children': set(['4', '5'])},
            '3': {'parent': '1', 'rank': 'family',
                  'children': set(['6', '7', '8'])},
            '4': {'parent': '2', 'rank': 'genus',
                  'children': set(['9', '10'])},
            '5': {'parent': '2', 'rank': 'genus',
                  'children': set(['11', '12', '13'])},
            '6': {'parent': '3', 'rank': 'genus',
                  'children': set(['14'])},
            '7': {'parent': '3', 'rank': 'genus',
                  'children': set(['15', '16', '17', '18'])},
            '8': {'parent': '3', 'rank': 'genus',
                  'children': set(['19', '20'])},
            '9': {'parent': '4', 'rank': 'species', 'children': set()},
            '10': {'parent': '4', 'rank': 'species', 'children': set()},
            '11': {'parent': '5', 'rank': 'species', 'children': set()},
            '12': {'parent': '5', 'rank': 'species', 'children': set()},
            '13': {'parent': '5', 'rank': 'species', 'children': set()},
            '14': {'parent': '6', 'rank': 'species', 'children': set()},
            '15': {'parent': '7', 'rank': 'species', 'children': set()},
            '16': {'parent': '7', 'rank': 'species', 'children': set()},
            '17': {'parent': '7', 'rank': 'species', 'children': set()},
            '18': {'parent': '7', 'rank': 'species', 'children': set()},
            '19': {'parent': '8', 'rank': 'species', 'children': set()},
            '20': {'parent': '8', 'rank': 'species', 'children': set()}
        }
        for tid in exp:
            exp[tid]['name'] = ''
        self.assertDictEqual(obs, exp)
        # with names.dmp, the parsed names are attached to each record
        obs = read_taxdump(self.nodes_fp, self.names_fp)
        name_dict = {
            '1': 'root', '2': 'Eukaryota', '3': 'Bacteria', '4': 'Plantae',
            '5': 'Animalia', '6': 'Bacteroidetes', '7': 'Proteobacteria',
            '8': 'Firmicutes', '9': 'Gymnosperms', '10': 'Angiosperms',
            '11': 'Chordata', '12': 'Arthropoda', '13': 'Mollusca',
            '14': 'Prevotella', '15': 'Escherichia', '16': 'Vibrio',
            '17': 'Rhizobium', '18': 'Helicobacter', '19': 'Bacillus',
            '20': 'Clostridia'
        }
        for tid in name_dict:
            exp[tid]['name'] = name_dict[tid]
        self.assertDictEqual(obs, exp)
def test_build_taxdump_tree(self):
"""Test building NCBI taxdump tree."""
taxdump = read_taxdump(self.nodes_fp)
obs = build_taxdump_tree(taxdump)
exp = TreeNode.read(['(((9,10)4,(11,12,13)5)2,((14)6,(15,16,17,18)7,'
'(19,20)8)3)1;'])
self.assertTrue(compare_topology(obs, exp))
def test_order_nodes(self):
"""Test order nodes"""
tree1 = TreeNode.read(['(((a,b),(c,d,i)j),((e,g),h));'])
# test increase ordering
tree1_increase = order_nodes(tree1, True)
self.assertTrue(is_ordered(tree1_increase))
# test decrease ordering
tree1_decrease = order_nodes(tree1, False)
self.assertTrue(is_ordered(tree1_decrease, False))
    def test_is_ordered(self):
        """Test if a tree is ordered.

        The second argument selects the direction: True (default) for
        increasing clade size, False for decreasing.
        """
        # test tree in increasing order
        tree1 = TreeNode.read(['((i,j)a,b)c;'])
        self.assertTrue(is_ordered(tree1))
        self.assertTrue(is_ordered(tree1, True))
        self.assertFalse(is_ordered(tree1, False))
        # test tree in both increasing and decreasing order
        tree2 = TreeNode.read(['(a, b);'])
        self.assertTrue(is_ordered(tree2))
        self.assertTrue(is_ordered(tree2, False))
        # test an unordered tree
        tree3 = TreeNode.read(['(((a,b),(c,d,x,y,z)),((e,g),h));'])
        self.assertFalse(is_ordered(tree3, True))
        self.assertFalse(is_ordered(tree3, False))
        # test tree in decreasing order
        tree5 = TreeNode.read(['((h,(e,g)),((a,b),(c,d,i)j));'])
        self.assertTrue(is_ordered(tree5, False))
    def test_lca2(self):
        """Test the taxa-set-based LCA lookup.

        lca2 requires per-node taxon sets: before assign_taxa is called it
        raises AttributeError (first assertion); afterwards it resolves the
        lowest common ancestor of any taxon set.
        """
        newick = '((((a,b)n6,c)n4,(d,e)n5)n2,(f,(g,h)n7)n3,i)n1;'
        tree = TreeNode.read([newick])
        msg = "'TreeNode' object has no attribute 'taxa'"
        with self.assertRaisesRegex(AttributeError, msg):
            lca2(tree, set('ab'))
        assign_taxa(tree)
        self.assertEqual(lca2(tree, set('a')).name, 'a')
        self.assertEqual(lca2(tree, set('ab')).name, 'n6')
        self.assertEqual(lca2(tree, set('ac')).name, 'n4')
        self.assertEqual(lca2(tree, set('ace')).name, 'n2')
        self.assertEqual(lca2(tree, set('bgi')).name, 'n1')
    def test_cladistic(self):
        """Test classifying a taxon set as 'uni', 'mono' or 'poly'.

        'uni': single taxon; 'mono': taxa form a clade; 'poly': they do not.
        Works both with and without precomputed taxon sets (assign_taxa).
        """
        tree1 = TreeNode.read(['((i,j)a,b)c;'])
        self.assertEqual('uni', cladistic(tree1, ['i']))
        self.assertEqual('mono', cladistic(tree1, ['i', 'j']))
        self.assertEqual('poly', cladistic(tree1, ['i', 'b']))
        msg = 'Node x is not in self'
        with self.assertRaisesRegex(MissingNodeError, msg):
            cladistic(tree1, ['x', 'b'])
        tree2 = TreeNode.read(['(((a,b),(c,d,x)),((e,g),h));'])
        self.assertEqual('uni', cladistic(tree2, ['a']))
        self.assertEqual('mono', cladistic(tree2, ['a', 'b', 'c', 'd', 'x']))
        self.assertEqual('poly', cladistic(tree2, ['g', 'h']))
        msg = 'Node y is not in self'
        with self.assertRaisesRegex(MissingNodeError, msg):
            cladistic(tree2, ['y', 'b'])
        # same checks again after precomputing taxon sets
        assign_taxa(tree2)
        self.assertEqual('uni', cladistic(tree2, ['a']))
        self.assertEqual('mono', cladistic(tree2, ['a', 'b']))
        self.assertEqual('poly', cladistic(tree2, ['g', 'h']))
    def test_check_monophyly(self):
        """Test monophyly checking of a taxon set.

        Returns a (status, node) pair where status is 'strict', 'relaxed'
        or 'rejected' and node is the relevant (LCA) node.
        """
        newick = '(((a,b)n4,(c,d)n5,(e,f)n6)n2,(g,(h,i)n7)n3)n1;'
        tree = TreeNode.read([newick])
        # check_monophyly relies on precomputed per-node taxon sets
        assign_taxa(tree)
        res = check_monophyly(tree, 'a')
        self.assertListEqual([res[0], res[1].name], ['strict', 'a'])
        res = check_monophyly(tree, 'ab')
        self.assertListEqual([res[0], res[1].name], ['strict', 'n4'])
        res = check_monophyly(tree, 'abc')
        self.assertListEqual([res[0], res[1].name], ['rejected', 'n2'])
        res = check_monophyly(tree, 'abcd')
        self.assertListEqual([res[0], res[1].name], ['relaxed', 'n2'])
        res = check_monophyly(tree, 'abcde')
        self.assertListEqual([res[0], res[1].name], ['rejected', 'n2'])
        res = check_monophyly(tree, 'abcdef')
        self.assertListEqual([res[0], res[1].name], ['strict', 'n2'])
    def test_compare_length(self):
        """Test pairwise branch length comparison of two nodes.

        Near-equal lengths (differing by ~1e-9) compare as equal,
        demonstrating a tolerance-based comparison.
        """
        tree = TreeNode.read(['((a:1.000000001,(b:1.000000002,c:1):1):3,f)g;'])
        self.assertTrue(_compare_length(tree.find('f'), tree.find('g')))
        self.assertTrue(_compare_length(tree.find('a'), tree.find('b')))
        self.assertTrue(_compare_length(tree.find('c'), tree.find('c').parent))
        self.assertFalse(_compare_length(tree.find('c'),
                                         tree.find('a').parent))
        self.assertFalse(_compare_length(tree.find('a').parent,
                                         tree.find('f')))
        self.assertFalse(_compare_length(tree.find('f'),
                                         tree.find('a').parent))
    def test_compare_branch_lengths(self):
        """Test whole-tree branch length comparison.

        Each comparison is run in both argument orders to check symmetry.
        NOTE(review): trees are re-read before every comparison pair,
        presumably because compare_branch_lengths modifies or consumes its
        inputs -- TODO confirm against the implementation.
        """
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree1, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree2 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree1, tree2))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree3 = TreeNode.read(['(f:1,((b:1,c:1)d:1,a:1)e:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree1, tree3))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree3 = TreeNode.read(['(f:1,((b:1,c:1)d:1,a:1)e:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree3, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree4 = TreeNode.read(['((a:2,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree1, tree4))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree4 = TreeNode.read(['((a:2,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree4, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree5 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree1, tree5))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree5 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree5, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree7 = TreeNode.read(['((a:1,(b:1,c:1):1)e:1,f:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree1, tree7))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree7 = TreeNode.read(['((a:1,(b:1,c:1):1)e:1,f:1)g:1;'])
        self.assertTrue(compare_branch_lengths(tree7, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree8 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1):1;'])
        self.assertTrue(compare_branch_lengths(tree1, tree8))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree8 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1):1;'])
        self.assertTrue(compare_branch_lengths(tree8, tree1))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree6 = TreeNode.read(['(f:1, ((a:1, b:1)c:1 ,d:1)e:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree1, tree6))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree6 = TreeNode.read(['(f:1, ((a:1, b:1)c:1 ,d:1)e:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree6, tree1))
        tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
        tree10 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)g:1)f:1,h:1)i:1;'])
        self.assertTrue(compare_branch_lengths(tree9, tree10))
        tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
        tree10 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)g:1)f:1,h:1)i:1;'])
        self.assertTrue(compare_branch_lengths(tree10, tree9))
        tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
        tree12 = TreeNode.read(['(((a:1,b:1):1,(h:1,e:1):1):1,d:1):1;'])
        self.assertFalse(compare_branch_lengths(tree9, tree12))
        tree9 = TreeNode.read(['(((a:1,b:1)c:1,(d:1,e:1)f:1)g:1,h:1)i:1;'])
        tree12 = TreeNode.read(['(((a:1,b:1):1,(h:1,e:1):1):1,d:1):1;'])
        self.assertFalse(compare_branch_lengths(tree12, tree9))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree11 = TreeNode.read(['((a:1,(x:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree1, tree11))
        tree1 = TreeNode.read(['((a:1,(b:1,c:1)d:1)e:1,f:1)g:1;'])
        tree11 = TreeNode.read(['((a:1,(x:1,c:1)d:1)e:1,f:1)g:1;'])
        self.assertFalse(compare_branch_lengths(tree11, tree1))
    def test_assign_supports(self):
        """Test extracting branch supports from node labels.

        assign_supports parses each internal node's label into a `support`
        attribute, stripping the support part from combined labels.
        """
        tree = TreeNode.read(["((a,b)95,(c,d):1.1,(e,f)'80:Dmel':1.0);"])
        assign_supports(tree)
        # standalone support value
        self.assertEqual(tree.lca(['a', 'b']).support, 95)
        # no support value
        self.assertIsNone(tree.lca(['c', 'd']).support)
        # support value before node name
        self.assertEqual(tree.lca(['e', 'f']).support, 80)
        # stripped support value from node name
        self.assertEqual(tree.lca(['e', 'f']).name, 'Dmel')
    def test_support_to_label(self):
        """Test writing `support` attributes back into node labels.

        This is the inverse of assign_supports: supports become labels
        (or 'support:name' combined labels for named nodes).
        """
        # unnamed nodes
        tree = TreeNode.read(['((a,b)100,((c,d)95,(e,f)99)80);'])
        assign_supports(tree)
        self.assertEqual(str(tree), '((a,b),((c,d),(e,f)));\n')
        support_to_label(tree)
        self.assertEqual(str(tree), '((a,b)100,((c,d)95,(e,f)99)80);\n')
        # named nodes
        tree = TreeNode.read(["((a,b)'100:n2',(c,d)'95:n3')n1;"])
        assign_supports(tree)
        self.assertEqual(str(tree), '((a,b)n2,(c,d)n3)n1;\n')
        support_to_label(tree)
        self.assertEqual(str(tree), "((a,b)'100:n2',(c,d)'95:n3')n1;\n")
        # unusual cases: empty/None/missing supports are left alone,
        # and tip supports are ignored
        tree = TreeNode.read(['(((a,b)n2,(c,d)n3)n6,(e,f)n4,(g,h)n5)n1;'])
        tree.find('n2').support = 100
        tree.find('n3').support = 0
        tree.find('n4').support = ''
        tree.find('n5').support = None
        # n6 has no `support` attribute
        tree.find('a').support = 95  # tips shouldn't have support
        support_to_label(tree)
        exp = "(((a,b)'100:n2',(c,d)'0:n3')n6,(e,f)n4,(g,h)n5)n1;\n"
        self.assertEqual(str(tree), exp)
    def test_walk_copy(self):
        """Test copying a subtree by walking from a neighbor node.

        Error cases (root of a rooted tree, non-neighbor pairs) are checked
        first, then every valid position/direction combination.
        """
        tree1 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,(d:0.8,e:0.6)f:1.2)g:0.4,'
                               '(h:0.5,i:0.7)j:1.8)k;'])
        # test pos = root
        msg = 'Cannot walk from root of a rooted tree.'
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('k'), tree1.find('j'))
        msg = 'Source and node are not neighbors.'
        # test pos = derived
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('a'), tree1.find('b'))
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('c'), tree1.find('f'))
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('f'), tree1.find('j'))
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('f'), tree1.find('k'))
        # test pos = basal
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('g'), tree1.find('a'))
        with self.assertRaisesRegex(ValueError, msg):
            walk_copy(tree1.find('g'), tree1.find('k'))
        # pos = derived, move = up
        exp = TreeNode.read(['(b:0.8,((d:0.8,e:0.6)f:1.2,(h:0.5,i:0.7)j:2.2)'
                             'g:2.4)c:1.0;'])
        obs = walk_copy(tree1.find('c'), tree1.find('a'))
        self.assertTrue(_exact_compare(exp, obs))
        # pos = derived, move = down
        exp = TreeNode.read(['(d:0.8,e:0.6)f:1.2;'])
        obs = walk_copy(tree1.find('f'), tree1.find('g'))
        self.assertTrue(_exact_compare(exp, obs))
        # pos = basal, move = top
        exp = TreeNode.read(['((d:0.8,e:0.6)f:1.2,(h:0.5,i:0.7)j:2.2)g:2.4;'])
        obs = walk_copy(tree1.find('g'), tree1.find('c'))
        self.assertTrue(_exact_compare(exp, obs))
        # pos = basal, move = bottom
        exp = TreeNode.read(['(h:0.5,i:0.7)j:2.2;'])
        obs = walk_copy(tree1.find('j'), tree1.find('g'))
        self.assertTrue(_exact_compare(exp, obs))
        tree2 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,d:0.8)e:0.6,f:1.2,'
                               'g:0.4)h:0.5;'])
        # pos = basal, move = down
        exp = TreeNode.read(['((a:1.0,b:0.8)c:2.4,d:0.8)e:0.6;'])
        obs = walk_copy(tree2.find('e'), tree2.find('h'))
        self.assertTrue(_exact_compare(exp, obs))
        # pos = basal, move = up
        exp = TreeNode.read(['(d:0.8,(f:1.2,g:0.4)h:0.6)e:2.4;'])
        obs = walk_copy(tree2.find('e'), tree2.find('c'))
        self.assertTrue(_exact_compare(exp, obs))
    def test_root_above(self):
        """Test rooting a tree above a given node.

        The new root is placed on the branch above the node, splitting that
        branch's length (expected Newick strings encode the halved lengths).
        """
        # test rooted tree
        tree1 = TreeNode.read(['(((a:1.0,b:0.8)c:2.4,(d:0.8,e:0.6)f:1.2)g:0.4,'
                               '(h:0.5,i:0.7)j:1.8)k;'])
        tree1_cg = root_above(tree1.find('c'))
        exp = TreeNode.read(['((a:1.0,b:0.8)c:1.2,((d:0.8,e:0.6)f:1.2,(h:0.5,'
                             'i:0.7)j:2.2)g:1.2);'])
        self.assertTrue(_exact_compare(exp, tree1_cg))
        tree1_ij = root_above(tree1.find('i'))
        exp = TreeNode.read(['(i:0.35,(h:0.5,((a:1.0,b:0.8)c:2.4,(d:0.8,'
                             'e:0.6)f:1.2)g:2.2)j:0.35);'])
        self.assertTrue(_exact_compare(exp, tree1_ij))
        # test unrooted tree
        tree2 = TreeNode.read(['(((a:0.6,b:0.5)g:0.3,c:0.8)h:0.4,(d:0.4,'
                               'e:0.5)i:0.5,f:0.9)j;'])
        tree2_ag = root_above(tree2.find('a'))
        exp = TreeNode.read(['(a:0.3,(b:0.5,(c:0.8,((d:0.4,e:0.5)i:0.5,'
                             'f:0.9)j:0.4)h:0.3)g:0.3);'])
        self.assertTrue(_exact_compare(exp, tree2_ag))
        tree2_gh = root_above(tree2.find('g'))
        exp = TreeNode.read(['((a:0.6,b:0.5)g:0.15,(c:0.8,((d:0.4,e:0.5)i:0.5,'
                             'f:0.9)j:0.4)h:0.15);'])
        self.assertTrue(_exact_compare(exp, tree2_gh))
        # test unrooted tree with 1 basal node
        tree3 = TreeNode.read(['(((a:0.4,b:0.3)e:0.1,(c:0.4,'
                               'd:0.1)f:0.2)g:0.6)h:0.2;'])
        tree3_ae = root_above(tree3.find('a'))
        exp = TreeNode.read(['(a:0.2,(b:0.3,((c:0.4,d:0.1)f:0.2,'
                             'h:0.6)g:0.1)e:0.2);'])
        self.assertTrue(_exact_compare(exp, tree3_ae))
def test_unroot_at(self):
    """Test unrooting a tree at a given node.

    Covers topology, branch support transfer, and branch length
    redistribution (the two root-child branches merge).
    """
    # sample example from doctest of scikit-bio's `root_at`
    tree = TreeNode.read(['(((a,b)c,(d,e)f)g,h)i;'])
    obs = unroot_at(tree.find('c'))
    exp = TreeNode.read(['(((d,e)f,h)g,a,b)c;'])
    self.assertTrue(_exact_compare(obs, exp))
    # test branch support handling
    tree.find('c').support = 95
    tree.find('f').support = 99
    obs = unroot_at(tree.find('c'))
    exp = TreeNode.read(["(((d,e)'99:f',h)'95:g',a,b)c;"])
    assign_supports(exp)
    self.assertTrue(_exact_compare(obs, exp))
    # test branch length handling
    tree = TreeNode.read([
        '(((a:1.1,b:2.2)c:1.3,(d:1.4,e:0.8)f:0.6)g:0.4,h:3.1)i;'])
    obs = unroot_at(tree.find('c'))
    # h's branch absorbs the old root branch: 3.1 + 0.4 = 3.5
    exp = TreeNode.read([
        '(((d:1.4,e:0.8)f:0.6,h:3.5)g:1.3,a:1.1,b:2.2)c;'])
    self.assertTrue(_exact_compare(obs, exp))
def test_exact_compare(self):
    """Test exact tree comparison by name, branch length and support."""
    # test name
    tree0 = TreeNode.read(['((e,d)f,(c,(a,b)));'])
    tree1 = TreeNode.read(['(((a,b),c),(d,e)f);'])
    self.assertTrue(_exact_compare(tree1, tree1))
    self.assertFalse(_exact_compare(tree0, tree1))
    # test length
    tree2 = TreeNode.read(['(((a:1,b):2,c:1),(d:1,e:2)f:1);'])
    self.assertTrue(_exact_compare(tree2, tree2))
    self.assertFalse(_exact_compare(tree1, tree2))
    # a missing length and an explicit 0.0 length must not compare equal
    tree3 = TreeNode.read(['(((a:1,b:0.0):2,c:1):0.0,(d:1,e:2)f:1);'])
    self.assertTrue(_exact_compare(tree3, tree3))
    self.assertFalse(_exact_compare(tree2, tree3))
    # test support
    tree4 = TreeNode.read(['(((a:1,b:1)95:2,c:1)98:3,(d:1,e:2)0.0:1);'])
    tree5 = TreeNode.read(['(((a:1,b:1)95:2,c:1)98:3,(d:1,e:2):1);'])
    assign_supports(tree4)
    self.assertTrue(_exact_compare(tree4, tree4))
    self.assertFalse(_exact_compare(tree4, tree5))
    # even after assigning supports, 0.0 vs missing support still differs
    assign_supports(tree5)
    self.assertFalse(_exact_compare(tree4, tree5))
def test_calc_split_metrics(self):
    """Test split-related metrics on every node of a tree.

    Example from Fig. 9a of Puigbo, et al., 2009, J Biol::

                                          /-A
                                /n9------|
                      /n8------|          \\-B
                     |         |
            /n4------|          \\-C
           |         |
           |         |          /-D
           |          \\n7------|
           |                    \\-E
           |
           |                    /-F
        -n1------|    /n6------|
           |         |          \\-G
           |-n3------|
           |         |          /-H
           |          \\n5------|
           |                    \\-I
           |
           |          /-J
            \\n2------|
                      \\-K

    For each node, checks: number of descendant tips (`n`), number of
    splits below (`splits`), level from root (`prelevel`), and levels
    down to each descendant tip (`postlevels`).
    """
    tree = TreeNode.read([
        '((((A,B)n9,C)n8,(D,E)n7)n4,((F,G)n6,(H,I)n5)n3,(J,K)n2)n1;'
    ])
    calc_split_metrics(tree)
    obs = {x.name: [getattr(x, y) for y in
                    ('n', 'splits', 'prelevel', 'postlevels')]
           for x in tree.traverse()}
    exp = {
        'n1': [11, 9, 1, [5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3]],
        'n4': [5, 4, 2, [4, 4, 3, 3, 3]],
        'n3': [4, 3, 2, [3, 3, 3, 3]],
        'n2': [2, 1, 2, [2, 2]],
        'n8': [3, 2, 3, [3, 3, 2]],
        'n7': [2, 1, 3, [2, 2]],
        'n6': [2, 1, 3, [2, 2]],
        'n5': [2, 1, 3, [2, 2]],
        'J': [1, 0, 3, [1]],
        'K': [1, 0, 3, [1]],
        'n9': [2, 1, 4, [2, 2]],
        'C': [1, 0, 4, [1]],
        'D': [1, 0, 4, [1]],
        'E': [1, 0, 4, [1]],
        'F': [1, 0, 4, [1]],
        'G': [1, 0, 4, [1]],
        'H': [1, 0, 4, [1]],
        'I': [1, 0, 4, [1]],
        'A': [1, 0, 5, [1]],
        'B': [1, 0, 5, [1]]
    }
    self.assertDictEqual(obs, exp)
def test_calc_length_metrics(self):
    """Test branch-length metrics (height, depths, RED) on every node.

    Example from Fig. 1a of Parks et al. (2018)::

                           /--1--A
                   /n3--1--|
                   |        \\--1--B
        /n2----2----|
        |          |          /--1--C
    -n1-|           \\n4----2----|
        |                     \\----2----D
        |
         \\------3------E
    """
    tree = TreeNode.read(['(((A:1,B:1)n3:1,(C:1,D:2)n4:2)n2:2,E:3)n1;'])
    calc_length_metrics(tree)
    # RED (relative evolutionary divergence) is rounded to avoid float noise
    obs = {x.name: {'height': x.height, 'depths': x.depths,
                    'red': round(x.red, 7)} for x in tree.traverse()}
    exp = {'n1': {'height': 0.0, 'depths': [4.0, 4.0, 5.0, 6.0, 3.0],
                  'red': 0.0},
           'n2': {'height': 2.0, 'depths': [2.0, 2.0, 3.0, 4.0],
                  'red': 0.4210526},
           'n3': {'height': 3.0, 'depths': [1.0, 1.0], 'red': 0.7105263},
           'n4': {'height': 4.0, 'depths': [1.0, 2.0], 'red': 0.7518797},
           'A': {'height': 4.0, 'depths': [0.0], 'red': 1.0},
           'B': {'height': 4.0, 'depths': [0.0], 'red': 1.0},
           'C': {'height': 5.0, 'depths': [0.0], 'red': 1.0},
           'D': {'height': 6.0, 'depths': [0.0], 'red': 1.0},
           'E': {'height': 3.0, 'depths': [0.0], 'red': 1.0}}
    self.assertDictEqual(obs, exp)
def test_format_newick(self):
    """Test Newick string formatting options.

    Options: `keep_space` (quote names containing spaces instead of
    replacing with underscores), `max_f` (max digits for float point
    numbers), `max_e` (max digits for scientific notation).
    """
    newick = '((A_1:1.05,B_2:1.68):2.24,(C:0.28,D:1.14):1.73e-10);'
    tree = TreeNode.read([newick])
    # default behavior (same as TreeNode.write)
    self.assertEqual(format_newick(tree), newick)
    # keep space
    exp = "(('A 1':1.05,'B 2':1.68):2.24,(C:0.28,D:1.14):1.73e-10);"
    self.assertEqual(format_newick(tree, keep_space=True), exp)
    # specify digits for float point
    exp = '((A_1:1.1,B_2:1.7):2.2,(C:0.28,D:1.1):1.73e-10);'
    self.assertEqual(format_newick(tree, max_f=2), exp)
    # specify digits for scientific notation
    exp = '((A_1:1.05,B_2:1.68):2.24,(C:0.28,D:1.14):1.7e-10);'
    self.assertEqual(format_newick(tree, max_e=2), exp)
    # all options enabled
    exp = "(('A 1':1.1,'B 2':1.7):2.2,(C:0.28,D:1.1):1.7e-10);"
    self.assertEqual(format_newick(tree, True, 2, 2), exp)
def test_root_by_outgroup(self):
    """Test rooting a tree by an outgroup taxon set.

    Covers monophyletic / non-monophyletic outgroups, single-taxon and
    superset outgroups, strict mode, and error conditions.
    """
    tree = TreeNode.read(['((((a,b),(c,d)),(e,f)),g);'])
    # outgroup is monophyletic
    obs = root_by_outgroup(tree, outgroup=['a', 'b'])
    exp = TreeNode.read(['((a,b),((c,d),((e,f),g)));'])
    self.assertTrue(_exact_compare(obs, exp))
    # outgroup is monophyletic after rotating
    obs = root_by_outgroup(tree, outgroup=['e', 'f', 'g'])
    exp = TreeNode.read(['(((e,f),g),((c,d),(b,a)));'])
    self.assertTrue(_exact_compare(obs, exp))
    # outgroup is not monophyletic
    msg = 'Outgroup is not monophyletic in tree.'
    with self.assertRaisesRegex(ValueError, msg):
        root_by_outgroup(tree, outgroup=['a', 'c'])
    # outgroup is single taxon
    obs = root_by_outgroup(tree, outgroup=['a'])
    exp = TreeNode.read(['(a,(b,((c,d),((e,f),g))));'])
    self.assertTrue(_exact_compare(obs, exp))
    # outgroup has extra taxa (absent from tree) -- tolerated by default
    obs = root_by_outgroup(tree, outgroup=['a', 'b', 'x'])
    exp = TreeNode.read(['((a,b),((c,d),((e,f),g)));'])
    self.assertTrue(_exact_compare(obs, exp))
    # outgroup has extra taxa but strict mode
    msg = 'Outgroup is not a subset of tree taxa.'
    with self.assertRaisesRegex(ValueError, msg):
        root_by_outgroup(tree, outgroup=['a', 'b', 'x'], strict=True)
    # outgroup is not in tree
    msg = 'None of outgroup taxa are present in tree.'
    with self.assertRaisesRegex(ValueError, msg):
        root_by_outgroup(tree, outgroup=['x', 'y'])
    # outgroup is the whole tree
    msg = 'Outgroup constitutes the entire tree.'
    with self.assertRaisesRegex(ValueError, msg):
        root_by_outgroup(tree, outgroup='abcdefg')
    # generate unrooted tree
    obs = root_by_outgroup(tree, outgroup=['a', 'b'], unroot=True)
    exp = TreeNode.read(['(((e,f),g),(a,b),(c,d));'])
    self.assertTrue(_exact_compare(obs, exp))
def test_restore_rooting(self):
    """Test transferring the rooting scenario from a source to a target
    tree with identical taxa but potentially different rooting."""
    # rooted source
    source = TreeNode.read(['(((e,f),g),((c,d),(b,a)));'])
    target = TreeNode.read(['(((a,b),(c,d)),(e,f),g);'])
    rooted = restore_rooting(source, target)
    self.assertTrue(_exact_compare(rooted, source))
    # unrooted source
    source = TreeNode.read(['(((e,f),g),(a,b),(c,d));'])
    unrooted = restore_rooting(source, target)
    self.assertTrue(_exact_compare(unrooted, source))
    # test support handling: target supports must carry over to the
    # re-rooted result, including the split at the new root
    source = TreeNode.read(['(((e,f),g),((c,d),(b,a)));'])
    target = TreeNode.read(['(((a,b)95,(c,d)90)80,(e,f)100,g);'])
    assign_supports(target)
    obs = restore_rooting(source, target)
    exp = TreeNode.read(['(((e,f)100,g)80,((c,d)90,(b,a)95)80);'])
    assign_supports(exp)
    self.assertTrue(_exact_compare(obs, exp))
    # taxa don't match
    msg = 'Source and target trees have different taxa.'
    with self.assertRaisesRegex(ValueError, msg):
        restore_rooting(source, TreeNode.read(['((a,b),(c,d));']))
def test_restore_node_labels(self):
    """Test copying internal node labels from a source tree onto the
    matching clades of a target tree."""
    # simple case
    source = TreeNode.read(['((a,b)x,(c,d)y);'])
    target = TreeNode.read(['((a:0.5,b:0.6):1.2,(c:1.0,d:1.4):0.4);'])
    obs = restore_node_labels(source, target)
    exp = TreeNode.read(['((a:0.5,b:0.6)x:1.2,(c:1.0,d:1.4)y:0.4);'])
    self.assertTrue(_exact_compare(obs, exp))
    # complex case with missing label, extra taxa, label overwrittern
    source = TreeNode.read(['(((a,b)85,c)90,((d,e)98,(f,g)93));'])
    target = TreeNode.read(['((((g,f),(e,d))x,(c,(a,b))y),h);'])
    obs = restore_node_labels(source, target)
    exp = TreeNode.read(['((((g,f)93,(e,d)98)x,(c,(a,b)85)90),h);'])
    self.assertTrue(_exact_compare(obs, exp))
    # a duplicated node label
    msg = 'Duplicated node label "x" found.'
    with self.assertRaisesRegex(ValueError, msg):
        restore_node_labels(TreeNode.read(['((a,b)x,(c,d)x);']), target)
def test_restore_node_order(self):
    """Test reordering target tree nodes to match a source tree with the
    same topology but differently rotated clades."""
    source = TreeNode.read(['(((a,b),(c,d)),((e,f),g));'])
    target = TreeNode.read(['((g,(e,f)100),((d,c)80,(a,b)90));'])
    obs = restore_node_order(source, target)
    exp = TreeNode.read(['(((a,b)90,(c,d)80),((e,f)100,g));'])
    self.assertTrue(_exact_compare(obs, exp))
    msg = 'Two trees have different sizes.'
    with self.assertRaisesRegex(ValueError, msg):
        restore_node_order(TreeNode.read(['((a,b),(c,d));']), target)
    msg = 'Two trees have different topologies.'
    with self.assertRaisesRegex(ValueError, msg):
        restore_node_order(
            TreeNode.read(['((((a,b),c),d),(e,(f,g)));']), target)
def test_get_base(self):
    """Test finding the basal node (child of root) above a given node."""
    tree = TreeNode.read(['(((a,b)n6,(c,d)n5)n3,((e,f)n4,g)n2)n1;'])
    self.assertEqual(get_base(tree.find('a')).name, 'n3')
    self.assertEqual(get_base(tree.find('e')).name, 'n2')
    # works on internal nodes too
    obs = get_base(tree.lca([tree.find('a'), tree.find('b')])).name
    self.assertEqual(obs, 'n3')
    # the root itself has no base
    msg = 'Root has no base.'
    with self.assertRaisesRegex(ValueError, msg):
        get_base(tree)
def test_calc_bidi_minlevels(self):
    """Test bidirectional minimum level (steps to nearest tip in either
    direction) for every node."""
    # an unrooted tree (typical use case)
    tree = TreeNode.read(['(((a,b)n4,(c,d)n5)n2,(((e,f)n8,(g,h)n9)n6,'
                          '((i,j)n10,(k,l)n11)n7)n3,m)n1;'])
    calc_bidi_minlevels(tree)
    # tips should always be 1
    self.assertSetEqual(set(x.minlevel for x in tree.tips()), {1})
    # internal nodes
    obs = {x.name: x.minlevel for x in tree.non_tips(include_self=True)}
    exp = {'n1': 2, 'n2': 3, 'n3': 3, 'n4': 2, 'n5': 2, 'n6': 3, 'n7': 3,
           'n8': 2, 'n9': 2, 'n10': 2, 'n11': 2}
    self.assertDictEqual(obs, exp)
    # a rooted tree (unusual but could happen)
    tree = TreeNode.read(['(((a,b)n3,(c,d)n4)n2,e)n1;'])
    calc_bidi_minlevels(tree)
    obs = {x.name: x.minlevel for x in tree.non_tips(include_self=True)}
    exp = {'n1': 2, 'n2': 2, 'n3': 2, 'n4': 2}
    self.assertDictEqual(obs, exp)
def test_calc_bidi_mindepths(self):
    """Test bidirectional minimum depth (branch-length distance to the
    nearest tip in either direction) for every node."""
    tree = TreeNode.read(['(((a:0.5,b:0.7)n5:1.1,c:1.7)n2:0.3,((d:0.8,'
                          'e:0.6)n6:0.9,(f:1.2,g:0.5)n7:0.8)n3:1.3,'
                          '(h:0.4,i:0.3)n4:0.9)n1;'])
    calc_bidi_mindepths(tree)
    # tips should always be 1
    self.assertSetEqual(set(x.mindepth for x in tree.tips()), {0.0})
    # internal nodes
    obs = {x.name: x.mindepth for x in tree.non_tips(include_self=True)}
    exp = {'n1': 1.2, 'n2': 1.5, 'n3': 1.3, 'n4': 0.3, 'n5': 0.5,
           'n6': 0.6, 'n7': 0.5}
    self.assertDictEqual(obs, exp)
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9688495 | # -*- coding: utf-8 -*-
from setuptools import setup, Extension
setup(
name='extension_dist',
version='0.1',
description="A dummy distribution",
long_description="A distribution with an extension module.",
classifiers=[
"Topic :: Software Development :: Testing",
],
author='<NAME>',
author_email='<EMAIL>',
keywords='dummy testing',
packages=['extension_dist'],
ext_modules=[
Extension('extension_dist.test_ext', ['test_ext.c']),
],
eager_resources=[
'extension_dist/answer.dat',
],
package_data={
'extension_dist': ['*.dat'],
},
)
| StarcoderdataPython |
5168618 | import cv2
import keyboard
import numpy as np
import open3d as o3d
import pygame
from transforms3d.axangles import axangle2mat
import config
from capture import OpenCVCapture
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from utils import *
def live_application(capture):
    """
    Launch an application that reads from a webcam and estimates hand pose at
    real-time.

    The captured hand must be the right hand, but will be flipped internally
    and rendered.

    Parameters
    ----------
    capture : object
        An object from `capture.py` to read capture stream from.
    """
    ############ output visualization ############
    view_mat = axangle2mat([1, 0, 0], np.pi)  # align different coordinate systems
    window_size = 1080

    # build the MANO hand mesh and register it with the Open3D viewer
    hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH)
    mesh = o3d.geometry.TriangleMesh()
    mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
    mesh.vertices = \
        o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000)
    mesh.compute_vertex_normals()

    viewer = o3d.visualization.Visualizer()
    viewer.create_window(
        width=window_size + 1, height=window_size + 1,
        window_name='Minimal Hand - output'
    )
    viewer.add_geometry(mesh)

    # configure a pinhole camera looking at the mesh from the origin
    view_control = viewer.get_view_control()
    cam_params = view_control.convert_to_pinhole_camera_parameters()
    extrinsic = cam_params.extrinsic.copy()
    extrinsic[0:3, 3] = 0
    cam_params.extrinsic = extrinsic
    cam_params.intrinsic.set_intrinsics(
        window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY,
        window_size // 2, window_size // 2
    )
    view_control.convert_from_pinhole_camera_parameters(cam_params)
    view_control.set_constant_z_far(1000)
    render_option = viewer.get_render_option()
    render_option.load_from_json('./render_option.json')
    viewer.update_renderer()

    ############ input visualization ############
    pygame.init()
    display = pygame.display.set_mode((window_size, window_size))
    pygame.display.set_caption('Minimal Hand - input')

    ############ misc ############
    # One-Euro filter smooths vertex jitter between frames
    mesh_smoother = OneEuroFilter(4.0, 0.0)
    clock = pygame.time.Clock()
    model = ModelPipeline()

    while True:
        frame_large = capture.read()
        if frame_large is None:
            continue
        # center-crop the frame to a square before resizing
        if frame_large.shape[0] > frame_large.shape[1]:
            margin = int((frame_large.shape[0] - frame_large.shape[1]) / 2)
            frame_large = frame_large[margin:-margin]
        else:
            margin = int((frame_large.shape[1] - frame_large.shape[0]) / 2)
            frame_large = frame_large[:, margin:-margin]

        # mirror horizontally (captured right hand is rendered flipped)
        frame_large = np.flip(frame_large, axis=1).copy()
        frame = imresize(frame_large, (128, 128))

        # estimate joint rotations and convert MPII order to MANO order
        _, theta_mpii = model.process(frame)
        theta_mano = mpii_to_mano(theta_mpii)

        v = hand_mesh.set_abs_quat(theta_mano)
        v *= 2  # for better visualization
        v = v * 1000 + np.array([0, 0, 400])
        v = mesh_smoother.process(v)

        # push updated vertices into the Open3D mesh and redraw
        mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
        mesh.vertices = o3d.utility.Vector3dVector(np.matmul(view_mat, v.T).T)
        mesh.paint_uniform_color(config.HAND_COLOR)
        mesh.compute_triangle_normals()
        mesh.compute_vertex_normals()
        viewer.update_geometry(mesh)
        viewer.poll_events()

        # blit the raw camera frame into the pygame input window
        display.blit(
            pygame.surfarray.make_surface(
                np.transpose(
                    imresize(frame_large, (window_size, window_size)
                ), (1, 0, 2))
            ),
            (0, 0)
        )
        pygame.display.update()

        if keyboard.is_pressed("esc"):
            break

        clock.tick(30)  # cap the loop at 30 FPS
# Run the live demo against the default OpenCV webcam capture.
if __name__ == '__main__':
    live_application(OpenCVCapture())
| StarcoderdataPython |
11226465 | from time import time
from tori.decorator.common import singleton
from tori.centre import settings as AppSettings
from tori.common import Enigma
@singleton
class GuidGenerator(object):
    """Singleton service that produces hash-based globally unique IDs."""

    def generate(self):
        """Return a GUID derived from the current timestamp.

        When the application settings define ``cookie_secret``, it is
        mixed into the hashed key for extra entropy; otherwise the
        timestamp alone is hashed.
        """
        now = time()
        if 'cookie_secret' in AppSettings:
            key = '%s/%s' % (AppSettings['cookie_secret'], now)
        else:
            key = str(now)
        return Enigma.instance().hash(key)
| StarcoderdataPython |
8090292 | import scrapy
import time
class HoboSpider(scrapy.Spider):
    """Spider that scrapes product name, URL and price from hobo.nl
    category listing pages, following pagination links."""

    name = "hobo"
    start_urls = [
        'https://www.hobo.nl/hi-fi.html',
        'https://www.hobo.nl/streaming.html',
        'https://www.hobo.nl/home-cinema-beeld.html',
        'https://www.hobo.nl/luidsprekers.html',
        'https://www.hobo.nl/hoofdtelefoons.html',
        'https://www.hobo.nl/kabels.html',
        'https://www.hobo.nl/accessoires.html'
    ]

    def parse(self, response):
        """Yield one item dict per product, then follow the next page.

        :param response: the listing-page response to parse.
        """
        # Politeness delay. NOTE(review): blocking sleep stalls the whole
        # reactor; prefer the DOWNLOAD_DELAY setting in scrapy.
        time.sleep(2)
        for info in response.css('li.item'):
            yield {
                'product_name': info.css('p.brand-name::text').extract_first() + ' ' + info.css('h2.product-name::text').extract_first(),
                # BUG FIX: query the item selector, not the whole response --
                # the original returned the first product URL for every item.
                'url': info.css('a.product-image::attr(href)').extract_first(),
                # price text looks like a currency prefix + amount; strip it
                'price': info.css('span.price::text').extract_first().strip()[2:]
            }
        # BUG FIX: extract_first() returns None on the last page instead of
        # raising IndexError, making the None check below actually reachable.
        # Also dropped .encode('utf8'): scrapy.Request requires a str URL
        # under Python 3, not bytes.
        next_page_url = response.css('a.next::attr(href)').extract_first()
        if next_page_url is not None:
            yield scrapy.Request(url=next_page_url, callback=self.parse)
| StarcoderdataPython |
1809358 | <reponame>nathanfdunn/ipymd<filename>ipymd/formats/atlas.py
# -*- coding: utf-8 -*-
"""Atlas readers and writers."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import re
from .markdown import BaseMarkdownReader, BaseMarkdownWriter
from ..ext.six.moves.html_parser import HTMLParser
from ..ext.six.moves.html_entities import name2codepoint
from ..utils.utils import _ensure_string
#------------------------------------------------------------------------------
# HTML utility functions
#------------------------------------------------------------------------------
class MyHTMLParser(HTMLParser):
    """HTML parser that recognizes Atlas code and math blocks.

    After feeding HTML, `is_code`/`is_math` tell which kind of block was
    seen, `display` holds the math display mode ('inline' or 'block'),
    and `data` accumulates the text content of the recognized block.
    """

    def __init__(self, *args, **kwargs):
        HTMLParser.__init__(self, *args, **kwargs)
        # Flags and buffers populated while parsing.
        self.is_code = False
        self.is_math = False
        self.display = ''
        self.data = ''

    def handle_starttag(self, tag, attrs):
        # A <pre data-type="programlisting"> opens a code block.
        if tag == 'pre':
            if ('data-type', 'programlisting') in attrs:
                self.is_code = True
        # A <span data-type="tex"> opens a math block; its data-display
        # attribute (if any) selects inline vs. block rendering.
        elif tag == 'span':
            if ('data-type', 'tex') in attrs:
                self.is_math = True
                if ('data-display', 'inline') in attrs:
                    self.display = 'inline'
                elif ('data-display', 'block') in attrs:
                    self.display = 'block'

    def handle_data(self, data):
        # Collect text only once a code or math block has been opened.
        if self.is_code or self.is_math:
            self.data += data
def _get_html_contents(html):
    """Process a HTML block and detects whether it is a code block,
    a math block, or a regular HTML block.

    Returns a ``(type, contents)`` tuple where *type* is ``'code'``,
    ``'math'``, or ``''`` for regular HTML (in which case *contents* is
    also empty).
    """
    parser = MyHTMLParser()
    parser.feed(html)
    if parser.is_code:
        kind = 'code'
    elif parser.is_math:
        kind = 'math'
    else:
        # Not a recognized special block: regular HTML.
        return '', ''
    return kind, parser.data.strip()
#------------------------------------------------------------------------------
# Atlas
#------------------------------------------------------------------------------
class AtlasReader(BaseMarkdownReader):
    """Markdown reader for the O'Reilly Atlas format.

    Atlas wraps executable code in <pre data-type="programlisting"> tags
    and math in <span data-type="tex"> tags; this reader converts those
    back into notebook code/markdown cells.
    """

    # Template used by the writer for executable code blocks.
    code_wrap = ('<pre data-code-language="{lang}"\n'
                 '     data-executable="true"\n'
                 '     data-type="programlisting">\n'
                 '{code}\n'
                 '</pre>')
    # Template used by the writer for inline math equations.
    math_wrap = '<span class="math-tex" data-type="tex">{equation}</span>'

    # Utility methods
    # -------------------------------------------------------------------------

    def _remove_math_span(self, source):
        # Remove any <span> equation tag that would be in a Markdown cell.
        source = source.replace('<span class="math-tex" data-type="tex">', '')
        source = source.replace('</span>', '')
        return source

    # Parser
    # -------------------------------------------------------------------------

    def parse_fences(self, m):
        # Fenced code in Atlas source stays a Markdown cell (code cells
        # are represented as HTML <pre> blocks instead).
        return self._markdown_cell_from_regex(m)

    def parse_block_code(self, m):
        return self._markdown_cell_from_regex(m)

    def parse_block_html(self, m):
        # Classify the HTML block: code -> code cell, math or regular
        # HTML -> markdown cell.
        text = m.group(0).strip()
        type, contents = _get_html_contents(text)
        if type == 'code':
            return self._code_cell(contents)
        elif type == 'math':
            return self._markdown_cell(contents)
        else:
            return self._markdown_cell(text)

    def parse_text(self, m):
        text = m.group(0).strip()
        if (text.startswith('<span class="math-tex"') and
                text.endswith('</span>')):
            # Replace '\\(' by '$$' in the notebook.
            text = text.replace('\\\\(', '$$')
            text = text.replace('\\\\)', '$$')
            text = text.strip()
        else:
            # Process math equations.
            text = text.replace('\\\\(', '$')
            text = text.replace('\\\\)', '$')
        # Remove the math <span>.
        text = self._remove_math_span(text)
        # Add the processed Markdown cell.
        return self._markdown_cell(text.strip())
class AtlasWriter(BaseMarkdownWriter):
    """Markdown writer for the O'Reilly Atlas format.

    Converts notebook cells into Atlas HTML: math gets wrapped in
    <span data-type="tex"> tags and code in <pre> programlisting tags.
    """

    # Matches $...$ or $$...$$ math segments in Markdown source.
    _math_regex = '''(?P<dollars>[\$]{1,2})([^\$]+)(?P=dollars)'''

    def append_markdown(self, source, metadata=None):
        source = _ensure_string(source)
        # Wrap math equations with the Atlas <span> tag, converting the
        # dollar delimiters to \\( ... \\).
        source = re.sub(self._math_regex,
                        AtlasReader.math_wrap.format(equation=r'\\\\(\2\\\\)'),
                        source)
        # Write the processed Markdown.
        self._output.write(source.rstrip())

    def append_code(self, input, output=None, metadata=None):
        # Wrap code.
        wrapped = AtlasReader.code_wrap.format(lang='python', code=input)
        # Write the HTML code block.
        self._output.write(wrapped)
# Format descriptor registered with ipymd's format manager.
ATLAS_FORMAT = dict(
    reader=AtlasReader,
    writer=AtlasWriter,
    file_extension='.md',
    file_type='text',
)
| StarcoderdataPython |
3378445 | <gh_stars>10-100
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains user-facing synchronous clients for the
Azure IoTHub Device SDK for Python.
"""
import logging
import threading
from .abstract_clients import (
AbstractIoTHubClient,
AbstractIoTHubDeviceClient,
AbstractIoTHubModuleClient,
)
from .models import Message
from .inbox_manager import InboxManager
from .sync_inbox import SyncClientInbox
from .pipeline import constant
logger = logging.getLogger(__name__)
class GenericIoTHubClient(AbstractIoTHubClient):
    """A superclass representing a generic synchronous client.
    This class needs to be extended for specific clients.
    """

    def __init__(self, **kwargs):
        """Initializer for a generic synchronous client.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate

        TODO: How to document kwargs?
        Possible values: iothub_pipeline, edge_pipeline
        """
        # Depending on the subclass calling this __init__, there could be different arguments,
        # and the super() call could call a different class, due to the different MROs
        # in the class hierarchies of different clients. Thus, args here must be passed along as
        # **kwargs.
        super(GenericIoTHubClient, self).__init__(**kwargs)
        self._inbox_manager = InboxManager(inbox_type=SyncClientInbox)
        self._iothub_pipeline.on_connected = self._on_connected
        self._iothub_pipeline.on_disconnected = self._on_disconnected
        self._iothub_pipeline.on_method_request_received = self._inbox_manager.route_method_request
        self._iothub_pipeline.on_twin_patch_received = self._inbox_manager.route_twin_patch

    def _on_connected(self):
        """Helper handler that is called upon an iothub pipeline connect"""
        logger.info("Connection State - Connected")

    def _on_disconnected(self):
        """Helper handler that is called upon an iothub pipeline disconnect"""
        logger.info("Connection State - Disconnected")
        # Pending method requests cannot be answered after a disconnect, so
        # drop them rather than leaving callers blocked on stale inboxes.
        self._inbox_manager.clear_all_method_requests()
        logger.info("Cleared all pending method requests due to disconnect")

    def connect(self):
        """Connects the client to an Azure IoT Hub or Azure IoT Edge Hub instance.

        The destination is chosen based on the credentials passed via the auth_provider parameter
        that was provided when this object was initialized.

        This is a synchronous call, meaning that this function will not return until the connection
        to the service has been completely established.
        """
        logger.info("Connecting to Hub...")

        # Block on an Event until the pipeline's completion callback fires.
        connect_complete = threading.Event()

        def callback():
            connect_complete.set()
            logger.info("Successfully connected to Hub")

        self._iothub_pipeline.connect(callback=callback)
        connect_complete.wait()

    def disconnect(self):
        """Disconnect the client from the Azure IoT Hub or Azure IoT Edge Hub instance.

        This is a synchronous call, meaning that this function will not return until the connection
        to the service has been completely closed.
        """
        logger.info("Disconnecting from Hub...")

        disconnect_complete = threading.Event()

        def callback():
            disconnect_complete.set()
            logger.info("Successfully disconnected from Hub")

        self._iothub_pipeline.disconnect(callback=callback)
        disconnect_complete.wait()

    def send_message(self, message):
        """Sends a message to the default events endpoint on the Azure IoT Hub or Azure IoT Edge Hub instance.

        This is a synchronous event, meaning that this function will not return until the event
        has been sent to the service and the service has acknowledged receipt of the event.

        If the connection to the service has not previously been opened by a call to connect, this
        function will open the connection before sending the event.

        :param message: The actual message to send. Anything passed that is not an instance of the
        Message class will be converted to Message object.
        """
        if not isinstance(message, Message):
            message = Message(message)

        logger.info("Sending message to Hub...")

        send_complete = threading.Event()

        def callback():
            send_complete.set()
            logger.info("Successfully sent message to Hub")

        self._iothub_pipeline.send_message(message, callback=callback)
        send_complete.wait()

    def receive_method_request(self, method_name=None, block=True, timeout=None):
        """Receive a method request via the Azure IoT Hub or Azure IoT Edge Hub.

        :param str method_name: Optionally provide the name of the method to receive requests for.
        If this parameter is not given, all methods not already being specifically targeted by
        a different request to receive_method will be received.
        :param bool block: Indicates if the operation should block until a request is received.
        Default True.
        :param int timeout: Optionally provide a number of seconds until blocking times out.

        :raises: InboxEmpty if timeout occurs on a blocking operation.
        :raises: InboxEmpty if no request is available on a non-blocking operation.

        :returns: MethodRequest object representing the received method request.
        """
        # Lazily enable the METHODS pipeline feature on first use.
        if not self._iothub_pipeline.feature_enabled[constant.METHODS]:
            self._enable_feature(constant.METHODS)

        method_inbox = self._inbox_manager.get_method_request_inbox(method_name)

        logger.info("Waiting for method request...")
        method_request = method_inbox.get(block=block, timeout=timeout)
        logger.info("Received method request")
        return method_request

    def send_method_response(self, method_response):
        """Send a response to a method request via the Azure IoT Hub or Azure IoT Edge Hub.

        This is a synchronous event, meaning that this function will not return until the event
        has been sent to the service and the service has acknowledged receipt of the event.

        If the connection to the service has not previously been opened by a call to connect, this
        function will open the connection before sending the event.

        :param method_response: The MethodResponse to send.
        :type method_response: MethodResponse
        """
        logger.info("Sending method response to Hub...")

        send_complete = threading.Event()

        def callback():
            send_complete.set()
            logger.info("Successfully sent method response to Hub")

        self._iothub_pipeline.send_method_response(method_response, callback=callback)
        send_complete.wait()

    def _enable_feature(self, feature_name):
        """Enable an Azure IoT Hub feature.

        This is a synchronous call, meaning that this function will not return until the feature
        has been enabled.

        :param feature_name: The name of the feature to enable.
        See azure.iot.device.common.pipeline.constant for possible values
        """
        # IMPROVED: lazy %-style logging args instead of string concatenation.
        logger.info("Enabling feature:%s...", feature_name)

        enable_complete = threading.Event()

        def callback():
            enable_complete.set()
            logger.info("Successfully enabled feature:%s", feature_name)

        self._iothub_pipeline.enable_feature(feature_name, callback=callback)
        enable_complete.wait()

    def get_twin(self):
        """
        Gets the device or module twin from the Azure IoT Hub or Azure IoT Edge Hub service.

        This is a synchronous call, meaning that this function will not return until the twin
        has been retrieved from the service.

        :returns: Twin object which was retrieved from the hub
        """
        if not self._iothub_pipeline.feature_enabled[constant.TWIN]:
            self._enable_feature(constant.TWIN)

        # hack to work aroud lack of the "nonlocal" keyword in 2.7. The non-local "context"
        # object can be read and modified inside the inner function.
        # (https://stackoverflow.com/a/28433571)
        class context:
            twin = None

        op_complete = threading.Event()

        def on_pipeline_op_complete(retrieved_twin):
            context.twin = retrieved_twin
            op_complete.set()

        self._iothub_pipeline.get_twin(callback=on_pipeline_op_complete)
        op_complete.wait()

        return context.twin

    def patch_twin_reported_properties(self, reported_properties_patch):
        """
        Update reported properties with the Azure IoT Hub or Azure IoT Edge Hub service.

        This is a synchronous call, meaning that this function will not return until the patch
        has been sent to the service and acknowledged.

        If the service returns an error on the patch operation, this function will raise the
        appropriate error.

        :param reported_properties_patch:
        :type reported_properties_patch: dict, str, int, float, bool, or None (JSON compatible values)
        """
        if not self._iothub_pipeline.feature_enabled[constant.TWIN]:
            self._enable_feature(constant.TWIN)

        op_complete = threading.Event()

        def on_pipeline_op_complete():
            op_complete.set()

        self._iothub_pipeline.patch_twin_reported_properties(
            patch=reported_properties_patch, callback=on_pipeline_op_complete
        )
        op_complete.wait()

        # BUG FIX: was a stray debug print() to stdout; use the module logger
        # like every other completion path in this class.
        logger.info("Successfully patched twin reported properties")

    def receive_twin_desired_properties_patch(self, block=True, timeout=None):
        """
        Receive a desired property patch via the Azure IoT Hub or Azure IoT Edge Hub.

        This is a synchronous call, which means the following:
        1. If block=True, this function will block until one of the following happens:
           * a desired proprety patch is received from the Azure IoT Hub or Azure IoT Edge Hub.
           * the timeout period, if provided, elapses.  If a timeout happens, this function will
             raise a InboxEmpty exception
        2. If block=False, this function will return any desired property patches which may have
           been received by the pipeline, but not yet returned to the application.  If no
           desired property patches have been received by the pipeline, this function will raise
           an InboxEmpty exception

        :param bool block: Indicates if the operation should block until a request is received.
        Default True.
        :param int timeout: Optionally provide a number of seconds until blocking times out.

        :raises: InboxEmpty if timeout occurs on a blocking operation.
        :raises: InboxEmpty if no request is available on a non-blocking operation.

        :returns: desired property patch.  This can be dict, str, int, float, bool, or None (JSON compatible values)
        """
        if not self._iothub_pipeline.feature_enabled[constant.TWIN_PATCHES]:
            self._enable_feature(constant.TWIN_PATCHES)
        twin_patch_inbox = self._inbox_manager.get_twin_patch_inbox()

        logger.info("Waiting for twin patches...")
        patch = twin_patch_inbox.get(block=block, timeout=timeout)
        logger.info("twin patch received")
        return patch
class IoTHubDeviceClient(GenericIoTHubClient, AbstractIoTHubDeviceClient):
    """A synchronous device client that connects to an Azure IoT Hub instance.

    Intended for usage with Python 2.7 or compatibility scenarios for Python 3.5.3+.
    """

    def __init__(self, iothub_pipeline):
        """Initializer for a IoTHubDeviceClient.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate

        :param iothub_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type iothub_pipeline: IoTHubPipeline
        """
        super(IoTHubDeviceClient, self).__init__(iothub_pipeline=iothub_pipeline)
        # Route incoming cloud-to-device messages into the inbox manager.
        self._iothub_pipeline.on_c2d_message_received = self._inbox_manager.route_c2d_message

    def receive_message(self, block=True, timeout=None):
        """Receive a message that has been sent from the Azure IoT Hub.

        :param bool block: Indicates if the operation should block until a message is received.
        Default True.
        :param int timeout: Optionally provide a number of seconds until blocking times out.

        :raises: InboxEmpty if timeout occurs on a blocking operation.
        :raises: InboxEmpty if no message is available on a non-blocking operation.

        :returns: Message that was sent from the Azure IoT Hub.
        """
        # Lazily enable the C2D message feature on first use.
        if not self._iothub_pipeline.feature_enabled[constant.C2D_MSG]:
            self._enable_feature(constant.C2D_MSG)
        c2d_inbox = self._inbox_manager.get_c2d_message_inbox()

        logger.info("Waiting for message from Hub...")
        message = c2d_inbox.get(block=block, timeout=timeout)
        logger.info("Message received")
        return message
class IoTHubModuleClient(GenericIoTHubClient, AbstractIoTHubModuleClient):
"""A synchronous module client that connects to an Azure IoT Hub or Azure IoT Edge instance.
Intended for usage with Python 2.7 or compatibility scenarios for Python 3.5.3+.
"""
def __init__(self, iothub_pipeline, edge_pipeline=None):
    """Initializer for a IoTHubModuleClient.

    This initializer should not be called directly.
    Instead, use one of the 'create_from_' classmethods to instantiate

    :param iothub_pipeline: The pipeline used to connect to the IoTHub endpoint.
    :type iothub_pipeline: IoTHubPipeline
    :param edge_pipeline: (OPTIONAL) The pipeline used to connect to the Edge endpoint.
    :type edge_pipeline: EdgePipeline
    """
    super(IoTHubModuleClient, self).__init__(
        iothub_pipeline=iothub_pipeline, edge_pipeline=edge_pipeline
    )
    # Route incoming module-to-module input messages into the inbox manager.
    self._iothub_pipeline.on_input_message_received = self._inbox_manager.route_input_message
def send_message_to_output(self, message, output_name):
"""Sends an event/message to the given module output.
These are outgoing events and are meant to be "output events".
This is a synchronous event, meaning that this function will not return until the event
has been sent to the service and the service has acknowledged receipt of the event.
If the connection to the service has not previously been opened by a call to connect, this
function will open the connection before sending the event.
:param message: message to send to the given output. Anything passed that is not an instance of the
Message class will be converted to Message object.
:param output_name: Name of the output to send the event to.
"""
if not isinstance(message, Message):
message = Message(message)
message.output_name = output_name
logger.info("Sending message to output:" + output_name + "...")
send_complete = threading.Event()
def callback():
logger.info("Successfully sent message to output: " + output_name)
send_complete.set()
self._iothub_pipeline.send_output_event(message, callback=callback)
send_complete.wait()
def receive_message_on_input(self, input_name, block=True, timeout=None):
"""Receive an input message that has been sent from another Module to a specific input.
:param str input_name: The input name to receive a message on.
:param bool block: Indicates if the operation should block until a message is received.
Default True.
:param int timeout: Optionally provide a number of seconds until blocking times out.
:raises: InboxEmpty if timeout occurs on a blocking operation.
:raises: InboxEmpty if no message is available on a non-blocking operation.
:returns: Message that was sent to the specified input.
"""
if not self._iothub_pipeline.feature_enabled[constant.INPUT_MSG]:
self._enable_feature(constant.INPUT_MSG)
input_inbox = self._inbox_manager.get_input_message_inbox(input_name)
logger.info("Waiting for input message on: " + input_name + "...")
message = input_inbox.get(block=block, timeout=timeout)
logger.info("Input message received on: " + input_name)
return message
| StarcoderdataPython |
6520825 | #!/usr/bin/python
from auvlib.data_tools import std_data, gsf_data, utils
from auvlib.bathy_maps import mesh_map
import sys
import os
import numpy as np
import torch
from matplotlib import pyplot as plt
import cv2 # needed for resizing
from auvlib.bathy_maps.gen_utils import clip_to_interval
def predict_sidescan(network, image_input):
    """Run the pix2pix generator on one HWC uint8 image patch.

    The input is converted to a 1xCxHxW batch, scaled to [-1, 1], pushed
    through the network on the GPU, and the prediction is mapped back to a
    HWC uint8 image.
    """
    # HWC -> CHW, then prepend a batch dimension -> 1xCxHxW.
    chw = image_input.transpose(-1, 0, 1)[np.newaxis, :]
    # uint8 [0, 255] -> float32 [-1, 1].
    normed = 2. * (1. / 255. * chw.astype(np.float32)) - 1.
    batch = torch.from_numpy(normed).cuda()  # create the image tensor on GPU
    prediction = network(batch)  # G(A)
    out = prediction[0].cpu().detach().float().numpy()
    # CHW [-1, 1] -> HWC [0, 255].
    out = (np.transpose(out, (1, 2, 0)) + 1) / 2.0 * 255.0
    return out.astype(np.uint8)
def load_network(network_path):
    """Load pix2pix generator weights from *network_path* onto the GPU.

    NOTE(review): assumes a unet_256 generator checkpoint from the
    pytorch-CycleGAN-and-pix2pix project -- confirm against training setup.
    """
    # Imported lazily: comes from the models directory of
    # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
    from models import networks
    generator = networks.define_G(3, 3, 64, 'unet_256', 'batch', True, 'normal', 0.02, [0])
    if isinstance(generator, torch.nn.DataParallel):
        print("Netg is parallell instance")
        generator = generator.module
    state_dict = torch.load(network_path, map_location="cuda")
    # Some checkpoints carry norm-layer metadata that breaks load_state_dict.
    if hasattr(state_dict, '_metadata'):
        del state_dict._metadata
    generator.load_state_dict(state_dict)
    return generator
def generate_sss_map(height_map, network_path):
    """Tile the height map with 20x20 patches and synthesize sidescan imagery.

    Each patch is clipped, upscaled to the network's 256x256 input size,
    color-mapped, run through the generator, and downscaled back into place.
    Returns the assembled RGB image transposed to (cols, rows, 3).
    """
    network = load_network(network_path)
    rows, cols = height_map.shape
    sss_gen_map = np.zeros((rows, cols, 3), dtype=np.uint8)
    interval = 2.
    net_height, net_width = 256, 256
    # Slide a 20x20 window over the height map (xrange: this file is Python 2).
    for row in xrange(0, rows - 20, 20):
        for col in xrange(0, cols - 20, 20):
            patch = height_map[row:row + 20, col:col + 20]
            patch = clip_to_interval(patch, interval)
            patch = cv2.resize(patch, (net_width, net_height), interpolation=cv2.INTER_CUBIC)
            patch = (255. / interval * patch).astype(np.uint8)
            colored = cv2.applyColorMap(patch, cv2.COLORMAP_JET)
            generated = predict_sidescan(network, colored)
            generated = cv2.resize(generated, dsize=(20, 20), interpolation=cv2.INTER_LINEAR)
            sss_gen_map[row:row + 20, col:col + 20, :] = generated
    return sss_gen_map.transpose(1, 0, 2)
def generate_or_load_gen_sss_map(height_map, network_path):
    """Return the generated sidescan map, memoized on disk.

    The result is cached as gen_sss_map.jpg in the working directory so
    repeated runs skip the (slow) network inference.
    """
    cache_path = "gen_sss_map.jpg"
    if os.path.exists(cache_path):
        return cv2.imread(cache_path)
    gen_sss_map = generate_sss_map(height_map, network_path)
    cv2.imwrite(cache_path, gen_sss_map)
    return gen_sss_map
# --- Script entry point (Python 2) ------------------------------------------
# Usage: script.py <gsf_data_path> <generator_weights.pth>
network_path = sys.argv[2] #"/home/nbore/Installs/pytorch-CycleGAN-and-pix2pix/datasets/checkpoints/pix2pix_sonar24/latest_net_G.pth"
gsf_pings = utils.parse_or_load_gsf(sys.argv[1])
mbes_pings = gsf_data.convert_pings(gsf_pings)
# Build a triangle mesh from the multibeam pings at 0.5 m resolution.
V, F, bounds = mesh_map.mesh_from_pings(mbes_pings, 0.5)
# Pick a raster resolution giving roughly 1333 columns across the survey.
resolution = (bounds[1, 0] - bounds[0, 0])/1333.
print "Resolution: ", resolution
height_map, bounds = mesh_map.height_map_from_pings(mbes_pings, resolution)
sss_gen_map = generate_or_load_gen_sss_map(height_map, network_path)
# Split the generated image into channels and drape it over the mesh.
R, G, B = sss_gen_map[:, :, 0], sss_gen_map[:, :, 1], sss_gen_map[:, :, 2]
mesh_map.show_textured_mesh(V, F, R, G, B, bounds)
| StarcoderdataPython |
154984 | <filename>scicite/compute_features.py
""" Module for computing features """
import re
from collections import Counter, defaultdict
from typing import List, Optional, Tuple, Type
import functools
from spacy.tokens.token import Token as SpacyToken
import scicite.constants as constants
from scicite.constants import CITATION_TOKEN
from scicite.resources.lexicons import (AGENT_PATTERNS, ALL_ACTION_LEXICONS,
ALL_CONCEPT_LEXICONS, FORMULAIC_PATTERNS)
from scicite.data import Citation
import logging
# Module-level logger shared by the feature-computation helpers.
logger = logging.getLogger('classifier')
# Path to the precomputed ARC citation-network weight table (TSV).
NETWORK_WEIGHTS_FILE = constants.root_path + '/resources/arc-network-weights.tsv'
def load_patterns(filename, p_dict, label):
    """Read tab-separated citation patterns from *filename* into *p_dict*.

    Lines look like "<pattern>\t<category>\t...". A pattern is stored as a
    token list under the key "<category>_<label>_<n>", where n numbers the
    patterns seen so far for that category. Lines without an '@' token and
    the 'Background' category are skipped; "-lrb-"/"-rrb-" are restored to
    literal parentheses.
    """
    per_category = Counter()
    with open(filename) as handle:
        for line in handle:
            if '@' not in line:
                continue
            columns = line.split("\t")
            # Restore literal parentheses from their escaped tokens.
            pattern = columns[0].replace("-lrb-", "(").replace('-rrb-', ')')
            category = columns[1]
            if category == 'Background':
                continue
            per_category[category] += 1
            key = category + '_' + label + '_' + str(per_category[category])
            p_dict[key] = pattern.split()
def get_values_from_list(inplst, key, is_class=True):
    """Extract *key* from every element of *inplst*, preserving order.

    Args:
        inplst: list of objects (or dicts)
        key: attribute name (or dict key) of interest
        is_class: True -> read with getattr; False -> subscript as a dict
    """
    if is_class:
        return [getattr(item, key) for item in inplst]
    return [item[key] for item in inplst]
def is_in_lexicon(lexicon: dict,
                  sentence: str,
                  count: Optional[bool] = False) -> Tuple[List, List[str]]:
    """For each lexicon category, test/count occurrences of its words in *sentence*.

    :param lexicon: mapping of category name -> list of words/phrases
    :param sentence: the sentence (string) to search (substring matching)
    :param count: if False each feature is a bool (any word of the category
        present); if True each feature is the number of that category's
        words found in the sentence.
    :returns: (features, feature_names) where feature_names are the lexicon
        keys in iteration order and features align with them.
    """
    features = []
    feature_names = []
    for key, word_list in lexicon.items():
        if count:
            # BUGFIX: count per category. Previously a single counter was
            # initialized once before the loop, so later categories reported
            # cumulative totals across all preceding categories.
            features.append(sum(1 for word in word_list if word in sentence))
        else:
            features.append(any(word in sentence for word in word_list))
        feature_names.append(key)
    return features, feature_names
| StarcoderdataPython |
class Solution:
    """
    @param n: An integer
    @param nums: An array
    @return: the Kth largest element
    """

    def kthLargestElement(self, n, nums):
        # Guard against empty input and out-of-range ranks.
        if not nums or n < 1 or n > len(nums):
            return None
        # The nth largest sits at index len(nums) - n in ascending order.
        return self.partition(nums, 0, len(nums) - 1, len(nums) - n)

    def partition(self, nums, start, end, k):
        # Iterative quickselect; the invariant start <= k <= end holds
        # throughout (identical swap sequence to the recursive formulation).
        while True:
            if start == end:
                return nums[k]
            left, right = start, end
            pivot = nums[(left + right) // 2]
            while left <= right:
                while left <= right and nums[left] < pivot:
                    left += 1
                while left <= right and nums[right] > pivot:
                    right -= 1
                if left <= right:
                    nums[left], nums[right] = nums[right], nums[left]
                    left += 1
                    right -= 1
            # Recurse (iteratively) into whichever side contains index k.
            if k <= right:
                end = right
            elif k >= left:
                start = left
            else:
                return nums[k]
152291 | <reponame>Dmarch28/khmer
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
# pylint: disable=missing-docstring
import os
import khmer
import screed
from . import khmer_tst_utils as utils
from .test_scripts import _make_counting
def test_filter_abund_1():
    """filter-abund.py with defaults keeps only the high-abundance read; also exercises -o."""
    script = 'filter-abund.py'
    infile = utils.copy_test_data('test-abund-read-2.fa')
    n_infile = utils.copy_test_data('test-fastq-n-reads.fq')
    in_dir = os.path.dirname(infile)
    n_in_dir = os.path.dirname(n_infile)
    counting_ht = _make_counting(infile, K=17)
    n_counting_ht = _make_counting(n_infile, K=17)
    args = [counting_ht, infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    n_outfile = n_infile + '.abundfilt'
    n_outfile2 = n_infile + '2.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 1, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
    args = [n_counting_ht, n_infile]
    utils.runscript(script, args, n_in_dir)
    # NOTE(review): this reads the *input* file, not the output -- verify intent.
    seqs = set([r.sequence for r in screed.open(n_infile)])
    assert os.path.exists(n_outfile), n_outfile
    args = [n_counting_ht, n_infile, '-o', n_outfile2]
    utils.runscript(script, args, in_dir)
    assert os.path.exists(n_outfile2), n_outfile2
def test_filter_abund_2():
    """filter-abund.py with -C 1 retains both unique sequences."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = ['-C', '1', counting_ht, infile, infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_2_stdin():
    """Reading from stdin without -o must fail with a helpful error message."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = ['-C', '1', counting_ht, '-']
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status == 1
    assert "Accepting input from stdin; output filename must be provided" \
        in str(err)
def test_filter_abund_2_stdin_gzip_out():
    """-o with --gzip succeeds (NOTE(review): despite the name, input is a file, not stdin)."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('out.fa.gz')
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = ['-C', '1', counting_ht, infile, '-o', outfile, '--gzip']
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    print(out)
    print(err)
    assert status == 0
# make sure that FASTQ records are retained.
def test_filter_abund_3_fq_retained():
    """FASTQ records (including quality strings) survive filtering."""
    infile = utils.copy_test_data('test-abund-read-2.fq')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = ['-C', '1', counting_ht, infile, infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
    # check for 'quality' string.
    quals = set([r.quality for r in screed.open(outfile)])
    assert len(quals) == 2, quals
    assert '##################' in quals
# make sure that FASTQ names are properly parsed, both formats.
def test_filter_abund_4_fq_casava_18():
    """Casava 1.8-style FASTQ read names are parsed and preserved."""
    infile = utils.copy_test_data('test-abund-read-2.paired2.fq')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = [counting_ht, infile, infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.name for r in screed.open(outfile)])
    assert 'pair:foo 1::N' in seqs, seqs
def test_filter_abund_1_singlefile():
    """filter-abund-single.py builds its own countgraph and filters correctly."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-x', '1e7', '-N', '2', '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 1, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_1_singlefile_long_k():
    """k=35 with the murmur hash removes every read from this dataset."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-x', '1e7', '-N', '2', '-k', '35', '-H', 'murmur', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 80' in err, err
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 0
def test_filter_abund_1_singlefile_long_k_nosave():
    """--savegraph combined with a non-default hash function must fail."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-x', '1e7', '-N', '2', '-k', '35', '-H', 'murmur', infile,
            '--savegraph', 'foo']
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    print(out)
    print(err)
    assert status == 1
    assert 'ERROR: cannot save different hash functions yet.' in err
def test_filter_abund_2_singlefile():
    """filter-abund-single.py with --savegraph still filters correctly."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    tabfile = utils.get_temp_filename('test-savegraph.ct')
    script = 'filter-abund-single.py'
    args = ['-x', '1e7', '-N', '2', '-k', '17', '--savegraph',
            tabfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 1, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_2_singlefile_fq_casava_18():
    """filter-abund-single.py preserves Casava 1.8-style FASTQ read names."""
    infile = utils.copy_test_data('test-abund-read-2.paired2.fq')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-x', '1e7', '-N', '2', '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.name for r in screed.open(outfile)])
    assert 'pair:foo 1::N' in seqs, seqs
def test_filter_abund_4_retain_low_abund():
    """-V (variable coverage) must NOT trim low-abundance sequences."""
    # test that the -V option does not trim sequences that are low abundance
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script, args = ('filter-abund.py', ['-V', counting_ht, infile])
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_single_4_retain_low_abund():
    """Single-file variant: -V must NOT trim low-abundance sequences."""
    # test that the -V option does not trim sequences that are low abundance
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    # NOTE(review): counting_ht is built but unused by filter-abund-single.
    counting_ht = _make_counting(infile, K=17)
    script, args = ('filter-abund-single.py', ['-k', '17', '-V', infile])
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    assert 'GGTTGACGGGGCTCAGGG' in seqs
def test_filter_abund_5_trim_high_abund():
    """-V DOES trim high-abundance sequences at the error position."""
    # test that the -V option *does* trim sequences that are high abundance
    infile = utils.copy_test_data('test-abund-read-3.fa')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    script, args = ('filter-abund.py', ['-V', counting_ht, infile])
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    # trimmed sequence @ error
    assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGC' in seqs
def test_filter_abund_single_trim_high_abund():
    """Single-file variant: -V DOES trim high-abundance sequences."""
    # test that the -V option *does* trim sequences that are high abundance
    infile = utils.copy_test_data('test-abund-read-3.fa')
    in_dir = os.path.dirname(infile)
    script, args = ('filter-abund-single.py', ['-k', '17', '-V', infile])
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert len(seqs) == 2, seqs
    # trimmed sequence @ error
    assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGC' in seqs
def test_filter_abund_6_trim_high_abund_Z():
    """A high enough -Z threshold suppresses -V trimming in both scripts."""
    # test that -V/-Z settings interact properly -
    # trimming should not happen if -Z is set high enough.
    infile = utils.copy_test_data('test-abund-read-3.fa')
    in_dir = os.path.dirname(infile)
    counting_ht = _make_counting(infile, K=17)
    for script, args in (('filter-abund.py',
                          ['-V', '-Z', '25', counting_ht, infile]),
                         ('filter-abund-single.py',
                          ['-k', '17', '-V', '-Z', '25', infile])):
        utils.runscript(script, args, in_dir)
        outfile = infile + '.abundfilt'
        assert os.path.exists(outfile), outfile
        seqs = set([r.sequence for r in screed.open(outfile)])
        assert len(seqs) == 2, seqs
        # untrimmed seq.
        badseq = 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCgtgCCGCAGCTG' \
                 'TCGTCAGGGGATTTCCGGGCGG'
        assert badseq in seqs  # should be there, untrimmed
def test_filter_abund_7_retain_Ns():
    """Reads containing N are retained, counted as A, and keep their Ns in output."""
    # check that filter-abund retains sequences with Ns, and treats them as As.
    infile = utils.copy_test_data('test-filter-abund-Ns.fq')
    in_dir = os.path.dirname(infile)
    # copy test file over to test.fq & load into countgraph
    counting_ht = _make_counting(infile, K=17)
    script = 'filter-abund.py'
    args = ['-C', '3', counting_ht, infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    # test for a sequence with an 'N' in it --
    names = set([r.name for r in screed.open(outfile)])
    assert '895:1:37:17593:9954 1::FOO_withN' in names, names
    # check to see if that 'N' was properly changed to an 'A'
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAG' not in seqs, seqs
    # ...and that an 'N' remains in the output sequences
    found_N = False
    for s in seqs:
        if 'N' in s:
            found_N = True
    assert found_N, seqs
def test_filter_abund_single_8_retain_Ns():
    """Single-file variant: reads with N are retained and N treated as A."""
    # check that filter-abund-single retains
    # sequences with Ns, and treats them as As.
    infile = utils.copy_test_data('test-filter-abund-Ns.fq')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-k', '17', '-x', '1e7', '-N', '2', '-C', '3', infile]
    utils.runscript(script, args, in_dir)
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
    # test for a sequence with an 'N' in it --
    names = set([r.name for r in screed.open(outfile)])
    assert '895:1:37:17593:9954 1::FOO_withN' in names, names
    # check to see if that 'N' was properly changed to an 'A'
    seqs = set([r.sequence for r in screed.open(outfile)])
    assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAG' not in seqs, seqs
    # ...and that an 'N' remains in the output sequences
    found_N = False
    for s in seqs:
        if 'N' in s:
            found_N = True
    assert found_N, seqs
def test_outfile():
    """-o output is byte-for-byte reproducible (pinned by md5)."""
    infile = utils.get_test_data('paired-mixed-witherror.fa.pe')
    outfile = utils.get_temp_filename('paired-mixed-witherror.fa.pe.abundfilt')
    script = 'filter-abund-single.py'
    args = ['-o', outfile, infile]
    (status, out, err) = utils.runscript(script, args)
    md5hash = utils._calc_md5(open(outfile, 'rb'))
    assert md5hash == 'f17122f4c0c3dc0bcc4eeb375de93040', md5hash
def test_filter_abund_1_quiet():
    """-q silences stderr and keeps stdout short while still producing output."""
    script = 'filter-abund.py'
    infile = utils.copy_test_data('test-abund-read-2.fa')
    n_infile = utils.copy_test_data('test-fastq-n-reads.fq')
    in_dir = os.path.dirname(infile)
    # NOTE(review): n_in_dir / n_counting_ht / n_outfile* are unused leftovers
    # copied from test_filter_abund_1.
    n_in_dir = os.path.dirname(n_infile)
    counting_ht = _make_counting(infile, K=17)
    n_counting_ht = _make_counting(n_infile, K=17)
    args = ['-q', counting_ht, infile]
    status, out, err = utils.runscript(script, args, in_dir)
    assert len(err) == 0
    assert len(out) < 1000
    outfile = infile + '.abundfilt'
    n_outfile = n_infile + '.abundfilt'
    n_outfile2 = n_infile + '2.abundfilt'
    assert os.path.exists(outfile), outfile
def test_filter_abund_1_singlefile_quiet():
    """Single-file variant: -q silences stderr and keeps stdout short."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    script = 'filter-abund-single.py'
    args = ['-q', '-x', '1e7', '-N', '2', '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert len(err) == 0
    assert len(out) < 1000
    outfile = infile + '.abundfilt'
    assert os.path.exists(outfile), outfile
| StarcoderdataPython |
284348 | <reponame>CFMTech/monitor-server-api
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
def test_list_head_resources_on_all_metrics(monitor, gen):
    """GET memory/head/15 returns the 15 largest mem_usage values overall."""
    # Generate 200 metrics; memory usage is V-shaped around i == 100.
    c, s = gen.new_context(), gen.new_session()
    for i in range(200):
        mem = abs(100 - i) * 100
        m = gen.new_metric(c, s, item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/head/15/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [10000, 9900, 9900, 9800, 9800,
                          9700, 9700, 9600, 9600, 9500,
                          9500, 9400, 9400, 9300, 9300]
def test_list_tail_memory_on_all_metrics(monitor, gen):
    """GET memory/tail/15 returns the 15 smallest mem_usage values overall."""
    # Generate 200 metrics; memory usage is V-shaped around i == 100.
    c, s = gen.new_context(), gen.new_session()
    for i in range(200):
        mem = abs(100 - i) * 100
        m = gen.new_metric(c, s, item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/tail/15/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [700, 700, 600, 600, 500,
                          500, 400, 400, 300, 300,
                          200, 200, 100, 100, 0]
def test_list_head_memory_on_components(monitor, gen):
    """head/5 filtered by component only considers that component's metrics."""
    # Generate 200 metrics split between two components; compA values are
    # much larger so the filter is clearly exercised.
    c, s = gen.new_context(), gen.new_session()
    for i in range(100):
        mem = i * 100
        m = gen.new_metric(c, s, component="compA", item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s, component="compB", item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/components/compB/head/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [99, 98, 97, 96, 95]
    for m in jdata["metrics"]:
        assert m['component'] == 'compB'
def test_list_tail_memory_on_components(monitor, gen):
    """tail/5 filtered by component only considers that component's metrics."""
    # Generate 200 metrics split between two components.
    c, s = gen.new_context(), gen.new_session()
    for i in range(100):
        mem = (i + 10) * 100
        m = gen.new_metric(c, s, component="compA", item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s, component="compB", item='this_item', mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/components/compB/tail/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [4, 3, 2, 1, 0]
    for m in jdata["metrics"]:
        assert m['component'] == 'compB'
def test_list_head_memory_on_pipeline(monitor, gen):
    """head/5 filtered by pipeline only returns that pipeline's sessions."""
    # Generate 200 metrics split between two pipelines.
    c = gen.new_context()
    s1, s2 = gen.new_session(pipeline_branch="pipeline1"), gen.new_session(pipeline_branch="pipeline2")
    monitor.post_sessions_v1(s1, s2)
    for i in range(100):
        mem = (i + 101) * 100
        m = gen.new_metric(c, s1, mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s2, mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline2/head/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [99, 98, 97, 96, 95]
    for m in jdata["metrics"]:
        assert m["session_h"] == s2["session_h"]
def test_list_tail_memory_on_pipeline(monitor, gen):
    """tail/5 filtered by pipeline only returns that pipeline's sessions."""
    # Generate 200 metrics split between two pipelines.
    c = gen.new_context()
    s1, s2 = gen.new_session(pipeline_branch="pipeline1"), gen.new_session(pipeline_branch="pipeline2")
    monitor.post_sessions_v1(s1, s2)
    for i in range(100):
        mem = (i + 101) * 100
        m = gen.new_metric(c, s1, mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s2, mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline2/tail/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [4, 3, 2, 1, 0]
    for m in jdata["metrics"]:
        assert m["session_h"] == s2["session_h"]
def test_list_head_memory_of_build(monitor, gen):
    """head/5 filtered by pipeline AND build number picks the right session."""
    # Generate 200 metrics split across two builds of the same pipeline.
    c = gen.new_context()
    s1 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="1")
    s2 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="2")
    monitor.post_sessions_v1(s1, s2)
    for i in range(100):
        mem = (i + 101) * 100
        m = gen.new_metric(c, s1, mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s2, mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline1/builds/2/head/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [99, 98, 97, 96, 95]
    for m in jdata["metrics"]:
        assert m["session_h"] == s2["session_h"]
def test_list_tail_memory_of_build(monitor, gen):
    """tail/5 filtered by pipeline AND build number picks the right session."""
    # Generate 200 metrics split across two builds of the same pipeline.
    c = gen.new_context()
    s1 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="1")
    s2 = gen.new_session(pipeline_branch="pipeline1", pipeline_build_no="2")
    monitor.post_sessions_v1(s1, s2)
    for i in range(100):
        mem = (i + 101) * 100
        m = gen.new_metric(c, s1, mem_usage=mem)
        monitor.post_metrics_v1(m)
    for i in range(100):
        mem = i
        m = gen.new_metric(c, s2, mem_usage=mem)
        monitor.post_metrics_v1(m)
    resp = monitor.client.get('/api/v1/resources/memory/pipelines/pipeline1/builds/2/tail/5/metrics')
    jdata = resp.json
    assert 'metrics' in jdata
    assert jdata['metrics']
    # Extract memory_usage and challenge against expected
    memory_use = sorted([int(metric['mem_usage']) for metric in jdata['metrics']], reverse=True)
    assert memory_use == [4, 3, 2, 1, 0]
    for m in jdata["metrics"]:
        assert m["session_h"] == s2["session_h"]
| StarcoderdataPython |
4874525 | <reponame>gayatriprasad/critical-learning<filename>model.py
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
from helper_functions import *
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module.

    Used below to express the parameter-free option-A shortcut as a layer.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # The wrapped callable, applied verbatim in forward().
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv residual block for CIFAR ResNets.

    When the shape changes (stride != 1 or channel growth), the shortcut is
    either option 'A' (parameter-free: subsample spatially, zero-pad
    channels — used by the ResNet paper for CIFAR-10) or option 'B'
    (1x1 projection convolution).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # For CIFAR10 the ResNet paper uses option A.
                # BUGFIX: the spatial slice was corrupted to an invalid token
                # ("fc00:db20:..."); restored the canonical ::2, ::2 subsampling
                # with zero-padding of planes // 4 channels on each side.
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                # 1x1 projection shortcut with matching stride.
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes,
                              kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three stages (16/32/64 planes), FC head."""

    def __init__(self, block, num_blocks, init='he', num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Three residual stages; the first keeps resolution, the next two
        # halve it while doubling the channel count.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)
        if init == 'he':
            print('ResNet He init')
            self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pooling over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        return self.linear(out)
def resnet20(init='he'):
    """Build the 20-layer CIFAR ResNet (3 stages x 3 BasicBlocks each)."""
    return ResNet(BasicBlock, [3, 3, 3], init=init)
class OrgCifarModel(nn.Module):
    """All-convolutional CIFAR model used in the paper
    "Critical Learning Periods in Deep Networks"
    (https://arxiv.org/abs/1711.08856).

    Nine Conv -> BatchNorm -> ReLU stages followed by average pooling; the
    final 1x1 convolution produces the 10 class scores. The module layout
    (Sequential indices) matches the original hand-written definition.
    """

    def __init__(self):
        super(OrgCifarModel, self).__init__()
        # (in_channels, out_channels, kernel_size, stride) for each stage.
        specs = [
            (3, 96, 3, 1),
            (96, 96, 3, 1),
            (96, 192, 3, 2),
            (192, 192, 3, 1),
            (192, 192, 3, 1),
            (192, 192, 3, 2),
            (192, 192, 3, 1),
            (192, 192, 1, 1),
            (192, 10, 1, 1),
        ]
        layers = []
        for c_in, c_out, k, s in specs:
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out,
                                    kernel_size=k, stride=s))
            layers.append(nn.BatchNorm2d(c_out))
            layers.append(nn.ReLU(inplace=True))
        layers.append(nn.AvgPool2d(2, stride=1))
        self.features = nn.Sequential(*layers)

    def forward(self, x):
        x = self.features(x)
        # Flatten the 10x1x1 feature map into per-class scores.
        x = x.view(x.size(0), -1)
        return x
def xavier_initialization_weights(model):
    """Re-initialize the Conv layers found inside the model's Sequential children.

    Weights are drawn from N(0, sqrt(2 / numel(weight))) and biases are zeroed.
    Only layers whose repr contains 'Conv', inside direct children whose repr
    contains 'Sequential', are touched; everything else is left unchanged.

    NOTE(review): despite the name, the sqrt(2 / fan) scale is He-style rather
    than Xavier initialization -- confirm which scheme was intended.
    """
    for module_pos, module in model._modules.items():
        if 'Sequential' in str(module):
            for single_layer in module:
                if 'Conv' in str(single_layer):
                    n = np.sqrt(2 / torch.numel(single_layer.weight.data))
                    nn.init.normal_(single_layer.weight.data, mean=0.0, std=n)
                    # BUG FIX: convs created with bias=False have bias=None and
                    # previously crashed on `None * 0.0`; guard before zeroing.
                    if single_layer.bias is not None:
                        single_layer.bias.data = single_layer.bias.data * 0.0
if __name__ == "__main__":
    # Smoke test: run a forward pass, save the weights, reload them into a
    # fresh network, and run the same input through the reloaded model.
    model = resnet20('he')
    # A full forward pass with a single CIFAR-sized input [B x C x H x W].
    im = torch.randn(1, 3, 32, 32)
    x = model(im)
    print(x)
    save_model(model, '/home/gp/Desktop/model/', 'a.pt')
    model2 = resnet20('he')
    load_model(model2, '/home/gp/Desktop/model/', 'a.pt')
    # BUG FIX: evaluate the reloaded model (was `model(im)`), otherwise the
    # save/load round trip is never actually exercised.
    x2 = model2(im)
    print(x2)
    del model
    del x
| StarcoderdataPython |
4976913 | <gh_stars>0
import os
from scipy.optimize import curve_fit
import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import sigma_clip
from astropy.io import fits
from astropy import wcs as WCS
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.ticker import MaxNLocator
import warnings
from scipy.signal import fftconvolve, gaussian
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.interpolate import interp1d
from scipy.integrate import quad
from skimage.feature import hessian_matrix
from spectractor.config import set_logger
from spectractor import parameters
from math import floor
def gauss(x, A, x0, sigma):
    """Gaussian profile A * exp(-(x - x0)^2 / (2 sigma^2)) evaluated at x.

    Parameters
    ----------
    x: array_like
        Abscissa values of size Nx.
    A: float
        Peak amplitude.
    x0: float
        Mean (position of the peak).
    sigma: float
        Standard deviation.

    Returns
    -------
    m: array_like
        Gaussian values, same shape as x.

    Examples
    --------
    >>> x = np.arange(50)
    >>> y = gauss(x, 10, 25, 3)
    >>> print(y.shape)
    (50,)
    >>> y[25]
    10.0
    """
    u = (x - x0) / sigma
    return A * np.exp(-0.5 * u * u)
def gauss_jacobian(x, A, x0, sigma):
    """Jacobian of `gauss` with respect to its parameters (A, x0, sigma).

    Parameters
    ----------
    x: array_like
        Abscissa values of size Nx.
    A: float
        Amplitude of the Gaussian function.
    x0: float
        Mean of the Gaussian function.
    sigma: float
        Standard deviation of the Gaussian function.

    Returns
    -------
    m: array_like
        Jacobian matrix of shape (Nx, 3), columns ordered (dA, dx0, dsigma).

    Examples
    --------
    >>> x = np.arange(50)
    >>> jac = gauss_jacobian(x, 10, 25, 3)
    >>> print(jac.shape)
    (50, 3)
    """
    dA = gauss(x, A, x0, sigma) / A
    delta = x - x0
    dx0 = A * delta / (sigma * sigma) * dA
    dsigma = A * delta * delta / (sigma ** 3) * dA
    return np.array([dA, dx0, dsigma]).T
def line(x, a, b):
    """Straight line a * x + b evaluated at x."""
    return b + a * x
# noinspection PyTypeChecker
def fit_gauss(x, y, guess=(10, 1000, 1), bounds=(-np.inf, np.inf), sigma=None):
    """Fit a Gaussian profile to data with scipy.optimize.curve_fit.

    The mean guess value of the Gaussian must not be far from the truth value.
    Boundaries help a lot also.

    Parameters
    ----------
    x: np.array
        The x data values.
    y: np.array
        The y data values.
    guess: sequence, [amplitude, mean, sigma], optional
        First guessed values for the Gaussian fit (default: (10, 1000, 1)).
        The default is a tuple rather than a list to avoid the
        mutable-default-argument pitfall; any sequence is accepted.
    bounds: list, optional
        List of boundaries for the parameters [[minima],[maxima]]
        (default: (-np.inf, np.inf)).
    sigma: np.array, optional
        The y data uncertainties (default: None).

    Returns
    -------
    popt: list
        Best fitting parameters of curve_fit.
    pcov: list
        Best fitting parameters covariance matrix from curve_fit.

    Examples
    --------
    >>> x = np.arange(600., 700., 2)
    >>> p = [10, 650, 10]
    >>> y = gauss(x, *p)
    >>> popt, pcov = fit_gauss(x, y, guess=(2, 630, 2), bounds=((1, 600, 1), (100, 700, 100)),
    ...                        sigma=np.ones_like(y))
    >>> assert np.all(np.isclose(p, popt))
    """
    popt, pcov = curve_fit(gauss, x, y, p0=guess, bounds=bounds, tr_solver='exact', jac=gauss_jacobian,
                           sigma=sigma, method='dogbox', verbose=0, xtol=1e-20, ftol=1e-20)
    return popt, pcov
def multigauss_and_line(x, *params):
    """Sum of a straight line and several Gaussian profiles.

    Parameter layout: (slope, intercept) followed by blocks of
    (amplitude, mean, sigma), one block per Gaussian.

    Parameters
    ----------
    x: array
        The x data values.
    *params: float parameters laid out as described above.

    Returns
    -------
    y: array
        The y profile values.

    Examples
    --------
    >>> x = np.arange(600., 800., 1)
    >>> y = multigauss_and_line(x, 1, 10, 20, 650, 3, 40, 750, 10)
    >>> print(y[0])
    610.0
    """
    out = line(x, params[0], params[1])
    n_gauss = (len(params) - 2) // 3
    for k in range(n_gauss):
        start = 2 + 3 * k
        out = out + gauss(x, *params[start:start + 3])
    return out
# noinspection PyTypeChecker
def fit_multigauss_and_line(x, y, guess=(0, 1, 10, 1000, 1, 0), bounds=(-np.inf, np.inf)):
    """Fit several Gaussian profiles plus a straight line to data with curve_fit.

    Parameter layout: (slope, intercept) followed by blocks of
    (amplitude, mean, sigma), one block per Gaussian. The mean guesses must
    not be far from the truth values; boundaries help a lot also.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    guess: sequence, [slope, intercept, amplitude, mean, sigma], optional
        First guessed values for the fit (default: (0, 1, 10, 1000, 1, 0)).
        The default is a tuple rather than a list to avoid the
        mutable-default-argument pitfall; any sequence is accepted.
    bounds: 2D-list
        List of boundaries for the parameters [[minima],[maxima]]
        (default: (-np.inf, np.inf)).

    Returns
    -------
    popt: list
        Best fitting parameters of curve_fit.
    pcov: 2D-list
        Best fitting parameters covariance matrix from curve_fit.

    Examples
    --------
    >>> x = np.arange(600.,800.,1)
    >>> y = multigauss_and_line(x, 1, 10, 20, 650, 3, 40, 750, 10)
    >>> bounds = ((-np.inf,-np.inf,1,600,1,1,600,1),(np.inf,np.inf,100,800,100,100,800,100))
    >>> popt, pcov = fit_multigauss_and_line(x, y, guess=(0,1,3,630,3,3,770,3), bounds=bounds)
    >>> print(popt)
    [  1.  10.  20. 650.   3.  40. 750.  10.]
    """
    maxfev = 1000
    popt, pcov = curve_fit(multigauss_and_line, x, y, p0=guess, bounds=bounds, maxfev=maxfev, absolute_sigma=True)
    return popt, pcov
def rescale_x_for_legendre(x):
    """Map x onto [-1, 1] for use with the Legendre polynomial basis.

    The array is centred on the midpoint of its range and divided by the
    resulting half-range; a constant array is only centred (all zeros).
    """
    midpoint = 0.5 * (np.max(x) + np.min(x))
    centred = x - midpoint
    half_range = np.max(centred)
    if half_range != 0:
        return centred / half_range
    return centred
# noinspection PyTypeChecker
def multigauss_and_bgd(x, *params):
    """Sum of a Legendre-polynomial background and several Gaussian profiles.

    The first parameters.CALIB_BGD_NPARAMS parameters are Legendre
    coefficients (from low to high degree, contrary to np.polyval), evaluated
    on x rescaled to [-1, 1]; the remaining parameters come in blocks of
    (amplitude, mean, sigma), one block per Gaussian.

    Parameters
    ----------
    x: array
        The x data values.
    *params: float parameters laid out as described above.

    Returns
    -------
    y: array
        The y profile values.

    Examples
    --------
    >>> parameters.CALIB_BGD_NPARAMS = 4
    >>> x = np.arange(600., 800., 1)
    >>> y = multigauss_and_bgd(x, 20, 1, -1, -1, 20, 650, 3, 40, 750, 5)
    >>> print(f'{y[0]:.2f}')
    19.00
    """
    n_bgd = parameters.CALIB_BGD_NPARAMS
    x_norm = rescale_x_for_legendre(x)
    out = np.polynomial.legendre.legval(x_norm, params[:n_bgd])
    for k in range((len(params) - n_bgd) // 3):
        start = n_bgd + 3 * k
        out = out + gauss(x, *params[start:start + 3])
    return out
# noinspection PyTypeChecker
def multigauss_and_bgd_jacobian(x, *params):
    """Jacobian of `multigauss_and_bgd` with respect to its parameters.

    Columns follow the parameter layout of `multigauss_and_bgd`: first the
    parameters.CALIB_BGD_NPARAMS Legendre background coefficients (evaluated
    on x rescaled to [-1, 1]), then (amplitude, mean, sigma) triplets for
    each Gaussian.

    Parameters
    ----------
    x: array
        The x data values.
    *params: float parameters laid out as described above.

    Returns
    -------
    y: array
        The Jacobian matrix of shape (len(x), len(params)).
    """
    n_bgd = parameters.CALIB_BGD_NPARAMS
    x_norm = rescale_x_for_legendre(x)
    columns = []
    # Derivative w.r.t. the k-th Legendre coefficient is the k-th basis polynomial.
    for k in range(n_bgd):
        basis = np.zeros(n_bgd)
        basis[k] = 1
        columns.append(np.polynomial.legendre.legval(x_norm, basis))
    for k in range((len(params) - n_bgd) // 3):
        start = n_bgd + 3 * k
        jac = gauss_jacobian(x, *params[start:start + 3]).T
        for column in jac:
            columns.append(list(column))
    return np.array(columns).T
# noinspection PyTypeChecker
def fit_multigauss_and_bgd(x, y, guess=(0, 1, 10, 1000, 1, 0), bounds=(-np.inf, np.inf), sigma=None,
                           fix_centroids=False):
    """Fit a Legendre-polynomial background plus several Gaussians with curve_fit.

    The background has parameters.CALIB_BGD_NPARAMS coefficients (x is
    rescaled to [-1, 1] internally); the remaining parameters come in blocks
    of (amplitude, mean, sigma), one block per Gaussian. Mean guesses must
    not be far from the truth values; boundaries help a lot also.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    guess: sequence, [CALIB_BGD_ORDER+1 parameters, 3*number of Gaussian parameters]
        First guessed values for the fit (default: (0, 1, 10, 1000, 1, 0)).
        The default is a tuple rather than a list to avoid the
        mutable-default-argument pitfall; any sequence is accepted.
    bounds: array
        List of boundaries for the parameters [[minima],[maxima]]
        (default: (-np.inf, np.inf)).
    sigma: array, optional
        The uncertainties on the y values (default: None).
    fix_centroids: bool, optional
        Currently unused; kept in the signature for backward compatibility
        with the former iminuit-based implementation (default: False).

    Returns
    -------
    popt: array
        Best fitting parameters of curve_fit.
    pcov: array
        Best fitting parameters covariance matrix from curve_fit.

    Examples
    --------
    >>> x = np.arange(600.,800.,1)
    >>> p = [20, 1, -1, -1, 20, 650, 3, 40, 750, 5]
    >>> y = multigauss_and_bgd(x, *p)
    >>> err = 0.1 * np.sqrt(y)
    >>> guess = (15,0,0,0,10,640,2,20,750,7)
    >>> bounds = ((-np.inf,-np.inf,-np.inf,-np.inf,1,600,1,1,600,1),(np.inf,np.inf,np.inf,np.inf,100,800,100,100,800,100))
    >>> popt, pcov = fit_multigauss_and_bgd(x, y, guess=guess, bounds=bounds, sigma=err)
    >>> assert np.all(np.isclose(p,popt,rtol=1e-4))
    """
    maxfev = 10000
    popt, pcov = curve_fit(multigauss_and_bgd, x, y, p0=guess, bounds=bounds, maxfev=maxfev, sigma=sigma,
                           absolute_sigma=True, method='trf', xtol=1e-4, ftol=1e-4, verbose=0,
                           jac=multigauss_and_bgd_jacobian, x_scale='jac')
    return popt, pcov
# noinspection PyTupleAssignmentBalance
def fit_poly1d(x, y, order, w=None):
    """Least-squares 1D polynomial fit based on np.polyfit.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    order: int
        The degree of the polynomial function.
    w: array, optional
        Weights on the y data (default: None).

    Returns
    -------
    fit: array
        Best fitting coefficients, highest degree first (np.polyfit order).
        When there are not enough points (len(x) <= order) an array of zeros
        is returned instead.
    cov: 2D-array
        Covariance matrix of the coefficients; an empty array when the fit
        could not be performed.
    model: array
        The best fitting profile evaluated on x (y itself when no fit was done).

    Examples
    --------
    >>> x = np.arange(500., 1000., 1)
    >>> p = [3, 2, 1, 1]
    >>> fit, cov, model = fit_poly1d(x, np.polyval(p, x), order=3)
    >>> assert np.all(np.isclose(p, fit, 1e-5))
    """
    cov = np.array([])
    if len(x) <= order:
        # Not enough points to constrain the polynomial: return zeros.
        return np.array([0] * (order + 1)), cov, y
    if w is None:
        fit, cov = np.polyfit(x, y, order, cov=True)
    else:
        fit, cov = np.polyfit(x, y, order, cov=True, w=w)
    model = np.polyval(fit, x)
    return fit, cov, model
# noinspection PyTupleAssignmentBalance
def fit_poly1d_legendre(x, y, order, w=None):
    """Least-squares 1D polynomial fit in the Legendre orthogonal basis.

    x is rescaled to [-1, 1] before calling np.polynomial.legendre.legfit.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    order: int
        The degree of the polynomial function.
    w: array, optional
        Weights on the y data (default: None).

    Returns
    -------
    fit: array
        Best fitting Legendre coefficients (low to high degree), or zeros
        when len(x) <= order.
    cov: list
        NOTE(review): despite the name this is NOT a covariance matrix --
        legfit(full=True) returns the diagnostic list
        [residuals, rank, singular_values, rcond] (or -1 when no fit was
        performed). Confirm with callers before changing it.
    model: array
        The best fitting profile evaluated on x (y itself when no fit was done).
    """
    cov = -1
    x_norm = rescale_x_for_legendre(x)
    if len(x) <= order:
        # Not enough points to constrain the polynomial: return zeros.
        return np.array([0] * (order + 1)), cov, y
    fit, cov = np.polynomial.legendre.legfit(x_norm, y, deg=order, full=True, w=w)
    model = np.polynomial.legendre.legval(x_norm, fit)
    return fit, cov, model
# noinspection PyTypeChecker,PyUnresolvedReferences
def fit_poly2d(x, y, z, order):
    """Least-squares 2D polynomial fit using astropy.modeling.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    z: array
        The z data values.
    order: int
        The degree of the 2D polynomial function.

    Returns
    -------
    model: Astropy model
        The best fitting astropy Polynomial2D model.
    """
    initial_model = models.Polynomial2D(degree=order)
    fitter = fitting.LevMarLSQFitter()
    with warnings.catch_warnings():
        # The fitter warns that the model is linear; this is expected here.
        warnings.simplefilter('ignore')
        fitted = fitter(initial_model, x, y, z)
    return fitted
def fit_poly1d_outlier_removal(x, y, order=2, sigma=3.0, niter=3):
    """Iterative sigma-clipped 1D polynomial fit using astropy.modeling.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    order: int
        The degree of the polynomial function (default: 2).
    sigma: float
        Value of the sigma-clipping (default: 3.0).
    niter: int
        The number of iterations to converge (default: 3).

    Returns
    -------
    model: Astropy model
        The best fitting astropy model.
    outliers: array_like
        Always an empty list: outlier extraction is not implemented yet.
        Kept in the return signature for backward compatibility.

    Examples
    --------
    >>> x = np.arange(500., 1000., 1)
    >>> y = np.polyval([3, 2, 1, 0], x)
    >>> y[::10] = 0.
    >>> model, outliers = fit_poly1d_outlier_removal(x, y, order=3, sigma=3)
    >>> print('{:.2f}'.format(model.c3.value))
    3.00
    """
    gg_init = models.Polynomial1D(order)
    # Explicit zero starting values for the low-order coefficients (their default).
    gg_init.c1 = 0
    gg_init.c2 = 0
    fit = fitting.LinearLSQFitter()
    or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=niter, sigma=sigma)
    # get fitted model and filtered data
    or_fitted_model, filtered_data = or_fit(gg_init, x, y)
    outliers = []  # TODO: extract the clipped points from the filtered data mask
    return or_fitted_model, outliers
def fit_poly2d_outlier_removal(x, y, z, order=2, sigma=3.0, niter=30):
    """Iterative sigma-clipped 2D polynomial fit using astropy.modeling.

    Parameters
    ----------
    x: array
        The x data values.
    y: array
        The y data values.
    z: array
        The z data values.
    order: int
        The degree of the polynomial function (default: 2).
    sigma: float
        Value of the sigma-clipping (default: 3.0).
    niter: int
        The number of iterations to converge (default: 30).

    Returns
    -------
    model: Astropy model
        The best fitting astropy model.
    """
    my_logger = set_logger(__name__)
    initial_model = models.Polynomial2D(order)
    base_fitter = fitting.LinearLSQFitter()
    clipped_fitter = fitting.FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=niter, sigma=sigma)
    # Fit while iteratively rejecting outliers.
    fitted_model, clipped_data = clipped_fitter(initial_model, x, y, z)
    my_logger.info(f'\n\t{fitted_model}')
    return fitted_model
def tied_circular_gauss2d(g1):
    """Tie constraint forcing y_stddev to track x_stddev (circular Gaussian)."""
    return g1.x_stddev
def fit_gauss2d_outlier_removal(x, y, z, sigma=3.0, niter=3, guess=None, bounds=None, circular=False):
    """Sigma-clipped fit of an astropy Gaussian2D model to an image.

    Model parameters: amplitude, x_mean, y_mean, x_stddev, y_stddev, theta.

    Parameters
    ----------
    x: np.array
        2D array of the x coordinates from meshgrid.
    y: np.array
        2D array of the y coordinates from meshgrid.
    z: np.array
        The 2D array image.
    sigma: float
        Value of sigma for the sigma rejection of outliers (default: 3).
    niter: int
        Maximum number of iterations for the outlier detection (default: 3).
    guess: list, optional
        First guess for the PSF parameters, in the order above (default: None).
    bounds: list, optional
        Bounds for the PSF parameters as ((min,...), (max,...)) (default: None).
    circular: bool, optional
        If True, tie y_stddev to x_stddev and freeze theta so the Gaussian
        is circular (default: False).

    Returns
    -------
    fitted_model: Fittable
        The fitted astropy Gaussian2D model.
    """
    my_logger = set_logger(__name__)
    model = models.Gaussian2D()
    if guess is not None:
        for index, name in enumerate(model.param_names):
            getattr(model, name).value = guess[index]
    if bounds is not None:
        for index, name in enumerate(model.param_names):
            parameter = getattr(model, name)
            parameter.min = bounds[0][index]
            parameter.max = bounds[1][index]
    if circular:
        model.y_stddev.tied = tied_circular_gauss2d
        model.theta.fixed = True
    with warnings.catch_warnings():
        # The fitter may warn about model linearity; ignore it here.
        warnings.simplefilter('ignore')
        base_fitter = fitting.LevMarLSQFitter()
        clipped_fitter = fitting.FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=niter, sigma=sigma)
        # Fit while iteratively rejecting outliers.
        fitted_model, clipped_data = clipped_fitter(model, x, y, z)
        my_logger.info(f'\n\t{fitted_model}')
        return fitted_model
def fit_moffat2d_outlier_removal(x, y, z, sigma=3.0, niter=3, guess=None, bounds=None):
    """Sigma-clipped fit of an astropy Moffat2D model to an image.

    Model parameters: amplitude, x_0, y_0, gamma, alpha.

    Parameters
    ----------
    x: np.array
        2D array of the x coordinates from meshgrid.
    y: np.array
        2D array of the y coordinates from meshgrid.
    z: np.array
        The 2D array image.
    sigma: float
        Value of sigma for the sigma rejection of outliers (default: 3).
    niter: int
        Maximum number of iterations for the outlier detection (default: 3).
    guess: list, optional
        First guess for the PSF parameters, in the order above (default: None).
    bounds: list, optional
        Bounds for the PSF parameters as ((min,...), (max,...)) (default: None).

    Returns
    -------
    fitted_model: Fittable
        The fitted astropy Moffat2D model.
    """
    my_logger = set_logger(__name__)
    model = models.Moffat2D()
    if guess is not None:
        for index, name in enumerate(model.param_names):
            getattr(model, name).value = guess[index]
    if bounds is not None:
        for index, name in enumerate(model.param_names):
            parameter = getattr(model, name)
            parameter.min = bounds[0][index]
            parameter.max = bounds[1][index]
    with warnings.catch_warnings():
        # The fitter may warn about model linearity; ignore it here.
        warnings.simplefilter('ignore')
        base_fitter = fitting.LevMarLSQFitter()
        clipped_fitter = fitting.FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=niter, sigma=sigma)
        # Fit while iteratively rejecting outliers.
        fitted_model, clipped_data = clipped_fitter(model, x, y, z)
        my_logger.info(f'\n\t{fitted_model}')
        return fitted_model
def fit_moffat1d_outlier_removal(x, y, sigma=3.0, niter=3, guess=None, bounds=None):
    """Sigma-clipped fit of an astropy Moffat1D model to a 1D profile.

    Model parameters: amplitude, x_0, gamma, alpha.

    Parameters
    ----------
    x: np.array
        1D array of the x coordinates.
    y: np.array
        The 1D array amplitudes.
    sigma: float
        Value of sigma for the sigma rejection of outliers (default: 3).
    niter: int
        Maximum number of iterations for the outlier detection (default: 3).
    guess: list, optional
        First guess for the PSF parameters, in the order above (default: None).
    bounds: list, optional
        Bounds for the PSF parameters as ((min,...), (max,...)) (default: None).

    Returns
    -------
    fitted_model: Fittable
        The fitted astropy Moffat1D model.
    """
    my_logger = set_logger(__name__)
    model = models.Moffat1D()
    if guess is not None:
        for index, name in enumerate(model.param_names):
            getattr(model, name).value = guess[index]
    if bounds is not None:
        for index, name in enumerate(model.param_names):
            parameter = getattr(model, name)
            parameter.min = bounds[0][index]
            parameter.max = bounds[1][index]
    with warnings.catch_warnings():
        # The fitter may warn about model linearity; ignore it here.
        warnings.simplefilter('ignore')
        base_fitter = fitting.LevMarLSQFitter()
        clipped_fitter = fitting.FittingWithOutlierRemoval(base_fitter, sigma_clip, niter=niter, sigma=sigma)
        # Fit while iteratively rejecting outliers.
        fitted_model, clipped_data = clipped_fitter(model, x, y)
        my_logger.debug(f'\n\t{fitted_model}')
        return fitted_model
def fit_moffat1d(x, y, guess=None, bounds=None):
    """Fit an astropy Moffat1D model (amplitude, x_0, gamma, alpha) to a profile.

    Parameters
    ----------
    x: np.array
        1D array of the x coordinates.
    y: np.array
        The 1D array amplitudes.
    guess: list, optional
        First guess for the PSF parameters, in the order above (default: None).
    bounds: list, optional
        Bounds for the PSF parameters as ((min,...), (max,...)) (default: None).

    Returns
    -------
    fitted_model: Fittable
        The fitted astropy Moffat1D model.
    """
    my_logger = set_logger(__name__)
    model = models.Moffat1D()
    if guess is not None:
        for index, name in enumerate(model.param_names):
            getattr(model, name).value = guess[index]
    if bounds is not None:
        for index, name in enumerate(model.param_names):
            parameter = getattr(model, name)
            parameter.min = bounds[0][index]
            parameter.max = bounds[1][index]
    with warnings.catch_warnings():
        # The fitter may warn about model linearity; ignore it here.
        warnings.simplefilter('ignore')
        fitter = fitting.LevMarLSQFitter()
        fitted_model = fitter(model, x, y)
        my_logger.info(f'\n\t{fitted_model}')
        return fitted_model
class LevMarLSQFitterWithNan(fitting.LevMarLSQFitter):
    """Levenberg-Marquardt least-squares fitter robust to NaN/inf data.

    Overrides the objective function so that non-finite residuals are
    zeroed instead of propagating into the minimization.
    """

    def objective_function(self, fps, *args):
        """Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [weights], [input coordinates]]

        Returns
        -------
        residuals: np.array
            Flattened (optionally weighted) residual array with all
            non-finite entries set to zero.
        """
        model = args[0]
        weights = args[1]
        fitting._fitter_to_model_params(model, fps)
        meas = args[-1]
        if weights is None:
            a = np.ravel(model(*args[2: -1]) - meas)
        else:
            a = np.ravel(weights * (model(*args[2: -1]) - meas))
        # Bug fix: the original unweighted branch did a[np.isfinite(a)] = 0,
        # which zeroed the *finite* residuals and nullified the fit entirely.
        # The mask must be inverted, as in the weighted branch.
        a[~np.isfinite(a)] = 0
        return a
def compute_fwhm(x, y, minimum=0, center=None, full_output=False):
    """Compute the full width half maximum (FWHM) of the curve y(x).

    The curve is linearly interpolated, the right-side half-maximum crossing
    is located by dichotomy, and the FWHM is taken as twice the distance
    between this crossing and the curve center.

    Parameters
    ----------
    x: array_like
        The abscissa array.
    y: array_like
        The function array.
    minimum: float, optional
        Reference level from which half the height is computed (default: 0).
    center: float, optional
        Center of the curve; if None, the y-weighted average of x is used
        (default: None).
    full_output: bool, optional
        If True, also return the half-maximum level, the center and the two
        edges at half maximum (default: False).

    Returns
    -------
    fwhm: float
        The full width half maximum (-1 is returned for 2D input).
    half: float, optional
        The half-maximum level. Only if full_output is True.
    center: float, optional
        The curve center. Only if full_output is True.
    right_edge: float, optional
        Right crossing at half maximum. Only if full_output is True.
    left_edge: float, optional
        Left edge, mirrored from the right crossing. Only if full_output is True.
    """
    if y.ndim > 1:
        # TODO: implement fwhm for 2D curves
        return -1
    model = interp1d(x, y, kind="linear", bounds_error=False, fill_value="extrapolate")
    height = np.max(y) - minimum
    peak_index = np.argmax(y)
    # Bracket the right-side half-maximum crossing between the abscissae
    # closest to the 90% and 10% levels on the right of the peak.
    left_bound = x[peak_index + np.argmin(np.abs(y[peak_index:] - 0.9 * height))]
    right_bound = x[peak_index + np.argmin(np.abs(y[peak_index:] - 0.1 * height))]

    def half_level(xx):
        return model(xx) - 0.5 * height

    crossing = dichotomie(half_level, left_bound, right_bound, 1e-3)
    if center is None:
        center = np.average(x, weights=y)
    fwhm = abs(2 * (crossing - center))
    if not full_output:
        return fwhm
    # left edge is mirrored from the right crossing around the center
    return fwhm, 0.5 * height, center, crossing, center - abs(crossing - center)
def compute_integral(x, y, bounds=None):
    """Integrate the curve y(x), interpolated/extrapolated with cubic splines.

    Parameters
    ----------
    x: array_like
        The abscissa array.
    y: array_like
        The function array.
    bounds: array_like, optional
        (min, max) integration bounds; if None, the edges of the x array
        are used (default: None).

    Returns
    -------
    result: float
        The integral of the curve.

    Examples
    --------
    >>> x = np.arange(0, 100, 0.5)
    >>> y = np.exp(-0.5 * ((x - 40) / 4) ** 2) / (4 * np.sqrt(2 * np.pi))
    >>> print(f"{compute_integral(x, y):.6f}")
    1.000000
    """
    if bounds is None:
        bounds = (np.min(x), np.max(x))
    model = interp1d(x, y, kind="cubic", bounds_error=False, fill_value="extrapolate")
    result, _ = quad(model, bounds[0], bounds[1], limit=200)
    return result
def find_nearest(array, value):
    """Return the index and value of the array element closest to *value*.

    Parameters
    ----------
    array: array
        The array to inspect.
    value: float
        The value to look for.

    Returns
    -------
    index: int
        Index of the array element nearest to *value*.
    val: float
        The array value at that index.

    Examples
    --------
    >>> idx, val = find_nearest(np.arange(0., 10.), 3.3)
    >>> print(idx, val)
    3 3.0
    """
    index = np.abs(array - value).argmin()
    return index, array[index]
def ensure_dir(directory_name):
    """Ensure that *directory_name* directory exists. If not, create it
    (including missing parent directories).

    Parameters
    ----------
    directory_name: str
        The directory name.

    Examples
    --------
    >>> ensure_dir('tests')
    >>> os.path.exists('tests')
    True
    >>> ensure_dir('tests/mytest')
    >>> os.path.exists('tests/mytest')
    True
    >>> os.rmdir('./tests/mytest')
    """
    # exist_ok=True avoids the check-then-create race condition of the
    # previous os.path.exists() guard (TOCTOU).
    os.makedirs(directory_name, exist_ok=True)
def weighted_avg_and_std(values, weights):
    """Return the weighted average and weighted standard deviation.

    Parameters
    ----------
    values: np.ndarray
        The values to average (e.g. pixel positions x).
    weights: np.ndarray
        Weights, same shape as *values* (e.g. intensities y = f(x)).

    Returns
    -------
    average: float
        The weighted mean.
    std: float
        The weighted standard deviation.
    """
    mean = np.average(values, weights=weights)
    # weighted variance around the weighted mean; fast and numerically precise
    variance = np.average((values - mean) ** 2, weights=weights)
    return mean, np.sqrt(variance)
def hessian_and_theta(data, margin_cut=1):
    """Compute the Hessian eigenvalue maps and orientation angle of an image.

    Parameters
    ----------
    data: np.array
        2D image array.
    margin_cut: int, optional
        Number of border pixels stripped from the outputs (default: 1).

    Returns
    -------
    lambda_plus: np.array
        Largest Hessian eigenvalue map, margins removed.
    lambda_minus: np.array
        Smallest Hessian eigenvalue map, margins removed.
    theta: np.array
        Orientation angle map in degrees, margins removed.
    """
    Hxx, Hxy, Hyy = hessian_matrix(data, sigma=3, order='rc')
    # closed-form eigenvalues of the symmetric 2x2 Hessian
    discriminant = np.sqrt((Hxx - Hyy) ** 2 + 4 * Hxy * Hxy)
    lambda_plus = 0.5 * ((Hxx + Hyy) + discriminant)
    lambda_minus = 0.5 * ((Hxx + Hyy) - discriminant)
    theta = 0.5 * np.arctan2(2 * Hxy, Hxx - Hyy) * 180 / np.pi
    # strip the borders where the Hessian estimate is unreliable
    crop = np.s_[margin_cut:-margin_cut, margin_cut:-margin_cut]
    return lambda_plus[crop], lambda_minus[crop], theta[crop]
def filter_stars_from_bgd(data, margin_cut=1):
    """Mask (set to NaN) the pixels whose smallest Hessian eigenvalue lies
    well below the background level, i.e. point-like sources such as stars.

    Parameters
    ----------
    data: np.array
        2D image array (modified in place).
    margin_cut: int, optional
        Margin passed to the Hessian computation (default: 1).

    Returns
    -------
    data: np.array
        The image with star-like pixels set to NaN.
    """
    _, lambda_minus, _ = hessian_and_theta(np.copy(data), margin_cut=margin_cut)
    # pixels more than 2 sigma below the median curvature are flagged as stars
    threshold = np.median(lambda_minus) - 2 * np.std(lambda_minus)
    # NOTE(review): lambda_minus is margin-cropped while data is full size,
    # so the mask indices are shifted by margin_cut — confirm intended.
    data[np.where(lambda_minus < threshold)] = np.nan
    return data
def fftconvolve_gaussian(array, reso):
    """Convolve a 1D or 2D array with a Gaussian profile of given standard
    deviation, using FFT-based convolution.

    Parameters
    ----------
    array: array
        The array to convolve (modified in place for 2D input).
    reso: float
        The standard deviation of the Gaussian profile in pixels.

    Returns
    -------
    convolved: array
        The convolved array, same size and shape as input.
    """
    my_logger = set_logger(__name__)
    if array.ndim == 1:
        kernel = gaussian(array.size, reso)
        kernel /= np.sum(kernel)
        array = fftconvolve(array, kernel, mode='same')
    elif array.ndim == 2:
        # convolve each row with a normalized kernel spanning the row length
        kernel = gaussian(array.shape[1], reso)
        kernel /= np.sum(kernel)
        for row in range(array.shape[0]):
            array[row] = fftconvolve(array[row], kernel, mode='same')
    else:
        my_logger.error(f'\n\tArray dimension must be 1 or 2. Here I have array.ndim={array.ndim}.')
    return array
def formatting_numbers(value, error_high, error_low, std=None, label=None):
    """Format a physical value and its uncertainties: round the uncertainties
    to their first significant digit and the value to the same precision.

    Parameters
    ----------
    value: float
        The physical value.
    error_high: float
        Upper uncertainty.
    error_low: float
        Lower uncertainty.
    std: float, optional
        The RMS of the physical parameter (default: None).
    label: str, optional
        The name of the physical parameter to output (default: None).

    Returns
    -------
    text: tuple
        The formatted strings: (label,) value, error_high, error_low (, std).

    Examples
    --------
    >>> formatting_numbers(3., 0.789, 0.500, std=0.45, label='test')
    ('test', '3.0', '0.8', '0.5', '0.5')
    >>> formatting_numbers(3240., 230, 420, std=330)
    ('3240', '230', '420', '330')
    """
    out = []
    if label is not None:
        out.append(label)
    # decimal exponents of the two uncertainties
    power_high = int(floor(np.log10(np.abs(error_high))))
    power_low = int(floor(np.log10(np.abs(error_low))))
    power10 = min(power_high, power_low)
    decimals = abs(power10)
    str_std = ""
    if np.isclose(0.0, float(f"{value:.{decimals}f}")):
        # the value rounds to zero at the uncertainty precision
        str_value = f"{0:.{decimals}f}"
        str_error_high = f"{error_high:.{decimals}f}"
        str_error_low = f"{error_low:.{decimals}f}"
        if std is not None:
            str_std = f"{std:.{decimals}f}"
    elif power10 > 0:
        # uncertainties larger than 1: print everything as integers
        str_value = f"{value:.0f}"
        str_error_high = f"{error_high:.0f}"
        str_error_low = f"{error_low:.0f}"
        if std is not None:
            str_std = f"{std:.0f}"
    else:
        str_value = f"{value:.{decimals}f}"
        if power_high == power_low:
            str_error_high = f"{error_high:.1g}"
            str_error_low = f"{error_low:.1g}"
            if std is not None:
                str_std = f"{std:.1g}"
        elif power_high > power_low:
            # keep one extra significant digit on the larger uncertainty
            str_error_high = f"{error_high:.2g}"
            str_error_low = f"{error_low:.1g}"
            if std is not None:
                str_std = f"{std:.2g}"
        else:
            str_error_high = f"{error_high:.1g}"
            str_error_low = f"{error_low:.2g}"
            if std is not None:
                str_std = f"{std:.2g}"
    out += [str_value, str_error_high, str_error_low]
    if std is not None:
        out.append(str_std)
    return tuple(out)
def pixel_rotation(x, y, theta, x0=0, y0=0):
    """Rotate the 2D point (x, y) by an angle *theta* clockwise around (x0, y0).

    Parameters
    ----------
    x: float
        x coordinate.
    y: float
        y coordinate.
    theta: float
        Rotation angle in radians.
    x0: float, optional
        x position of the center of rotation (default: 0).
    y0: float, optional
        y position of the center of rotation (default: 0).

    Returns
    -------
    u: float
        Rotated x coordinate.
    v: float
        Rotated y coordinate.

    Examples
    --------
    >>> u, v = pixel_rotation(1, 0, np.pi / 4)
    >>> assert np.isclose(u, 1 / np.sqrt(2))
    >>> assert np.isclose(v, -1 / np.sqrt(2))
    """
    dx, dy = x - x0, y - y0
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    u = cos_t * dx + sin_t * dy
    v = cos_t * dy - sin_t * dx
    return u, v
def detect_peaks(image):
    """Detect the local maxima of an image with a maximum filter.

    Only positive peaks are detected (take the absolute or negated image to
    find negative ones).

    Parameters
    ----------
    image: array_like
        The 2D image array.

    Returns
    -------
    detected_peaks: array_like
        Boolean mask, True where a pixel is the maximum of its 8-connected
        neighborhood.

    Examples
    --------
    >>> im = np.zeros((50, 50))
    >>> im[4, 6] = 2
    >>> peaks = detect_peaks(im)
    >>> bool(peaks[4, 6])
    True
    """
    # 8-connected neighborhood footprint
    footprint = generate_binary_structure(2, 2)
    # pixels equal to the local maximum of their neighborhood
    is_local_max = maximum_filter(image, footprint=footprint) == image
    # the flat background (pixels == 0) also satisfies the condition above;
    # erode it so its border does not leak into the peak mask (the border
    # artifact of the maximum filter)
    background = (image == 0)
    eroded_background = binary_erosion(background, structure=footprint, border_value=50)
    # keep only true peaks: xor removes the (eroded) background plateau
    return is_local_max ^ eroded_background
def clean_target_spikes(data, saturation):
    """Clip saturated pixels of an image and iteratively smooth the
    horizontal spikes around the saturated region.

    Parameters
    ----------
    data: np.array
        2D image array (modified in place).
    saturation: float
        Saturation level of the detector.

    Returns
    -------
    data: np.array
        The cleaned image.
    """
    saturated_pixels = np.where(data > saturation)
    # clip everything above the saturation level
    data[saturated_pixels] = saturation
    NY, NX = data.shape
    delta = len(saturated_pixels[0])
    # iterate until a pass removes no saturated pixel (delta becomes 0)
    while delta > 0:
        delta = len(saturated_pixels[0])
        grady, gradx = np.gradient(data)
        for iy in range(1, NY - 1):
            for ix in range(1, NX - 1):
                # if grady[iy,ix] > 0.8*np.max(grady) :
                # data[iy,ix] = data[iy-1,ix]
                # if grady[iy,ix] < 0.8*np.min(grady) :
                # data[iy,ix] = data[iy+1,ix]
                # replace pixels sitting on steep horizontal gradients by
                # their left/right neighbour to flatten the spikes
                if gradx[iy, ix] > 0.8 * np.max(gradx):
                    data[iy, ix] = data[iy, ix - 1]
                if gradx[iy, ix] < 0.8 * np.min(gradx):
                    data[iy, ix] = data[iy, ix + 1]
        saturated_pixels = np.where(data >= saturation)
        # delta = pixels removed this pass; 0 stops the loop
        delta = delta - len(saturated_pixels[0])
    return data
def plot_image_simple(ax, data, scale="lin", title="", units="Image units", cmap=None,
                      target_pixcoords=None, vmin=None, vmax=None, aspect=None, cax=None):
    """Plot a 2D image with a color bar, axis labels and optional target marker.

    Parameters
    ----------
    ax: Axes
        Axes instance to draw on.
    data: array_like
        The 2D image array.
    scale: str, optional
        Color scale of the image: 'lin', 'log'/'log10' or 'symlog' (default: 'lin').
    title: str, optional
        Title of the image (default: "").
    units: str, optional
        Units written in the color bar label (default: "Image units").
    cmap: colormap, optional
        Color map (default: None).
    target_pixcoords: array_like, optional
        (x, y) pixel coordinates of a target to mark with a circle (default: None).
    vmin: float, optional
        Minimum value of the color scale (default: None).
    vmax: float, optional
        Maximum value of the color scale (default: None).
    aspect: str, optional
        Aspect keyword passed to imshow (default: None).
    cax: Axes, optional
        Axes hosting the color bar if needed (default: None).
    """
    if scale in ("log", "log10"):
        # replace zeros and negative pixels by the smallest positive value
        # so the logarithmic normalization is well defined
        positive_min = np.min(data[np.where(data > 0)])
        data[np.where(data <= 0)] = positive_min
        norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
    elif scale == "symlog":
        norm = matplotlib.colors.SymLogNorm(vmin=vmin, vmax=vmax, linthresh=10, base=10)
    else:
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    image = ax.imshow(data, origin='lower', cmap=cmap, norm=norm, aspect=aspect)
    ax.grid(color='silver', ls='solid')
    ax.grid(True)
    ax.set_xlabel(parameters.PLOT_XLABEL)
    ax.set_ylabel(parameters.PLOT_YLABEL)
    colorbar = plt.colorbar(image, ax=ax, cax=cax)
    if scale == "lin":
        colorbar.formatter.set_powerlimits((0, 0))
        colorbar.locator = MaxNLocator(7, prune=None)
        colorbar.update_ticks()
    colorbar.set_label('%s (%s scale)' % (units, scale))
    if title != "":
        ax.set_title(title)
    if target_pixcoords is not None:
        ax.scatter(target_pixcoords[0], target_pixcoords[1], marker='o', s=100, edgecolors='k',
                   facecolors='none', label='Target', linewidth=2)
def plot_spectrum_simple(ax, lambdas, data, data_err=None, xlim=None, color='r', linestyle='none', lw=2, label='',
                         title='', units=''):
    """Plot a 1D spectrum with optional error bars, labels and title.

    Parameters
    ----------
    ax: Axes
        Axes instance to draw on.
    lambdas: array
        The wavelengths array; if None, pixel indices are used as abscissae.
    data: array
        The spectrum data array.
    data_err: array, optional
        The spectrum uncertainty array (default: None).
    xlim: list, optional
        Minimum and maximum abscissae (default: None).
    color: str, optional
        Color of the spectrum (default: 'r').
    linestyle: str, optional
        Line style of the spectrum (default: 'none').
    lw: int, optional
        Line width (default: 2).
    label: str, optional
        Legend label (default: '').
    title: str, optional
        Plot title (default: '').
    units: str, optional
        Flux units for the y-axis label (default: '').
    """
    abscissae = lambdas if lambdas is not None else np.arange(data.size)
    if data_err is not None:
        ax.errorbar(abscissae, data, yerr=data_err, fmt=f'{color}o', lw=lw, label=label,
                    zorder=0, markersize=2, linestyle=linestyle)
    else:
        ax.plot(abscissae, data, f'{color}-', lw=lw, label=label, linestyle=linestyle)
    ax.grid(True)
    if xlim is None and lambdas is not None:
        xlim = [parameters.LAMBDA_MIN, parameters.LAMBDA_MAX]
    ax.set_xlim(xlim)
    try:
        # scale the y-axis on the data inside the plotted window
        in_window = np.logical_and(abscissae > xlim[0], abscissae < xlim[1])
        ax.set_ylim(0., np.nanmax(data[in_window]) * 1.2)
    except ValueError:
        pass
    if lambdas is not None:
        ax.set_xlabel(r'$\lambda$ [nm]')
    else:
        ax.set_xlabel('X [pixels]')
    if units != '':
        ax.set_ylabel(f'Flux [{units}]')
    else:
        ax.set_ylabel(f'Flux')
    if title != '':
        ax.set_title(title)
def plot_compass_simple(ax, parallactic_angle=None, arrow_size=0.1, origin=(0.15, 0.15)):
    """Plot a small (N, W) compass, and optionally the zenith direction.

    Parameters
    ----------
    ax: Axes
        Axes instance to draw on.
    parallactic_angle: float, optional
        The parallactic angle with respect to North eastward; if given,
        the zenith direction is also drawn (default: None).
    arrow_size: float, optional
        Length of the arrows as a fraction of the axes size (default: 0.1).
    origin: array_like, optional
        (x0, y0) position of the compass in axes fraction (default: (0.15, 0.15)).
        The default is an immutable tuple (was a mutable list, a shared
        mutable-default-argument pitfall).
    """
    # North arrow, expressed in (RA, DEC) then mapped to image (x, y)
    N_arrow = [0, arrow_size]
    N_xy = np.asarray(flip_and_rotate_radec_to_image_xy_coordinates(N_arrow[0], N_arrow[1],
                                                                    camera_angle=parameters.OBS_CAMERA_ROTATION,
                                                                    flip_ra_sign=parameters.OBS_CAMERA_RA_FLIP_SIGN,
                                                                    flip_dec_sign=parameters.OBS_CAMERA_DEC_FLIP_SIGN))
    ax.annotate("N", xy=origin, xycoords='axes fraction', xytext=N_xy + origin, textcoords='axes fraction',
                arrowprops=dict(arrowstyle="<|-", fc="yellow", ec="yellow"), color="yellow",
                horizontalalignment='center', verticalalignment='center')
    # West arrow
    W_arrow = [arrow_size, 0]
    W_xy = np.asarray(flip_and_rotate_radec_to_image_xy_coordinates(W_arrow[0], W_arrow[1],
                                                                    camera_angle=parameters.OBS_CAMERA_ROTATION,
                                                                    flip_ra_sign=parameters.OBS_CAMERA_RA_FLIP_SIGN,
                                                                    flip_dec_sign=parameters.OBS_CAMERA_DEC_FLIP_SIGN))
    ax.annotate("W", xy=origin, xycoords='axes fraction', xytext=W_xy + origin, textcoords='axes fraction',
                arrowprops=dict(arrowstyle="<|-", fc="yellow", ec="yellow"), color="yellow",
                horizontalalignment='center', verticalalignment='center')
    # Central dot at the compass origin (data coordinates)
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    ax.scatter(origin[0] * xmax, origin[1] * ymax, color="yellow", s=20)
    # Zenith direction
    if parallactic_angle is not None:
        p_arrow = [0, arrow_size]  # angle with respect to North in RADEC counterclockwise
        angle = parameters.OBS_CAMERA_ROTATION + parameters.OBS_CAMERA_RA_FLIP_SIGN * parallactic_angle
        p_xy = np.asarray(flip_and_rotate_radec_to_image_xy_coordinates(p_arrow[0], p_arrow[1],
                                                                        camera_angle=angle,
                                                                        flip_ra_sign=parameters.OBS_CAMERA_RA_FLIP_SIGN,
                                                                        flip_dec_sign=parameters.OBS_CAMERA_DEC_FLIP_SIGN))
        ax.annotate("Z", xy=origin, xycoords='axes fraction', xytext=p_xy + origin, textcoords='axes fraction',
                    arrowprops=dict(arrowstyle="<|-", fc="lightgreen", ec="lightgreen"), color="lightgreen",
                    horizontalalignment='center', verticalalignment='center')
def load_fits(file_name, hdu_index=0):
    """Load a FITS file and return its primary header and the data of one HDU.

    Parameters
    ----------
    file_name: str
        The FITS file name.
    hdu_index: int, optional
        The HDU index in the file (default: 0).

    Returns
    -------
    header: fits.Header
        Header of the primary HDU.
    data: np.array
        The data array of the requested HDU.

    Examples
    --------
    >>> header, data = load_fits("./tests/data/reduc_20170530_134.fits")
    >>> header["DATE-OBS"]
    '2017-05-31T02:53:52.356'
    >>> data.shape
    (2048, 2048)
    """
    # the context manager frees the file descriptor on exit;
    # data is accessed inside the block so it is loaded before closing
    with fits.open(file_name) as hdu_list:
        header = hdu_list[0].header
        data = hdu_list[hdu_index].data
    return header, data
def save_fits(file_name, header, data, overwrite=False):
    """Save a header and a data array into a FITS file.

    Parameters
    ----------
    file_name: str
        The FITS file name.
    header: fits.Header
        Header of the FITS file.
    data: np.array
        The data array.
    overwrite: bool, optional
        If True and the file already exists, it is overwritten (default: False).

    Examples
    --------
    >>> header, data = load_fits("./tests/data/reduc_20170530_134.fits")
    >>> save_fits("./outputs/save_fits_test.fits", header, data, overwrite=True)
    >>> assert os.path.isfile("./outputs/save_fits_test.fits")

    .. doctest:
        :hide:

        >>> os.remove("./outputs/save_fits_test.fits")
    """
    hdu = fits.PrimaryHDU()
    hdu.header = header
    hdu.data = data
    # os.path.dirname is portable; the previous '/'.join(split('/')) form
    # returned '' for bare filenames and then os.makedirs('') raised
    output_directory = os.path.dirname(file_name)
    if output_directory != '':
        ensure_dir(output_directory)
    hdu.writeto(file_name, overwrite=overwrite)
def dichotomie(f, a, b, epsilon):
    """Find a root of *f* in [a, b] by bisection.

    Parameters
    ----------
    f: callable
        The function whose root is searched.
    a: float
        Left bound bracketing the root.
    b: float
        Right bound bracketing the root.
    epsilon: float
        Required precision on the root.

    Returns
    -------
    root: float
        The approximate root of the function.

    Examples
    --------
    >>> root = dichotomie(lambda t: t - 2.0, 0, 10, 1e-6)
    >>> assert abs(root - 2.0) < 1e-5
    """
    midpoint = 0.5 * (a + b)
    iterations = 1
    # stop when the bracket is smaller than epsilon, or after 100 iterations
    # as a safety net against non-bracketing inputs
    while b - a > epsilon and iterations < 100:
        midpoint = 0.5 * (a + b)
        # keep the half-interval whose endpoints change sign
        if f(midpoint) * f(a) > 0:
            a = midpoint
        else:
            b = midpoint
        iterations += 1
    return midpoint
def wavelength_to_rgb(wavelength, gamma=0.8):
    """Convert a light wavelength in nanometers to an approximate (R, G, B, A)
    color value.

    Taken from http://www.noah.org/wiki/Wavelength_to_RGB_in_Python, based on
    code from http://www.physics.sfasu.edu/astro/color/spectra.html.
    Wavelengths outside the visible range 380-750 nm are clipped to the
    nearest edge and rendered with alpha 0.5 instead of 1.

    Parameters
    ----------
    wavelength: float
        Wavelength in nm.
    gamma: float, optional
        Gamma correction exponent applied to each channel (default: 0.8).

    Returns
    -------
    rgba: tuple
        (R, G, B, A) channel values in [0, 1].
    """
    wavelength = float(wavelength)
    alpha = 1. if 380 <= wavelength <= 750 else 0.5
    # clip to the visible range
    wavelength = min(max(wavelength, 380.), 750.)
    red = green = blue = 0.0
    if 380 <= wavelength <= 440:
        # intensity fades toward the violet edge
        attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
        red = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
        blue = (1.0 * attenuation) ** gamma
    elif 440 <= wavelength <= 490:
        green = ((wavelength - 440) / (490 - 440)) ** gamma
        blue = 1.0
    elif 490 <= wavelength <= 510:
        green = 1.0
        blue = (-(wavelength - 510) / (510 - 490)) ** gamma
    elif 510 <= wavelength <= 580:
        red = ((wavelength - 510) / (580 - 510)) ** gamma
        green = 1.0
    elif 580 <= wavelength <= 645:
        red = 1.0
        green = (-(wavelength - 645) / (645 - 580)) ** gamma
    elif 645 <= wavelength <= 750:
        # intensity fades toward the red edge
        attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
        red = (1.0 * attenuation) ** gamma
    return red, green, blue, alpha
def from_lambda_to_colormap(lambdas):
    """Build a matplotlib color map from an array of wavelengths in nm.

    Parameters
    ----------
    lambdas: array_like
        Wavelength array in nm.

    Returns
    -------
    spectral_map: matplotlib.colors.LinearSegmentedColormap
        Color map spanning the given wavelengths.
    """
    # one RGBA color per wavelength, interpolated into a continuous map
    colors = [wavelength_to_rgb(wl) for wl in lambdas]
    return matplotlib.colors.LinearSegmentedColormap.from_list("spectrum", colors)
def rebin(arr, new_shape):
    """Rebin a 2D numpy array to *new_shape* by averaging blocks of pixels.

    If the array shape is not exactly new_shape * parameters.CCD_REBIN
    (presumably the rebin factor — TODO confirm), the top/right margins
    are cropped first.

    Parameters
    ----------
    arr: np.array
        Numpy array to be reshaped.
    new_shape: array_like
        New shape of the array.

    Returns
    -------
    arr_rebinned: np.array
        Rebinned array.

    Examples
    --------
    >>> a = 4 * np.ones((10, 10))
    >>> b = rebin(a, (5, 5))
    >>> b
    array([[4., 4., 4., 4., 4.],
           [4., 4., 4., 4., 4.],
           [4., 4., 4., 4., 4.],
           [4., 4., 4., 4., 4.],
           [4., 4., 4., 4., 4.]])
    """
    new_shape = np.asarray(new_shape)
    shape_cropped = new_shape * parameters.CCD_REBIN
    if np.any(shape_cropped != np.asarray(arr.shape)):
        # Crop with explicit stop indices: the previous arr[:-margin] form
        # returned an empty array when one margin was zero (a[:-0] == a[:0]),
        # and tuple new_shape * int replicated the tuple instead of scaling it.
        arr = arr[:shape_cropped[0], :shape_cropped[1]]
    shape = (new_shape[0], arr.shape[0] // new_shape[0],
             new_shape[1], arr.shape[1] // new_shape[1])
    return arr.reshape(shape).mean(-1).mean(1)
def set_wcs_output_directory(file_name, output_directory=""):
    """Return the WCS output directory for an image: its base name with a
    '_wcs' suffix, placed next to the image or inside *output_directory*.

    Parameters
    ----------
    file_name: str
        File name of the image.
    output_directory: str, optional
        If not set, the main output directory is the one of the image,
        otherwise the specified directory is taken (default: "").

    Returns
    -------
    output: str
        The name of the output directory.

    Examples
    --------
    >>> set_wcs_output_directory("image.fits", output_directory="")
    'image_wcs'
    >>> set_wcs_output_directory("image.png", output_directory="outputs")
    'outputs/image_wcs'
    """
    base_dir = output_directory if output_directory != "" else os.path.dirname(file_name)
    stem = os.path.splitext(os.path.basename(file_name))[0]
    return os.path.join(base_dir, stem) + "_wcs"
def set_wcs_tag(file_name):
    """Return the WCS tag of an image: its base name without extension.

    Parameters
    ----------
    file_name: str
        File name of the image.

    Returns
    -------
    tag: str
        The tag.

    Examples
    --------
    >>> set_wcs_tag("image.fits")
    'image'
    """
    return os.path.splitext(os.path.basename(file_name))[0]
def set_wcs_file_name(file_name, output_directory=""):
    """Return the WCS file name (extension .wcs) associated to an image,
    placed inside its WCS output directory.

    Parameters
    ----------
    file_name: str
        File name of the image.
    output_directory: str, optional
        If not set, the main output directory is the one of the image,
        otherwise the specified directory is taken (default: "").

    Returns
    -------
    wcs_file_name: str
        The WCS file name.

    Examples
    --------
    >>> set_wcs_file_name("image.fits", output_directory="")
    'image_wcs/image.wcs'
    """
    wcs_dir = set_wcs_output_directory(file_name, output_directory=output_directory)
    return os.path.join(wcs_dir, set_wcs_tag(file_name) + '.wcs')
def set_sources_file_name(file_name, output_directory=""):
    """Return the file name holding the detected sources of an image,
    placed inside its WCS output directory (suffix _sources.fits).

    Parameters
    ----------
    file_name: str
        File name of the image.
    output_directory: str, optional
        If not set, the main output directory is the one of the image,
        otherwise the specified directory is taken (default: "").

    Returns
    -------
    sources_file_name: str
        The detected sources file name.

    Examples
    --------
    >>> set_sources_file_name("image.fits", output_directory="")
    'image_wcs/image_sources.fits'
    """
    wcs_dir = set_wcs_output_directory(file_name, output_directory=output_directory)
    return os.path.join(wcs_dir, f"{set_wcs_tag(file_name)}_sources.fits")
def set_gaia_catalog_file_name(file_name, output_directory=""):
    """Return the file name holding the Gaia catalog of an image,
    placed inside its WCS output directory (suffix _gaia.ecsv).

    Parameters
    ----------
    file_name: str
        File name of the image.
    output_directory: str, optional
        If not set, the main output directory is the one of the image,
        otherwise the specified directory is taken (default: "").

    Returns
    -------
    sources_file_name: str
        The Gaia catalog file name.

    Examples
    --------
    >>> set_gaia_catalog_file_name("image.fits", output_directory="")
    'image_wcs/image_gaia.ecsv'
    """
    wcs_dir = set_wcs_output_directory(file_name, output_directory=output_directory)
    return os.path.join(wcs_dir, f"{set_wcs_tag(file_name)}_gaia.ecsv")
def load_wcs_from_file(file_name):
    """Open a WCS FITS file and return its WCS as an astropy object.

    Parameters
    ----------
    file_name: str
        File name of the WCS FITS file.

    Returns
    -------
    wcs: WCS
        WCS Astropy object parsed from the primary header.
    """
    # Use a context manager: the original left the HDU list open,
    # leaking a file descriptor on every call.
    with fits.open(file_name) as hdulist:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # fix=False: do not attempt to repair non-standard WCS keywords
            wcs = WCS.WCS(hdulist[0].header, fix=False)
    return wcs
def imgslice(slicespec):
    """
    Utility function: convert a FITS slice specification (1-based)
    into the corresponding numpy array slice spec (0-based as python does, xy swapped).
    Parameters
    ----------
    slicespec: str
        FITS slice specification with the format [xmin:xmax,ymin:ymax]
    Returns
    -------
    slice: slice
        Slice object to be injected in a np.array for instance.
    Examples
    --------
    >>> imgslice('[11:522,1:2002]')
    (slice(0, 2002, None), slice(10, 522, None))
    """
    xspec, yspec = slicespec.strip('[]').split(',')
    xmin, xmax = map(int, xspec.split(':'))
    ymin, ymax = map(int, yspec.split(':'))
    # FITS indices are 1-based and (x, y) ordered: shift the starts by one
    # and swap the axes to get a numpy (row, column) slice.
    return np.s_[ymin - 1:ymax, xmin - 1:xmax]
def compute_correlation_matrix(cov):
    """Convert a covariance matrix into the corresponding correlation matrix.
    Each element is normalised by the square roots of the associated diagonal
    elements: rho_ij = cov_ij / sqrt(cov_ii * cov_jj).
    Parameters
    ----------
    cov: np.ndarray
        Square (N, N) covariance matrix with non-zero diagonal elements.
    Returns
    -------
    rho: np.ndarray
        The (N, N) correlation matrix, with ones on the diagonal.
    """
    # Vectorized form of rho[i, j] = cov[i, j] / sqrt(cov[i, i] * cov[j, j]):
    # the outer product of the standard deviations gives all denominators
    # at once, replacing the O(N^2) Python loops.
    sigmas = np.sqrt(np.diag(cov))
    return cov / np.outer(sigmas, sigmas)
def plot_correlation_matrix_simple(ax, rho, axis_names=None, ipar=None):
    """Display a correlation matrix as a blue/white/red image.
    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Axes on which the title is set (the image itself is drawn on the
        current pyplot axes).
    rho: np.ndarray
        Square correlation matrix, values expected in [-1, 1].
    axis_names: list, optional
        Labels for the matrix rows/columns, used as tick labels when given
        (default: None).
    ipar: np.ndarray, optional
        Integer indices of the rows/columns to display; all of them if None
        (default: None).
    """
    if ipar is None:
        ipar = np.arange(rho.shape[0]).astype(int)
    # Sub-select the requested rows and columns; vmin/vmax pin the
    # color scale to the correlation range [-1, 1].
    im = plt.imshow(rho[ipar[:, None], ipar], interpolation="nearest", cmap='bwr', vmin=-1, vmax=1)
    ax.set_title("Correlation matrix")
    if axis_names is not None:
        names = [axis_names[ip] for ip in ipar]
        plt.xticks(np.arange(ipar.size), names, rotation='vertical', fontsize=11)
        plt.yticks(np.arange(ipar.size), names, fontsize=11)
    cbar = plt.colorbar(im)
    cbar.ax.tick_params(labelsize=9)
    plt.gcf().tight_layout()
def resolution_operator(cov, Q, reg):
    """Return the resolution operator ``I - reg * cov @ Q``.
    Parameters
    ----------
    cov: np.ndarray
        Square (N, N) covariance matrix.
    Q: np.ndarray
        Square (N, N) regularisation matrix.
    reg: float
        Regularisation strength.
    Returns
    -------
    R: np.ndarray
        The (N, N) resolution operator.
    """
    identity = np.eye(cov.shape[0])
    return identity - reg * (cov @ Q)
def flip_and_rotate_radec_to_image_xy_coordinates(ra, dec, camera_angle=0, flip_ra_sign=1, flip_dec_sign=1):
    """Flip and rotate the vectors in pixels along (RA,DEC) directions to (x, y) image coordinates.
    The parity transformations are applied first, then rotation.
    Parameters
    ----------
    ra: array_like
        Vector coordinates along RA direction.
    dec: array_like
        Vector coordinates along DEC direction.
    camera_angle: float
        Angle of the camera between y axis and the North Celestial Pole counterclockwise, or equivalently between
        the x axis and the West direction counterclokwise. Units are degrees. (default: 0).
    flip_ra_sign: -1, 1, optional
        Flip RA axis is value is -1 (default: 1).
    flip_dec_sign: -1, 1, optional
        Flip DEC axis is value is -1 (default: 1).
    Returns
    -------
    x: array_like
        Vector coordinates along the x direction.
    y: array_like
        Vector coordinates along the y direction.
    Examples
    --------
    >>> from spectractor import parameters
    >>> parameters.OBS_CAMERA_ROTATION = 180
    >>> parameters.OBS_CAMERA_DEC_FLIP_SIGN = 1
    >>> parameters.OBS_CAMERA_RA_FLIP_SIGN = 1
    North vector
    >>> N_ra, N_dec = [0, 1]
    Compute North direction in (x, y) frame
    >>> flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 0, flip_ra_sign=1, flip_dec_sign=1)
    (0.0, 1.0)
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 180, flip_ra_sign=1, flip_dec_sign=1)
    '-0.0, -1.0'
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 90, flip_ra_sign=1, flip_dec_sign=1)
    '-1.0, 0.0'
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 90, flip_ra_sign=1, flip_dec_sign=-1)
    '1.0, -0.0'
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 90, flip_ra_sign=-1, flip_dec_sign=-1)
    '1.0, -0.0'
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 0, flip_ra_sign=1, flip_dec_sign=-1)
    '0.0, -1.0'
    >>> "%.1f, %.1f" % flip_and_rotate_radec_to_image_xy_coordinates(N_ra, N_dec, 0, flip_ra_sign=-1, flip_dec_sign=1)
    '0.0, 1.0'
    """
    # Negative sign because the rotation matrix is applied on the right
    # of the (ra, dec) row vectors.
    theta = -camera_angle * np.pi / 180
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]], dtype=float)
    parity = np.array([[flip_ra_sign, 0], [0, flip_dec_sign]], dtype=float)
    # Parity first, then rotation (matrices act on the right of row vectors).
    x, y = (np.asarray([ra, dec]).T @ (parity @ rotation)).T
    return x, y
# Run the embedded doctests when the module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| StarcoderdataPython |
380259 | from datetime import datetime
import time
import wget
import os
# Poll the wall clock and, once a fixed trigger time is reached, download
# every URL in `urls` a single time with wget, then stop.
now = datetime.now()
cu = now.strftime("%H:%M:%S")
# print("Current Time =", cu)
# NOTE(review): the original trigger "24:01:00" could never match, because
# strftime("%H") only yields 00-23, so the download never ran and the loop
# never ended. Assuming "just after midnight" was intended -- confirm.
TRIGGER_TIME = "00:01:00"
t = True  # loop sentinel; cleared once the downloads have completed
urls = ["url1", "url2"]  # in place of url1 place your direct downloadable links
while t:
    cu = datetime.now().strftime("%H:%M:%S")
    print("Current Time =", cu)
    # time.sleep(3)
    if cu == TRIGGER_TIME:
        print("\n $ started Downloadings....")
        for url in urls:
            try:
                # --no-check-certificate mirrors the original invocation;
                # the broad except keeps one failed download from
                # aborting the remaining ones.
                os.system(f"wget --no-check-certificate {url}")
            except Exception:
                pass
        print("\n $ ended the task..")
        # wget.download(url)
        t = False
    print(type(cu))
print("\n $ Done ..")
| StarcoderdataPython |
8136800 | <gh_stars>0
# Generated by Django 2.1 on 2018-08-24 21:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the CarModels table and turns
    Car.model into a foreign key pointing at it."""
    dependencies = [
        ('cars', '0005_auto_20180824_1123'),
    ]
    operations = [
        migrations.CreateModel(
            name='CarModels',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
            ],
        ),
        migrations.AlterField(
            model_name='car',
            name='model',
            # Deleting a CarModels row cascades to the cars referencing it.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.CarModels'),
        ),
    ]
| StarcoderdataPython |
3393851 | <gh_stars>10-100
from typing import *
from cognite.client import utils
from cognite.client._api_client import APIClient
from cognite.client.data_classes import (
DataSet,
DataSetAggregate,
DataSetFilter,
DataSetList,
DataSetUpdate,
TimestampRange,
)
class DataSetsAPI(APIClient):
    """Client-side API for the `/datasets` resource: create, retrieve,
    list, aggregate and update data sets."""
    _RESOURCE_PATH = "/datasets"
    _LIST_CLASS = DataSetList
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): presumably the datasets endpoint accepts at most 10
        # items per create request, hence the lowered batch limit -- confirm.
        self._CREATE_LIMIT = 10
    def __call__(
        self,
        chunk_size: int = None,
        metadata: Dict[str, str] = None,
        created_time: Union[Dict[str, Any], TimestampRange] = None,
        last_updated_time: Union[Dict[str, Any], TimestampRange] = None,
        external_id_prefix: str = None,
        write_protected: bool = None,
        limit: int = None,
    ) -> Generator[Union[DataSet, DataSetList], None, None]:
        """Iterate over data sets
        Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory.
        Args:
            chunk_size (int, optional): Number of data sets to return in each chunk. Defaults to yielding one data set a time.
            metadata (Dict[str, str]): Custom, application-specific metadata. String key -> String value.
            created_time (Union[Dict[str, Any], TimestampRange]): Range between two timestamps.
            last_updated_time (Union[Dict[str, Any], TimestampRange]): Range between two timestamps.
            external_id_prefix (str): Filter by this (case-sensitive) prefix for the external ID.
            write_protected (bool): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
            limit (int, optional): Maximum number of data sets to return. Defaults to return all items.
        Yields:
            Union[DataSet, DataSetList]: yields DataSet one by one if chunk is not specified, else DataSetList objects.
        """
        filter = DataSetFilter(
            metadata=metadata,
            created_time=created_time,
            last_updated_time=last_updated_time,
            external_id_prefix=external_id_prefix,
            write_protected=write_protected,
        ).dump(camel_case=True)
        return self._list_generator(method="POST", chunk_size=chunk_size, filter=filter, limit=limit)
    def __iter__(self) -> Generator[DataSet, None, None]:
        """Iterate over data sets
        Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory.
        Yields:
            DataSet: yields DataSet one by one.
        """
        return self.__call__()
    def create(self, data_set: Union[DataSet, List[DataSet]]) -> Union[DataSet, DataSetList]:
        """`Create one or more data sets. <https://docs.cognite.com/api/v1/#operation/createDataSets>`_
        Args:
            data_set: Union[DataSet, List[DataSet]]: Data set or list of data sets to create.
        Returns:
            Union[DataSet, DataSetList]: Created data set(s)
        Examples:
            Create new data sets::
                >>> from cognite.client import CogniteClient
                >>> from cognite.client.data_classes import DataSet
                >>> c = CogniteClient()
                >>> data_sets = [DataSet(name="1st level"), DataSet(name="2nd level")]
                >>> res = c.data_sets.create(data_sets)
        """
        return self._create_multiple(items=data_set)
    def retrieve(self, id: Optional[int] = None, external_id: Optional[str] = None) -> Optional[DataSet]:
        """`Retrieve a single data set by id. <https://docs.cognite.com/api/v1/#operation/getDataSets>`_
        Args:
            id (int, optional): ID
            external_id (str, optional): External ID
        Returns:
            Optional[DataSet]: Requested data set or None if it does not exist.
        Examples:
            Get data set by id::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> res = c.data_sets.retrieve(id=1)
            Get data set by external id::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> res = c.data_sets.retrieve(external_id="1")
        """
        # Exactly one of id/external_id must be given; rejects both or neither.
        utils._auxiliary.assert_exactly_one_of_id_or_external_id(id, external_id)
        return self._retrieve_multiple(ids=id, external_ids=external_id, wrap_ids=True)
    def retrieve_multiple(
        self,
        ids: Optional[List[int]] = None,
        external_ids: Optional[List[str]] = None,
        ignore_unknown_ids: bool = False,
    ) -> DataSetList:
        """`Retrieve multiple data sets by id. <https://docs.cognite.com/api/v1/#operation/getDataSets>`_
        Args:
            ids (List[int], optional): IDs
            external_ids (List[str], optional): External IDs
            ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
        Returns:
            DataSetList: The requested data sets.
        Examples:
            Get data sets by id::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> res = c.data_sets.retrieve_multiple(ids=[1, 2, 3])
            Get data sets by external id::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> res = c.data_sets.retrieve_multiple(external_ids=["abc", "def"], ignore_unknown_ids=True)
        """
        utils._auxiliary.assert_type(ids, "id", [List], allow_none=True)
        utils._auxiliary.assert_type(external_ids, "external_id", [List], allow_none=True)
        return self._retrieve_multiple(
            ids=ids, external_ids=external_ids, ignore_unknown_ids=ignore_unknown_ids, wrap_ids=True
        )
    def list(
        self,
        metadata: Dict[str, str] = None,
        created_time: Union[Dict[str, Any], TimestampRange] = None,
        last_updated_time: Union[Dict[str, Any], TimestampRange] = None,
        external_id_prefix: str = None,
        write_protected: bool = None,
        limit: int = 25,
    ) -> DataSetList:
        """`List data sets <https://docs.cognite.com/api/v1/#operation/listDataSets>`_
        Args:
            metadata (Dict[str, str]): Custom, application-specific metadata. String key -> String value.
            created_time (Union[Dict[str, Any], TimestampRange]): Range between two timestamps.
            last_updated_time (Union[Dict[str, Any], TimestampRange]): Range between two timestamps.
            external_id_prefix (str): Filter by this (case-sensitive) prefix for the external ID.
            write_protected (bool): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
            limit (int, optional): Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None
                to return all items.
        Returns:
            DataSetList: List of requested data sets
        Examples:
            List data sets and filter on write_protected::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> data_sets_list = c.data_sets.list(limit=5, write_protected=False)
            Iterate over data sets::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> for data_set in c.data_sets:
                ...     data_set # do something with the data_set
            Iterate over chunks of data sets to reduce memory load::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> for data_set_list in c.data_sets(chunk_size=2500):
                ...     data_set_list # do something with the list
        """
        filter = DataSetFilter(
            metadata=metadata,
            created_time=created_time,
            last_updated_time=last_updated_time,
            external_id_prefix=external_id_prefix,
            write_protected=write_protected,
        ).dump(camel_case=True)
        return self._list(method="POST", limit=limit, filter=filter)
    def aggregate(self, filter: Union[DataSetFilter, Dict] = None) -> List[DataSetAggregate]:
        """`Aggregate data sets <https://docs.cognite.com/api/v1/#operation/aggregateDataSets>`_
        Args:
            filter (Union[DataSetFilter, Dict]): Filter on data set filter with exact match
        Returns:
            List[DataSetAggregate]: List of data set aggregates
        Examples:
            Aggregate data_sets:
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> aggregate_protected = c.data_sets.aggregate(filter={"write_protected": True})
        """
        return self._aggregate(filter=filter, cls=DataSetAggregate)
    def update(
        self, item: Union[DataSet, DataSetUpdate, List[Union[DataSet, DataSetUpdate]]]
    ) -> Union[DataSet, DataSetList]:
        """`Update one or more data sets <https://docs.cognite.com/api/v1/#operation/updateDataSets>`_
        Args:
            item: Union[DataSet, DataSetUpdate, List[Union[DataSet, DataSetUpdate]]]: Data set(s) to update
        Returns:
            Union[DataSet, DataSetList]: Updated data set(s)
        Examples:
            Update a data set that you have fetched. This will perform a full update of the data set::
                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> data_set = c.data_sets.retrieve(id=1)
                >>> data_set.description = "New description"
                >>> res = c.data_sets.update(data_set)
            Perform a partial update on a data set, updating the description and removing a field from metadata::
                >>> from cognite.client import CogniteClient
                >>> from cognite.client.data_classes import DataSetUpdate
                >>> c = CogniteClient()
                >>> my_update = DataSetUpdate(id=1).description.set("New description").metadata.remove(["key"])
                >>> res = c.data_sets.update(my_update)
        """
        return self._update_multiple(items=item)
| StarcoderdataPython |
5161205 | # http://www.djangosnippets.org/snippets/741/
from django.template import Library
register = Library()
@register.filter_function
def order_by(queryset, args):
    """Template filter: order *queryset* by a comma separated list of field names."""
    fields = [field.strip() for field in args.split(',')]
    return queryset.order_by(*fields)
| StarcoderdataPython |
9704810 | <gh_stars>1-10
from . import vgg
from . import estimator
from . import util
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
| StarcoderdataPython |
6507270 | <filename>daily-questions/28-11-19/anne18#5106.py
# Print the first n terms of a tribonacci-like sequence seeded with 0, 1, 1
# (each further term is the sum of the three preceding ones).
l=[0,1,1]
n=int(input("Enter number of terms to be displayed"))
if n<=3:
    # Up to three terms: the seed list already holds the answer.
    for i in range(n):
        print(l[i], end=" ")
else:
    for i in range(3,n):
        # Each new term is the sum of the previous three.
        a=l[i-3]
        b=l[i-2]
        c=l[i-1]
        d=a+b+c
        l.append(d)
    for i in range(n):
        print(l[i], end=" ")
8023823 | <filename>functions/__init__.py
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 13:53:50 2017
@author: Lennart
"""
# Empty file
| StarcoderdataPython |
8129815 | #!/usr/bin/env python3
"""Simple script that effectively "greps" for function calls. By default it
looks for calls to the system allocator. For projects that provide their own
allocation interface, this helps ensure that direct calls to the system
allocator aren't accidentally introduced. It can also check for calls to exit
or calls to a user provided list of functions.
Finds all C/C++ files in the search directory, and uses cscope to check for and
report calls to the specified routines.
Prints out the location of any calls to the functions. Exit status is 0 on
success, 1 if calls were found, and 2 if there were fatal errors trying to run
this script.
"""
import contextlib
import fnmatch
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
def log_error(msg, fatal=False):
    """Report an error on stdout; a fatal error terminates with status 2."""
    sys.stdout.write('Error: {0}\n'.format(msg))
    if not fatal:
        return
    sys.exit(2)
def run_command(cmd):
    """Run *cmd* and return its decoded stdout.
    A missing executable or a non-zero exit status is fatal and is reported
    through log_error(..., fatal=True). Kept Popen-based rather than
    subprocess.check_output, which only exists from Python 2.7 and we
    still support 2.6 :("""
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except OSError:
        log_error('command not found: "{0}"'.format(cmd[0]), fatal=True)
    stdout, stderr = proc.communicate()
    stdout, stderr = stdout.decode(), stderr.decode()
    if proc.returncode == 0:
        return stdout
    log_error('command failed: "{0}"\nstderr was:\n{1}'.format(cmd, stderr),
              fatal=True)
def find_files(search_dir, extensions):
    """Return the paths of all files under *search_dir* whose extension is
    one of *extensions* (extensions are given without the leading dot)."""
    matches = []
    for dirpath, _, names in os.walk(search_dir):
        for ext in extensions:
            pattern = '*.{0}'.format(ext)
            matches.extend(os.path.join(dirpath, name)
                           for name in fnmatch.filter(names, pattern))
    return matches
def find_source_files(search_dir):
    """Return the paths of all C/C++ source and header files in *search_dir*."""
    cpp_sources = ['cc', 'cp', 'cxx', 'cpp', 'CPP', 'c++', 'C']
    cpp_headers = ['hh', 'H', 'hp', 'hxx', 'hpp', 'HPP', 'h++', 'tcc']
    c_exts = ['c', 'h']
    return find_files(search_dir, set(cpp_sources + cpp_headers + c_exts))
def build_cscope_ref(src_files, cscope_database):
    """Build a cscope cross-reference over *src_files* for fast searching.
    Flags: -bu rebuilds the cross-reference from scratch, -q enables the
    faster symbol lookup index, and -k skips the default system include
    directories (we only care whether our own files call the functions).
    """
    cmd = ['cscope', '-bu', '-q', '-k', '-f{0}'.format(cscope_database)]
    run_command(cmd + src_files)
def cscope_find_calls(func_call, cscope_database):
    """Return cscope's report of the callers of *func_call*.
    Uses -d to reuse the prebuilt cross-reference and -L3 for the
    line-oriented "find functions calling this function" query.
    """
    query = '-L3{0}'.format(func_call)
    return run_command(['cscope', '-d', query, '-f{0}'.format(cscope_database)])
@contextlib.contextmanager
def get_tmp_dir():
    """Yield a temporary directory that is deleted when the context exits.
    Hand-rolled replacement for tempfile.TemporaryDirectory(), which is
    only available from Python 3.2."""
    try:
        tmp_path = tempfile.mkdtemp()
        yield tmp_path
    finally:
        shutil.rmtree(tmp_path)
def check_for_calls(functions, search_dir, exclude_paths=None, rel_paths=True):
    """Check source files in search_dir for calls to functions. Don't check
    files that contain any of the exclude_paths. Report files relative to
    search_dir/../ if rel_paths is True, otherwise use absolute paths.
    Returns True calls were found, False otherwise"""
    # Prefix stripped from reported paths so output reads relative to
    # search_dir/.. (empty prefix keeps absolute paths).
    rel_dir = ''
    if rel_paths:
        rel_dir = os.path.abspath(os.path.join(search_dir, '..')) + os.path.sep
    src_files = find_source_files(search_dir)
    if exclude_paths:
        # Drop any source file whose path contains one of the exclude substrings.
        for exclude_path in exclude_paths:
            src_files = [s for s in src_files if exclude_path not in s]
    # Build the cscope cross-reference in a throwaway temp dir and run one
    # caller query per function.
    with get_tmp_dir() as tmp_dir:
        cscope_database_name = os.path.join(tmp_dir, 'cscope')
        build_cscope_ref(src_files, cscope_database_name)
        found_calls = False
        for func in functions:
            # If func is a tuple consider the first element to be the function
            # we're searching for and the second an alternative to suggest
            # to the user.
            alternative = None
            if isinstance(func, tuple):
                func, alternative = func
            out = cscope_find_calls(func, cscope_database_name)
            if out:
                found_calls = True
                msg = 'found call to "{0}"'.format(func)
                if alternative:
                    msg += ' consider using "{0}" instead'.format(alternative)
                log_error(msg)
                sys.stdout.write(out.replace(rel_dir, '') + '\n')
    return found_calls
def get_alloc_funcs():
    """Return the C allocation/deallocation routines to flag.
    Entries that are 2-tuples pair a routine with the suggested replacement;
    bare strings have no known alternative."""
    funcs = [('malloc', 'chpl_mem_alloc'),
             ('calloc', 'chpl_mem_calloc'),
             ('realloc', 'chpl_mem_realloc'),
             ('free', 'chpl_mem_free')]
    funcs += ['aligned_alloc', 'posix_memalign', 'memalign']   # aligned
    funcs += ['valloc', 'pvalloc']                             # page aligned
    funcs += ['strdup', 'strndup', 'asprintf', 'vasprintf']    # strings
    funcs += ['getline', 'getdelim']                           # obscure
    return funcs
def get_exit_funcs():
    """Return the C exit routines to flag."""
    # TODO look for abort too (need to remove calls from the runtime first)
    return ['exit']
def main():
    """Parse options and check for calls.
    Returns the result of check_for_calls: truthy when calls were found
    (so the process exits 1), falsy otherwise.
    """
    class MyParser(optparse.OptionParser):
        """Optparse wrapper that doesn't strip newlines from the epilog"""
        def format_epilog(self, formatter):
            return self.epilog
    parser = MyParser(epilog='\n{0}'.format(__doc__))
    parser.add_option('--search-dir', dest='search_dir', default=os.getcwd(),
                      help='directory to check for alloc calls [default: CWD]')
    parser.add_option('--exclude-paths', dest='exclude_paths', default='',
                      help='comma separated list of (sub)paths/files to skip')
    parser.add_option('--abs-paths', dest='abs_paths', action="store_true",
                      help='report abs paths vs. rel to --search-dir/../')
    parser.add_option('--check-alloc', dest='check_alloc', action="store_true",
                      help='check for calls to the system allocator')
    parser.add_option('--check-exit', dest='check_exit', action="store_true",
                      help='check for calls to exit routines')
    parser.add_option('--check-calls', dest='check_calls', default='',
                      help='comma separated list of calls to check for')
    options = parser.parse_args()[0]
    # Split the user-provided list, dropping empty fragments.
    check_calls = [x.strip() for x in options.check_calls.split(',') if x]
    if options.check_exit:
        check_calls += get_exit_funcs()
    # Default to the allocator check when no explicit checks were requested.
    if options.check_alloc or not check_calls:
        check_calls += get_alloc_funcs()
    search_dir = os.path.abspath(options.search_dir)
    exclude_paths = [x.strip() for x in options.exclude_paths.split(',') if x]
    rel_paths = not options.abs_paths
    return check_for_calls(check_calls, search_dir, exclude_paths, rel_paths)
# Exit status: 0 when no calls found, 1 when check_for_calls reports calls,
# 2 for fatal errors (raised inside log_error).
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
4828162 | <reponame>felixpelaez/python-sdk<filename>tests/common/chain_dict.py
import unittest
from devo.common import ChainDict
class TestChainDict(unittest.TestCase):
    """Tests for the devo.common ChainDict key-chain helpers."""
    def setUp(self):
        # Expected final dict, and the (key, value) pairs used to build it;
        # a list key such as ['disc', 'type'] denotes a nested key path.
        self.test_dict = {'film': "Kung Fury", 'disc': {'type': 'dvd'}}
        self.test_values = [['film', 'Kung Fury'], [['disc', 'type'], 'dvd']]
    def test_set_key_chain(self):
        # Static helper: builds the nested path ['one', 'two'] in an empty dict.
        self.assertDictEqual({'one': {'two': 'yes'}},
                             ChainDict.set_key_chain(dict(),
                                                     ['one', 'two'],
                                                     'yes'))
    def test_set_key(self):
        # Instance API: applying every pair reproduces the expected dict.
        t = ChainDict()
        for item in self.test_values:
            t.set_key(item[0], item[1])
        self.assertDictEqual(self.test_dict, t)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5142794 | <reponame>lycantropos/ground<filename>tests/base_tests/context_tests/test_polygons_box.py
from typing import (Sequence,
Tuple)
from hypothesis import given
from ground.base import Context
from ground.hints import Polygon
from tests.utils import (is_box,
permute,
reverse_box_coordinates,
reverse_polygons_coordinates,
reverse_sequence)
from . import strategies
@given(strategies.contexts_with_polygons_sequences)
def test_basic(context_with_polygons: Tuple[Context, Sequence[Polygon]]
               ) -> None:
    # The bounding box of any polygon sequence is a valid box object.
    context, polygons = context_with_polygons
    result = context.polygons_box(polygons)
    assert is_box(result)
@given(strategies.contexts_with_polygons_sequences)
def test_reversals(context_with_polygons: Tuple[Context, Sequence[Polygon]]
                   ) -> None:
    # The box is invariant under reversing the sequence order, and reversing
    # the coordinates commutes with taking the box.
    context, polygons = context_with_polygons
    result = context.polygons_box(polygons)
    assert result == context.polygons_box(reverse_sequence(polygons))
    assert result == reverse_box_coordinates(context.polygons_box(
            reverse_polygons_coordinates(polygons)))
@given(strategies.contexts_with_polygons_sequences, strategies.indices)
def test_permutations(context_with_polygons: Tuple[Context, Sequence[Polygon]],
                      index: int) -> None:
    # The box is invariant under any permutation of the polygon sequence.
    context, polygons = context_with_polygons
    result = context.polygons_box(polygons)
    assert result == context.polygons_box(permute(polygons, index))
| StarcoderdataPython |
3247995 | import numpy as np
import cv2 as cv
import time
# Sample input used by the script body at the bottom of the file.
image = np.array([[150, 2, 5], [80, 145, 45], [74, 102, 165]]) # a 3X3 array of numbers
pad = 1;
# NOTE(review): this module-level padded_image is never used below --
# decompose() pads its argument internally.
padded_image = np.pad(image, pad, 'constant'); # Padding the image with zeros
# This reduces the need to verify corner pixels in the 8-connectivity algo
# The extra zeros can be discarded later on
def decompose(image):
    """Build an adjacency mapping pixel value -> list of 4-connected
    neighbouring values; zero-valued and same-valued neighbours are dropped.
    Also returns the wall-clock time spent building the mapping.
    NOTE(review): cells sharing the same pixel value overwrite each other's
    entry, so pixel values are presumably unique in the image -- confirm.
    """
    margin = 1
    padded = np.pad(image, margin, 'constant')
    graph = {}
    t0 = time.time()
    for row in range(margin, padded.shape[0] - margin):
        for col in range(margin, padded.shape[1] - margin):
            window = np.array(padded[row - margin:row + margin + 1,
                                     col - margin:col + margin + 1])
            # Zero out the diagonal corners to restrict to 4-connectivity.
            window[0, 0] = window[0, 2] = window[2, 0] = window[2, 2] = 0
            centre = padded[row][col]
            neighbours = []
            for i in range(window.shape[0]):
                for j in range(window.shape[1]):
                    value = window[i][j]
                    if value != 0 and value != centre:
                        neighbours.append(value)
            graph[centre] = neighbours
    return graph, time.time() - t0
def DFS(graph1, start):
    """Depth-first traversal of *graph1* (a dict node -> neighbour list)
    starting at *start*; returns the list of nodes in visit order."""
    visited = []
    stack = [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        stack.extend(graph1[node])
    return visited
# Build the value-adjacency graph of the sample image and walk it from 150.
# NOTE(review): the name "time" shadows the time module from here onwards.
graph, time = decompose(image);
path = DFS(graph,150);
print(graph)
print(path)
print('time taken----->');
print(time)
141999 | <filename>hello.py
#!/usr/bin/env python3
import os
import json
import templates
# CGI entry point: reply with the full CGI environment as pretty-printed JSON.
print('Content-Type: application/json')
print()
print(json.dumps(dict(os.environ), indent=2))
# Earlier HTML-echo variant, kept commented out for reference:
# print('Content-Type: text/html')
# print()
# print("""<!DOCTYPE html>
# <html>
# <body>
# <h1>HELLO I AM HTML</h1>
# """)
# print("<ul>")
# print(f"<p> QUERY_STRING={os.environ['QUERY_STRING']} </p >")
# print(f"<p> HTTP_USER_AGENT={os.environ['HTTP_USER_AGENT']} </p>")
# print("""
# </ul>
# """)
# print("""
# </ul>
# </body>
# </html>
# """)
| StarcoderdataPython |
390643 | <gh_stars>1-10
from .game import Game
from .flowchart import Flowchart
from .position import Position | StarcoderdataPython |
4917506 | <filename>import/party_scraper/src/party_scraper/spiders/muenster.py
# -*- coding: utf-8 -*-
""" Copyright (C) 2019 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact: https://github.com/thunfischtoast or christian.roemer[ät]posteo.de
"""
import scrapy
import urllib.request
import urllib.parse
import json
import logging
import os
import sys
import datetime
import pytz
from scrapy.shell import inspect_response
from party_scraper import items
def clean_date(str, inf='%Y-%m-%d', outf='%d.%m.%Y'):
    """Reformat a date string from the *inf* format to the *outf* format.
    NOTE: the first parameter shadows the built-in ``str``; the name is
    kept for interface compatibility with existing callers.
    """
    parsed = datetime.datetime.strptime(str, inf)
    return datetime.datetime.strftime(parsed, outf)
class MuensterSpider(scrapy.Spider):
mapquest_api_key = None
name = "muenster"
allowed_domains = ["muenster.de"]
def start_requests(self):
start_url = "https://www.muenster.de/veranstaltungskalender/scripts/frontend/suche.php"
if "ELASTICSEARCH_URL_PREFIX" in os.environ:
self.elasticsearch_url_param = os.environ["ELASTICSEARCH_URL_PREFIX"]
if ('SCRAPE_START' in os.environ and 'SCRAPE_END' in os.environ):
self.start = clean_date(os.environ['SCRAPE_START'])
self.end = clean_date(os.environ['SCRAPE_END'])
else:
self.start = datetime.datetime.strftime(datetime.datetime.today(), "%d.%m.%Y")
self.end = datetime.datetime.strftime(datetime.datetime.today() + datetime.timedelta(days=6), "%d.%m.%Y")
self.log("------------ START PARAMETERS1 -------------- ")
self.log(f"START: {self.start}")
self.log(f"END: {self.end}")
self.log("------------ ")
if self.mapquest_api_key is None and "MAPQUEST_KEY" in os.environ:
self.mapquest_api_key = os.environ["MAPQUEST_KEY"]
if hasattr(self, "elasticsearch_url_param") == False:
self.elasticsearch_url_param = None
self.elasticsearch_url = getattr(
self, "elasticsearch_url_prefix", self.elasticsearch_url_param
)
# TODO: validate start/end dates
yield scrapy.Request(start_url, self.parse)
def parse(self, response):
"""Submit the search form searching for events that start today."""
datum_von = self.start
datum_bis = self.end
zeitraum = 'zeitraum'
self.log("------------ START PARAMETERS2 -------------- ")
self.log("START: " + datum_von)
self.log("END: " + datum_bis)
self.log("ES: " + str(self.elasticsearch_url))
self.log("------------ ")
return scrapy.FormRequest.from_response(
response,
formid="submit",
dont_filter=True,
formdata={
"datum_bis": datum_bis,
"datum_von": datum_von,
"submit": "Suchen",
"suchstring": "",
"volltextsuche-verknuepfung": "und",
"zeitraum": zeitraum,
"zielgruppe": "alle",
},
callback=self.after_post,
)
def after_post(self, response):
"""Response here is the overview page over all events. We collect the links to the individual detail pages."""
# Uncomment following line to interactively debug the response in the browser
# inspect_response(response, self)
detail_links = response.xpath("//a[text() = 'Details']/@href").extract()
for href in detail_links:
categories = response.xpath(
"//a[@href = '"
+ href
+ "']/ancestor::div[@class = 'eintrag ']/preceding-sibling::div[@class = 'kategorie']/text()"
).extract()
category = categories[-1]
# select the last of all preceding categories
if category is not None:
category = category.strip(" \t\n\r")
yield response.follow(
href, callback=self.extract_event, meta={"category": category}
)
def extract_event(self, response):
"""Callback function for the detail pages. We find the indivudal data points and try to bring the date/time in proper form, then
summarize it into a Event-object and return it."""
# extract the interesting data points
title = self.getText(response, "titel")
subtitle = self.getText(response, "untertitel")
raw_datetime = self.getText(response, "datum-uhrzeit")
description = self.getText(response, "detailbeschreibung")
location = self.getText(response, "location")
location_adresse = self.getText(response, "location-adresse")
link = (
response.xpath("//div[@class='detail-link']/a/@href")
.extract_first()
)
if link is not None:
link = link.strip(" \t\n\r")
else:
link = None
pos = (
response.xpath("//input[@name='pos']/@value")
.extract_first()
.strip(" \t\n\r")
)
times = self.produce_dates(raw_datetime)
start_date = times[0]
end_date = times[1]
lat = ""
lng = ""
# if a mapquest api key was provided we use it for geocoding
if self.mapquest_api_key is not None:
latLng = self.fetchMapquestCoordinates(location_adresse)
if latLng is not None:
lat = latLng[0]
lng = latLng[1]
else:
self.log("No mapquest_api_key! Skip location mapping.")
event = items.PartyItem(
title=title,
subtitle=subtitle,
start_date=start_date,
end_date=end_date,
description=description,
link=link,
category=response.meta["category"],
location_name=location,
location_address=location_adresse,
geo=dict(lat=lat, lon=lng),
#location_lng=lng,
#pos=pos,
source='muenster.de'
)
if (
self.elasticsearch_url is not None
and isinstance(lat, float)
and isinstance(lng, float)
):
print(
"Check before ES: "
+ str(self.elasticsearch_url)
+ "places/event_"
+ event["pos"]
+ " at pos lat:"
+ str(lat)
+ "; lng:"
+ str(lng)
)
self.log("Putting into ES")
self.put_into_es(event)
return event
def getText(self, response, clazz):
"""Find the first div with the class clazz and extract the text, stripping whitespaces and such."""
return (
response.xpath("//div[@class='" + clazz + "']/text()")
.extract_first()
.strip(" \t\n\r")
)
def produce_dates(self, raw_datetime):
    """Parse the raw date/time string into ISO 8601 start/end strings.

    Input is usually "Donnerstag, 26.7.2018, 21.30 - 23.30 Uhr"; when
    only a start time exists it is "Donnerstag, 26.7.2018, 21.30 Uhr",
    and sometimes the time is missing entirely ("Donnerstag, 26.7.2018,").
    The leading day of the week is ignored.

    Returns a (start_date, end_date) tuple of ISO strings; end_date is
    "" when no end time was given.
    """
    datetime_parts = raw_datetime.split(",")  # split at commas
    date = datetime_parts[1].strip(" \t\n\r")  # drop whitespaces and such
    start_time = ""
    end_time = ""
    if len(datetime_parts) > 2:  # if there is a time given
        time = datetime_parts[2].replace("Uhr", "")  # drop unnecessary string
        time_splits = time.split("-")  # split start and end time
        start_time = time_splits[0].strip(" \t\n\r")
        if len(time_splits) > 1:
            end_time = time_splits[1].strip(" \t\n\r")
    # sometimes, if the event contains two start/end times, the time looks
    # like "14.00 u. 16.00" - use the first one for now.  In future it
    # would be better to retain all times.
    if " u. " in start_time:
        start_time = start_time.split(" u. ")[0]
    if " u. " in end_time:
        end_time = end_time.split(" u. ")[0]
    # produce proper ISO conform datetime strings
    # BUG FIX: the original compared strings with "is"/"is not", which
    # relies on CPython string interning (and emits a SyntaxWarning on
    # modern Pythons) - use equality instead.  The no-time branch now
    # also returns an ISO string, consistent with the other branches.
    end_date = ""
    if start_time == "":
        start_date = datetime.datetime.strptime(date, "%d.%m.%Y").isoformat()  # case: no time
    else:
        start_date = datetime.datetime.strptime(
            date + " " + start_time, "%d.%m.%Y %H.%M"
        ).isoformat()
    if end_time != "":
        end_date = datetime.datetime.strptime(
            date + " " + end_time, "%d.%m.%Y %H.%M"
        ).isoformat()
    return (start_date, end_date)
def fetchMapquestCoordinates(self, location_adresse):
    """Geocode *location_adresse* via the MapQuest Open Geocoding API.

    Returns a (lat, lng) tuple, or None when the request fails, the API
    reports an error, or the result lies outside the rough Muenster
    bounding box.
    Documentation: https://developer.mapquest.com/documentation/open/geocoding-api/address/get/
    """
    self.log("Attempt geocoding: " + location_adresse)
    contents_json = None
    try:
        parsed_location_adresse = urllib.parse.quote(location_adresse)
        mapquest_url = (
            "http://open.mapquestapi.com/geocoding/v1/address?key="
            + self.mapquest_api_key
            + "&location="
            + parsed_location_adresse
            + ",M%C3%BCnster,Germany"
        )
        logging.debug("Attempting to fetch " + mapquest_url)
        resource = urllib.request.urlopen(mapquest_url)
        contents = resource.read().decode(resource.headers.get_content_charset())
        contents_json = json.loads(contents)
    except Exception as e:
        logging.warning("Location geocoding failed with exception: " + str(e))
        return None
    status_code = contents_json["info"]["statuscode"]
    if status_code != 0:  # some kind of error happened
        # BUG FIX: statuscode is an int; concatenating it to a str raised
        # TypeError and masked the real geocoding error.
        logging.warning("Location geocoding failed with code " + str(status_code))
        return None
    latLng = contents_json["results"][0]["locations"][0]["latLng"]
    lat = latLng["lat"]
    lng = latLng["lng"]
    self.log("LOCATION: " + str(lat) + ", " + str(lng))
    # rough bounding box around Muenster, Germany
    if lat > 52.3 or lat < 51.8 or lng > 8 or lng < 7.3:
        self.log("NOT MUENSTER! Setting location to ZERO")
        return None  # not in Muenster
    return (lat, lng)
def put_into_es(self, event):
    """Push the given event into Elasticsearch.

    Reads ELASTICSEARCH_URL_PREFIX (".../<index_prefix>") from the
    environment, lazily caches one Elasticsearch client on the spider
    instance and indexes the event as document "event_<pos>" in the
    "<index_prefix>places" index.
    """
    # local import so the spider also works without the elasticsearch
    # package installed when ES indexing is not used
    from elasticsearch import Elasticsearch
    esurl, index_prefix = os.environ["ELASTICSEARCH_URL_PREFIX"].rsplit(
        "/", maxsplit=1
    )
    # create the client only once per spider instance
    if hasattr(self, "es") == False:
        self.es = Elasticsearch(esurl)
    # NOTE(review): extract_event() builds the PartyItem with
    # geo=dict(lat=..., lon=...), location_address=..., location_name=...
    # and no "pos"/"location"/"location_lat"/"location_lng"/
    # "location_addresse" keys - the lookups below would then raise
    # KeyError.  Confirm the PartyItem field names before relying on this.
    content = {
        "address": {
            "geo": {"lat": event["location_lat"], "lon": event["location_lng"]},
            "geometry": {
                "type": "Point",
                "coordinates": [event["location_lng"], event["location_lat"]],
            },
            "street": event["location_addresse"],
        },
        "date_start": event["start_date"],
        "type": "event",
        "name": event["title"],
        "id": event["pos"],
        "properties": {
            "ID": event["pos"],
            "name": event["title"],
            "subtitle": event["subtitle"],
            "description": event["description"],
            "link": event["link"],
            "location": event["location"],
            "street": event["location_addresse"],
        },
    }
    # end date is optional
    if "end_date" in event and len(event["end_date"]) > 0:
        content["date_end"] = event["end_date"]
    res = self.es.index(
        index=(index_prefix + "places"),
        doc_type="_doc",
        body=content,
        id="event_" + event["pos"],
    )
    self.log(res)
| StarcoderdataPython |
11297711 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# object_storage_bulk_copy.py
#
# @author: <NAME> and <NAME>
#
# Supports Python 3
##########################################################################
# Info:
# Bulk copy object storage bucket to other bucket with parallel threads
#
##########################################################################
# Application Command line parameters
#
# -c config - Config file section to use (tenancy profile)
# -t profile - Profile in config file, DEFAULT as default
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -sb source_bucket
# -sr source_region
# -sn source_namespace
# -sp source_prefix_include
# -se source_prefix_exclude
# -db destination_bucket
# -dr destination_region
# -ig ignore_check_exist
##########################################################################
import pickle
import threading
import time
import queue
import oci
import argparse
import datetime
import sys
import os
##########################################################################
# Pre Main
##########################################################################
# Get Command Line Parser
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-c', default="", dest='config_file', help="Config File (default=~/.oci/config)")
parser.add_argument('-sb', default="", dest='source_bucket', help='Source Bucket Name')
parser.add_argument('-sr', default="", dest='source_region', help='Source Region (Default current connection)')
parser.add_argument('-sn', default="", dest='source_namespace', help='Source Namespace (Default current connection)')
parser.add_argument('-sp', default="", dest='source_prefix_include', help='Source Prefix Include')
parser.add_argument('-se', default="", dest='source_prefix_exclude', help='Source Prefix Exclude')
parser.add_argument('-db', default="", dest='destination_bucket', help='Destination Bucket Name')
parser.add_argument('-dr', default="", dest='destination_region', help='Destination Region')
parser.add_argument('-dn', default="", dest='destination_namespace', help='Destination Namespace (Default current connection)')
parser.add_argument('-ig', action='store_true', default=False, dest='ignore_exist', help='Ignore Check if files exist at Destination')
cmd = parser.parse_args()
# show usage when called with no arguments at all
if len(sys.argv) < 2:
    parser.print_help()
    raise SystemExit
# source and destination bucket names are mandatory
if not cmd.source_bucket or not cmd.destination_bucket:
    print("Source and Destination buckets parameters are required !!!\n")
    parser.print_help()
    raise SystemExit
# Worker configuration
request_worker_count = 50  # threads issuing copy requests
status_worker_count = 50  # threads polling work-request status
status_interval = 60  # seconds between status sweeps in main()
# Try timeout (exponential backoff: squared on each retry up to the max)
base_retry_timeout = 2
max_retry_timeout = 16**2
# Global Variables and queues (shared by all worker threads)
data = {}  # object name -> state dict, guarded by data_lock
data_lock = threading.Lock()
dest_bucket_memory = {}  # destination object name -> md5, used to skip existing files
known_q = queue.Queue()  # objects waiting for a copy request
update_q = queue.Queue()  # objects waiting for a status poll
# Global Variables
object_storage_client = None
object_storage_client_dest = None
source_bucket = cmd.source_bucket
source_region = cmd.source_region
source_namespace = cmd.source_namespace
destination_namespace = cmd.destination_namespace
source_prefix = cmd.source_prefix_include
source_prefix_exclude = cmd.source_prefix_exclude
destination_bucket = cmd.destination_bucket
destination_region = cmd.destination_region
# resumable-state file name derived from the bucket pair
state_file = source_bucket + "." + destination_bucket + ".wrk"
# Update Variables based on the parameters
config_file = (cmd.config_file if cmd.config_file else oci.config.DEFAULT_LOCATION)
config_profile = (cmd.config_profile if cmd.config_profile else oci.config.DEFAULT_PROFILE)
##########################################################################
# Create signer for Authentication
# Input - config_file, config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_file, config_profile, is_instance_principals, is_delegation_token):
    """Build an OCI (config, signer) pair.

    Supports three authentication modes: instance principals, delegation
    token (cloud shell) and plain config-file credentials.  Aborts the
    script on unrecoverable authentication problems.
    """
    if is_instance_principals:
        # -----------------------------
        # Instance Principals
        # -----------------------------
        try:
            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
            return {'region': signer.region, 'tenancy': signer.tenancy_id}, signer
        except Exception:
            print_header("Error obtaining instance principals certificate, aborting")
            raise SystemExit

    if is_delegation_token:
        # -----------------------------
        # Delegation Token (cloud shell)
        # -----------------------------
        try:
            # the cloud shell exports these two variables
            env_config_file = os.environ.get('OCI_CONFIG_FILE')
            env_config_section = os.environ.get('OCI_CONFIG_PROFILE')
            if env_config_file is None or env_config_section is None:
                print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
                print("")
                raise SystemExit
            config = oci.config.from_file(env_config_file, env_config_section)
            # read the delegation token pointed to by the profile
            with open(config["delegation_token_file"], 'r') as token_file:
                delegation_token = token_file.read().strip()
            signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)
            return config, signer
        except KeyError:
            print("* Key Error obtaining delegation_token_file")
            raise SystemExit

    # -----------------------------
    # Config file authentication
    # -----------------------------
    config = oci.config.from_file(
        config_file if config_file else oci.config.DEFAULT_LOCATION,
        config_profile if config_profile else oci.config.DEFAULT_PROFILE,
    )
    signer = oci.signer.Signer(
        tenancy=config["tenancy"],
        user=config["user"],
        fingerprint=config["fingerprint"],
        private_key_file_location=config.get("key_file"),
        pass_phrase=oci.config.get_config_value_or_default(config, "pass_phrase"),
        private_key_content=config.get("key_content"),
    )
    return config, signer
##############################################################################
# get time
##############################################################################
def get_time(full=False):
    """Current local time as a string; the date part is included when *full* is true."""
    fmt = "%Y-%m-%d %H:%M:%S" if full else "%H:%M:%S"
    return datetime.datetime.now().strftime(fmt)
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
    """Print *name* centered inside a 90-character wide '#' banner."""
    width = 90
    print("")
    print("#" * width)
    print("#" + name.center(width - 2, " ") + "#")
    print("#" * width)
##########################################################################
# Print Info
##########################################################################
def print_command_info():
    """Print a banner listing the runtime parameters of this copy run."""
    print_header("Running Object Storage Bulk Copy")
    print("Written by <NAME> and <NAME>, July 2020")
    print("Starts at : " + get_time(True))
    print("Command Line : " + ' '.join(x for x in sys.argv[1:]))
    # the values below come from the module-level globals, populated from
    # the command line and completed by connect_to_object_storage()
    print("Source Namespace : " + source_namespace)
    print("Source Region : " + source_region)
    print("Source Bucket : " + source_bucket)
    print("Source Prefix : " + source_prefix)
    print("Dest Namespace : " + destination_namespace)
    print("Dest Region : " + destination_region)
    print("Dest Bucket : " + destination_bucket)
    print("State File : " + state_file)
##############################################################################
# copy_request_worker
##############################################################################
def copy_request_worker():
    """Daemon worker: take object names from known_q and issue copy requests.

    Retries with exponential backoff (base_retry_timeout, squared each
    attempt, giving up above max_retry_timeout) on API errors, then
    records the returned work-request id and marks the object REQUESTED.
    Runs forever; intended to be started as a daemon thread.
    """
    while True:
        object_ = known_q.get()
        state = get_state_for_object(object_)
        interval_exp = base_retry_timeout
        while True:
            try:
                response = copy_object(source_namespace, source_bucket, object_, destination_namespace, destination_region, destination_bucket, object_)
                break
            except Exception as e:
                if interval_exp > max_retry_timeout:
                    raise
                # BUG FIX: the original printed response.status here, but
                # response is None/unset when copy_object raised - report
                # the exception itself instead.
                print(" Received %s from API for object %s, will wait %s seconds before retrying." % (e, object_, interval_exp))
                time.sleep(interval_exp)
                interval_exp **= 2
        state['work-request-id'] = response.headers.get('opc-work-request-id')
        state['status'] = 'REQUESTED'
        set_state_for_object(object_, state, persist=False)
        known_q.task_done()
##############################################################################
# work_request_status_worker
##############################################################################
def work_request_status_worker():
    """Daemon worker: poll the work-request status for objects on update_q.

    Uses the same exponential backoff as copy_request_worker and stores
    the refreshed status back into the shared state dict.
    """
    while True:
        object_ = update_q.get()
        state = get_state_for_object(object_)
        interval_exp = base_retry_timeout
        while True:
            try:
                response = object_storage_client.get_work_request(state['work-request-id'])
                state['status'] = response.data.status
                break
            except Exception as e:
                if interval_exp > max_retry_timeout:
                    raise
                # BUG FIX: the original printed response.status, but
                # response is unbound when get_work_request raised
                # (UnboundLocalError) - report the exception instead.
                print(" Received %s from API for work request %s, will wait %s seconds before retrying." % (e, state['work-request-id'], interval_exp))
                time.sleep(interval_exp)
                interval_exp **= 2
        set_state_for_object(object_, state, persist=False)
        update_q.task_done()
##############################################################################
# add_objects_to_queue
##############################################################################
def load_dest_bucket_to_mem(object_storage_client_dest, destination_namespace, destination_bucket):
    """Cache name -> md5 of every object in the destination bucket.

    Fills the module-level dest_bucket_memory dict, paging through the
    bucket listing (restricted to source_prefix) and printing progress
    every 100 pages.
    """
    global dest_bucket_memory
    page = 0
    start_with = None
    while True:
        response = object_storage_client_dest.list_objects(
            destination_namespace, destination_bucket, start=start_with,
            prefix=source_prefix, fields="md5",
            retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        start_with = response.data.next_start_with
        if page % 100 == 0 and page > 0:
            print(get_time() + " - Loaded " + str(len(dest_bucket_memory)) + " files...")
        for obj in response.data.objects:
            dest_bucket_memory[str(obj.name)] = str(obj.md5)
        if not start_with:
            break
        page += 1
    print(get_time() + " - Loaded " + str(len(dest_bucket_memory)) + " files.")
##############################################################################
# add_objects_to_queue
##############################################################################
def add_objects_to_queue(ns, bucket):
    """List the source bucket and enqueue every object to be copied.

    Pages through the listing (restricted to source_prefix), skips names
    excluded by source_prefix_exclude and names already present in the
    destination (dest_bucket_memory), marks the rest KNOWN and puts them
    on known_q - the copy workers start immediately.  Returns the number
    of enqueued objects.
    """
    global known_q
    global dest_bucket_memory
    count = 0  # objects enqueued
    skipped = 0  # objects already present at the destination
    next_starts_with = None  # pagination cursor
    while True:
        response = object_storage_client.list_objects(ns, bucket, start=next_starts_with, prefix=source_prefix, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        next_starts_with = response.data.next_start_with
        for object_ in response.data.objects:
            if source_prefix and not object_.name.startswith(source_prefix):
                continue
            if source_prefix_exclude and object_.name.startswith(source_prefix_exclude):
                continue
            # skip if exist in the dest bucket (Option is to use MD5 for comparison)
            if str(object_.name) in dest_bucket_memory:
                skipped += 1
                if skipped % 100000 == 0:
                    print(get_time() + " - Skipped " + str(skipped) + " exist files...")
                continue
            set_state_for_object(object_.name, {'status': 'KNOWN'}, persist=False)
            known_q.put(object_.name)
            count += 1
            if count % 100000 == 0:
                print(get_time() + " - Added " + str(count) + " files to queue...")
        if not next_starts_with:
            break
    # if skipped files, print the final count
    if skipped > 0:
        print(get_time() + " - Skipped " + str(skipped) + " exist files...")
    save_all_state()
    return count
##############################################################################
# set_state_for_object
##############################################################################
def set_state_for_object(object_, state, persist=True):
    """Record *state* for *object_* under the shared lock.

    When *persist* is true the whole state dict is also pickled to
    state_file.  Returns the stored state.
    """
    global data
    # BUG FIX: the original paired data_lock.acquire()/release() without
    # try/finally, so an exception in open()/pickle.dump() left the lock
    # held forever and deadlocked every worker - use a with-block.
    with data_lock:
        data[object_] = state
        if persist:
            with open(state_file, 'wb') as sf:
                pickle.dump(data, sf, protocol=pickle.HIGHEST_PROTOCOL)
        return data[object_]
##############################################################################
# save_all_state
##############################################################################
def save_all_state():
    """Persist the whole shared state dict to state_file under the lock."""
    # BUG FIX: use a with-block so the lock is released even when
    # open()/pickle.dump() raises (the original leaked the lock and
    # would deadlock every worker thread).
    with data_lock:
        with open(state_file, 'wb') as sf:
            pickle.dump(data, sf, protocol=pickle.HIGHEST_PROTOCOL)
##############################################################################
# get_state_for_object
##############################################################################
def get_state_for_object(object_):
    """Return the state dict tracked for *object_* (KeyError if unknown).

    NOTE: reads without taking data_lock; mutations go through
    set_state_for_object().
    """
    return data[object_]
##############################################################################
# get_work_request_count_by_status
##############################################################################
def get_work_request_count_by_status(status):
    """Count tracked objects whose recorded status equals *status*.

    Uses a generator over data.values() instead of materializing a list
    of matching keys (same result, O(1) extra memory).
    """
    return sum(1 for state in data.values() if state.get('status') == status)
##############################################################################
# copy_object
##############################################################################
def copy_object(src_ns, src_b, src_o, dst_ns, dst_r, dst_b, dst_o):
    """Submit a server-side copy request for one object.

    Returns the API response; the copy itself runs asynchronously as an
    OCI work request (id in the 'opc-work-request-id' response header).
    """
    details = oci.object_storage.models.CopyObjectDetails(
        source_object_name=src_o,
        destination_namespace=dst_ns,
        destination_region=dst_r,
        destination_bucket=dst_b,
        destination_object_name=dst_o,
    )
    return object_storage_client.copy_object(src_ns, src_b, details)
##############################################################################
# update_all_work_requests_status
##############################################################################
def update_all_work_requests_status(ns, bucket):
    """Queue every in-flight object for a status refresh and wait for it.

    ``ns`` and ``bucket`` are accepted for interface compatibility but
    are not used; the status workers consume update_q.
    """
    terminal = ('KNOWN', 'COMPLETED', 'FAILED', 'CANCELED')
    for name in data:
        if get_state_for_object(name)['status'] not in terminal:
            update_q.put(name)
    update_q.join()
    save_all_state()
##############################################################################
# connect to object storage
##############################################################################
def connect_to_object_storage():
    """Create the source and destination ObjectStorageClient instances.

    Mutates the module-level globals: fills in any region/namespace not
    supplied on the command line and sets object_storage_client /
    object_storage_client_dest.  Aborts the script on connection errors.
    """
    # global parameters
    global source_region
    global destination_region
    global source_namespace
    global destination_namespace
    global object_storage_client
    global object_storage_client_dest
    print_header("Connecting to Object Storage")
    # get signer
    config, signer = create_signer(cmd.config_file, cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)
    # assign region from config file when not given on the command line
    if not source_region:
        source_region = config['region']
    if not destination_region:
        destination_region = config['region']
    try:
        # connect to source region
        print("\nConnecting to Object Storage Service for source region - " + source_region)
        object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
        if cmd.proxy:
            object_storage_client.base_client.session.proxies = {'https': cmd.proxy}
        # retrieve namespace from object storage
        if not source_namespace:
            source_namespace = object_storage_client.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
        print("Succeed.")
    except Exception as e:
        print("\nError connecting to object storage at source region - " + str(e))
        raise SystemExit
    try:
        # connect to destination object storage
        print("\nConnecting to Object Storage Service for destination region - " + destination_region)
        # NOTE(review): this aliases (does not copy) the source config, so
        # changing the region below also alters `config`; the source client
        # was already constructed above, so this appears harmless - confirm.
        config_destination = config
        config_destination['region'] = destination_region
        object_storage_client_dest = oci.object_storage.ObjectStorageClient(config_destination, signer=signer)
        if cmd.proxy:
            object_storage_client_dest.base_client.session.proxies = {'https': cmd.proxy}
        # retrieve namespace from object storage
        if not destination_namespace:
            destination_namespace = object_storage_client_dest.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
        print("Succeed.")
    except Exception as e:
        print("\nError connecting to object storage at destination region - " + str(e))
        raise SystemExit
##############################################################################
# main
##############################################################################
def main():
    """Drive the bulk copy: connect, spawn workers, enqueue objects, poll status."""
    # connect to object storage
    connect_to_object_storage()
    # print command info
    print_command_info()
    print_header("Start Processing")
    print(get_time() + " - Creating %s copy request workers." % (request_worker_count))
    # daemon threads: they die with the process when main() returns
    for i in range(request_worker_count):
        worker = threading.Thread(target=copy_request_worker)
        worker.daemon = True
        worker.start()
    print(get_time() + " - Creating %s status workers." % (status_worker_count))
    for i in range(status_worker_count):
        worker = threading.Thread(target=work_request_status_worker)
        worker.daemon = True
        worker.start()
    # optionally preload the destination listing so existing files are skipped
    if not cmd.ignore_exist:
        print(get_time() + " - Loading list of objects from destination bucket (%s) to ignore exiting files." % (destination_bucket))
        load_dest_bucket_to_mem(object_storage_client_dest, destination_namespace, destination_bucket)
    print(get_time() + " - Getting list of objects from source bucket (%s). Copies will start immediately." % (source_bucket))
    count = add_objects_to_queue(source_namespace, source_bucket)
    print(get_time() + " - Enqueued %s objects to be copied" % (count))
    if count > 0:
        print_header("Finish queuing files, start checking")
    # poll until no object remains in a non-terminal (KNOWN/REQUESTED) state
    while count > 0:
        print(get_time() + " - Waiting %s seconds before checking status." % (status_interval))
        time.sleep(status_interval)
        if get_work_request_count_by_status('KNOWN') > 0 or get_work_request_count_by_status('REQUESTED') > 0:
            print(get_time() + " - Determining copy status")
            update_all_work_requests_status(source_namespace, source_bucket)
        # hold the lock so the counts printed below form a consistent snapshot
        data_lock.acquire()
        print(get_time() + " - KNOWN: %s, REQUESTED: %s, COMPLETED: %s, FAILED: %s, CANCELED: %s"
              % (
                  get_work_request_count_by_status('KNOWN'),
                  get_work_request_count_by_status('REQUESTED'),
                  get_work_request_count_by_status('COMPLETED'),
                  get_work_request_count_by_status('FAILED'),
                  get_work_request_count_by_status('CANCELED'))
              )
        # the lock is released on both branches before leaving the iteration
        if get_work_request_count_by_status('KNOWN') == 0 and get_work_request_count_by_status('REQUESTED') == 0:
            data_lock.release()
            break
        else:
            data_lock.release()
    known_q.join()
    print_header("Copy Completed at " + get_time())
##############################################################################
# Execute
##############################################################################
if __name__ == '__main__':
    # script entry point
    main()
| StarcoderdataPython |
8000753 | """
Quick example to illustrate how a raw STEREO EUVI image will be prepped to lvl 1.0 via SSW/IDL.
- Here the compressed file we save via a query/download is uncompressed and sent to an IDL subprocess
that calls secchi_prep and writes the output.
"""
from chmap.utilities.file_io import io_helpers
from chmap.utilities.idl_connect import idl_helper
import time
import os.path
from chmap.settings.app import App
# file locations
fits_compressed = os.path.join(App.APP_HOME, 'reference_data', 'sta_euvi_20140413T190530_195.fits')
fits_uncompressed = os.path.join(App.TMP_HOME, 'tmp_euvi_uncompressed.fits')
fits_prepped = os.path.join(App.TMP_HOME, 'tmp_euvi_prepped.fits')
print(fits_compressed)
print(fits_uncompressed)
# uncompress the image to a temporary location
io_helpers.uncompress_compressed_fits_image(fits_compressed, fits_uncompressed, int=True)
# begin the IDL session (opens a subprocess)
idl_session = idl_helper.Session()
# call secchi_prep (time it)
t1 = time.perf_counter()
idl_session.secchi_prep(fits_uncompressed, fits_prepped)
t2 = time.perf_counter()
print(t2 - t1)
# end the IDL session (closes a subprocess)
idl_session.end()
| StarcoderdataPython |
5118759 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import petstore_api
from petstore_api.models.type_holder_default import TypeHolderDefault # noqa: E501
from petstore_api.rest import ApiException
class TestTypeHolderDefault(unittest.TestCase):
    """TypeHolderDefault unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testTypeHolderDefault(self):
        """Test TypeHolderDefault"""
        # required_vars are set to None now until swagger-parser/swagger-core fixes
        # https://github.com/swagger-api/swagger-parser/issues/971
        required_vars = ['number_item', 'integer_item', 'array_item']
        sample_values = [5.67, 4, [-5, 2, -6]]
        assigned_variables = {}
        # while any required variable is still missing the constructor
        # must reject the arguments; add them one at a time
        for required_var, sample_value in zip(required_vars, sample_values):
            with self.assertRaises(ValueError):
                TypeHolderDefault(**assigned_variables)
            assigned_variables[required_var] = sample_value
        # all required variables supplied: construction succeeds and the
        # defaulted fields keep their documented default values
        model = TypeHolderDefault(**assigned_variables)
        self.assertEqual(model.string_item, 'what')
        self.assertEqual(model.bool_item, True)
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.