content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from api.helpers import random_string_generator
from tests.conftest import ValueStorage
# Random 20-character string used as a unique name for test fixtures.
name = random_string_generator(20)
| [
6738,
40391,
13,
16794,
364,
1330,
4738,
62,
8841,
62,
8612,
1352,
198,
6738,
5254,
13,
1102,
701,
395,
1330,
11052,
31425,
198,
198,
3672,
796,
4738,
62,
8841,
62,
8612,
1352,
7,
1238,
8,
628
] | 3.472222 | 36 |
"""
@author Zhenyu Jiang
@email stevetod98@gmail.com
@date 2022-01-12
@desc Point cloud utilities
"""
import numpy as np
import open3d as o3d
def assert_array_shape(xyz, shapes=((-1, 3),)):
    """Validate that an array matches at least one allowed shape.

    Args:
        xyz (np.ndarray): array to check.
        shapes (tuple of tuple of ints, optional): candidate target shapes;
            a -1 in any dimension means that dimension may have any size.
            Defaults to ((-1, 3),).

    Raises:
        ValueError: if ``xyz`` matches none of the candidate shapes.
    """
    def _matches(array_shape, target):
        # Ranks must agree before per-dimension comparison. The original
        # implementation compared dimensions even on a rank mismatch, so e.g.
        # a 1-D array checked against (-1, 3) raised IndexError on
        # array_shape[1] instead of the documented ValueError.
        if len(array_shape) != len(target):
            return False
        return all(num == -1 or array_shape[dim] == num
                   for dim, num in enumerate(target))

    if not any(_matches(xyz.shape, shape) for shape in shapes):
        raise ValueError(f"Input array {xyz.shape} is not in target shapes {shapes}!")
def np_to_o3d_pointcloud(xyz, color=None):
    """Build an open3d point cloud from a numpy array.

    Args:
        xyz (np.ndarray): N*3 array of point coordinates.
        color (np.ndarray, optional): colors for the cloud; either a single
            RGB triple shared by every point (shape 3) or one triple per
            point (shape N*3). Defaults to None (no colors set).

    Returns:
        o3d.geometry.PointCloud: the populated open3d point cloud.

    Raises:
        ValueError: if ``color`` is neither 1- nor 2-dimensional.
    """
    assert_array_shape(xyz)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)
    if color is not None:
        ndim = len(color.shape)
        if ndim == 1:
            # A single RGB triple: broadcast it to every point.
            color = np.tile(color, (xyz.shape[0], 1))
        elif ndim != 2:
            raise ValueError(f"Bad color with shape {color.shape}")
        # Per-point colors must line up one-to-one with the points.
        assert_array_shape(color, shapes=(xyz.shape,))
        cloud.colors = o3d.utility.Vector3dVector(color)
    return cloud
def normalize_pointcloud(xyz, padding=0.0):
    """Center a point cloud and scale it into roughly [-0.5, 0.5].

    Args:
        xyz (np.ndarray): input point cloud, N*3.
        padding (float, optional): extra fraction added to the scale so the
            normalized cloud sits strictly inside the unit cube. Defaults to 0.0.

    Returns:
        np.ndarray: normalized point cloud
        np.ndarray: original center (midpoint of the bounding box)
        float: original scale (longest bounding-box edge, inflated by padding)
    """
    assert_array_shape(xyz)
    lo = xyz.min(0)
    hi = xyz.max(0)
    center = (hi + lo) / 2
    scale = (hi - lo).max() * (1 + padding)
    return (xyz - center) / scale, center, scale
def sample_pointcloud(xyz, color=None, num_points=2048):
    """Randomly subsample a point cloud (with replacement when oversampling).

    Args:
        xyz (np.ndarray): input point cloud of N*3.
        color (np.ndarray, optional): color of the points, N*3 or None. Defaults to None.
        num_points (int, optional): number of points to draw. Defaults to 2048.

    Returns:
        np.ndarray: subsampled point cloud of shape num_points*3
        np.ndarray or None: matching subsampled colors, or None if ``color``
            was None
    """
    assert_array_shape(xyz)
    # Draw with replacement only when more points are requested than exist.
    replace = num_points > xyz.shape[0]
    sample_idx = np.random.choice(xyz.shape[0], size=(num_points,), replace=replace)
    if color is not None:
        assert_array_shape(color, shapes=(xyz.shape,))
        color = color[sample_idx]
    xyz = xyz[sample_idx]
    return xyz, color
| [
37811,
198,
2488,
9800,
1168,
831,
24767,
32294,
198,
2488,
12888,
2876,
16809,
375,
4089,
31,
14816,
13,
785,
198,
2488,
4475,
33160,
12,
486,
12,
1065,
198,
2488,
20147,
6252,
6279,
20081,
198,
37811,
198,
198,
11748,
299,
32152,
355,... | 2.284615 | 1,430 |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Acceptance tests for various compression techniques """
import math
import os
import unittest
import unittest.mock
import logging
import shutil
import pickle
from decimal import Decimal
from glob import glob
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.examples.tutorials.mnist import input_data
import aimet_common.defs
import aimet_tensorflow.utils.graph_saver
from aimet_common.utils import AimetLogger
import aimet_tensorflow.defs
from aimet_tensorflow.defs import ModuleCompRatioPair
from aimet_tensorflow.common import graph_eval
from aimet_tensorflow.compress import ModelCompressor
from aimet_tensorflow.common import tfrecord_generator
from aimet_tensorflow.common.tfrecord_generator import MnistParser
from aimet_tensorflow.examples.test_models import model_with_three_convs
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
def tiny_imagenet_parse(serialized_example):
    """
    Parse one record of a TINY IMAGENET tfRecords file.

    :param serialized_example: serialized tf.Example proto
    :return: Input image tensor of shape [64, 64, 3]
    """
    # Works for the tf_slim resnet_50_v2 model but NOT for Keras VGG16.
    # Dense features stored in the Example proto.
    feature_map = {}
    for int_key in ('height', 'width', 'channel', 'label'):
        feature_map[int_key] = tf.FixedLenFeature((), tf.int64)
    for bytes_key in ('image_raw', 'location_raw'):
        feature_map[bytes_key] = tf.FixedLenFeature((), tf.string)
    parsed = tf.parse_single_example(serialized_example, feature_map)
    raw_bytes = tf.decode_raw(parsed["image_raw"], tf.uint8)
    return tf.reshape(raw_bytes, [64, 64, 3])
def imagenet_parse(serialized_example):
    """
    Parser for IMAGENET models, reads the tfRecords file
    :param serialized_example: serialized tf.Example proto holding one image
    :return: Input image (the label feature is parsed but NOT returned)
    """
    # Target spatial size expected by the ImageNet models.
    dim = 224
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image/class/label': tf.FixedLenFeature([], tf.int64),
                                           'image/encoded': tf.FixedLenFeature([], tf.string)})
    image_data = features['image/encoded']
    # Decode the jpeg
    with tf.name_scope('prep_image', [image_data], None):
        # decode and reshape to default 224x224
        # pylint: disable=no-member
        image = tf.image.decode_jpeg(image_data, channels=3)
        # Converts to float32 in [0, 1] before resizing.
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.resize_images(image, [dim, dim])
    return image
def evaluate(model: tf.Session, iterations: int, use_cuda: bool):
    """
    eval function for MNIST LeNet model
    :param model: tf.Session holding the trained graph
    :param iterations: number of batches to evaluate; None means one pass over
        the 10000-image test set at the fixed batch size
    :param use_cuda: use_cuda flag (not referenced in this function body)
    :return: average accuracy over the evaluated batches
    """
    total_test_images = 10000
    batch_size = 64
    # iterate over entire test data set, when iterations is None
    # TODO : figure out way to end iterator when the data set is exhausted
    if iterations is None:
        iterations = int(total_test_images / batch_size)
    parser = MnistParser(data_inputs=['reshape_input'], validation_inputs=['labels'], batch_size=batch_size)
    # Allocate the generator you wish to use to provide the network with data
    generator = tfrecord_generator.TfRecordGenerator(tfrecords=[os.path.join('data', 'mnist', 'validation.tfrecords')],
                                                     parser=parser, num_gpus=1)
    # Create the tensor map for input and ground truth ops
    input_tensor_map = {}
    inputs = ['reshape_input', 'labels']
    for name in inputs:
        input_tensor_map[name] = model.graph.get_tensor_by_name(name + ':0')
    # get the evaluation tensor
    eval_tensor = model.graph.get_tensor_by_name('accuracy:0')
    avg_accuracy = 0
    current_iterations = 0
    for batch in generator:
        current_iterations += 1
        # Setup the feed dictionary
        feed_dict = {}
        for name, data in batch.items():
            feed_dict[input_tensor_map[name]] = data
        with model.as_default():
            accuracy = model.run(eval_tensor, feed_dict=feed_dict)
        # Running mean is computed at the end; accumulate per-batch accuracy.
        avg_accuracy += accuracy
        if current_iterations >= iterations:
            break
    # NOTE(review): raises ZeroDivisionError if the generator yields no
    # batches at all -- confirm the tfrecords file is never empty.
    return avg_accuracy / current_iterations
| [
2,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
20,
198,
2,
532,
9,
12,
4235,
25,
21015,
532,
9,
12,
198,
2,
38093,
25609,
198,
2,
220,
25248,
12,
34,
3185,
38162,
9947,
12,
2257,
7227,
12,
12404,
198,
2,
198,
2,
220,
1506... | 2.770555 | 2,323 |
import logging
from src.filters import *
from glob import glob
import cv2
def read_files(folder_path):
    """
    Read image files in a specified input folder.

    Arguments:
        folder_path{str} -- path to the folder holding the original images to
            be read, or the path of a single image file
    Returns:
        (bool, result) -- on success (True, [arrays, files]) where arrays is a
            list of images as numpy arrays and files the matching paths; on
            failure (False, <error message string>)
    """
    file_types = ["jpg", "jpeg", "png"]
    # isinstance is the idiomatic type check (also accepts str subclasses).
    if not isinstance(folder_path, str):
        return False, "Input folder must be a string."
    # determine if path is a folder or single image file
    if folder_path.split('.')[-1] not in file_types:
        files = glob(folder_path + "/*")
    else:
        files = [folder_path]
    # Empty glob result means the folder path was invalid or empty.
    if not files:
        return False, "Input a valid folder path"
    for file in files:
        if file.split('.')[-1] not in file_types:
            return False, "Ayye! Yaar File Tis No Be An Image, Maytee!"
    arrays = [cv2.imread(file) for file in files]
    return True, [arrays, files]
def save_files(filtered_photos_to_save, file_names, output_folder, filter_type):
    """
    Convert modified numpy arrays into images and save each as its original
    file name plus filter_type in a specified output folder.

    Arguments:
        filtered_photos_to_save{list} -- list of modified numpy arrays
        file_names{list} -- list of strings -- image file paths showing original image name
        output_folder{str} -- path to the folder to save the output images
        filter_type{str} -- type of filter we're going to run
    Returns:
        (bool, str) -- success flag plus a status/error message
    """
    if len(filtered_photos_to_save) != len(file_names):
        return False, 'An unexpected error occured, image data does not match.'
    for photo, f_name in zip(filtered_photos_to_save, file_names):
        # isinstance is the idiomatic type check for numpy arrays.
        if not isinstance(photo, np.ndarray):
            return False, "Filtered photos are not arrays."
        # Insert the filter name before the extension: cat.png -> cat_gray.png
        output_path = output_folder + f_name.split('/')[-1].replace('.', '_{}.'.format(filter_type))
        cv2.imwrite(output_path, photo)
    return True, "Filtered files saved successfully!"
def run_filter(input_folder, output_folder, filter_type):
    """
    Read files in the input_folder, run the specified filter_type on each image
    file and save new images to output_folder.

    Arguments:
        input_folder{str} -- path to the folder holding the input images
        output_folder{str} -- path to the folder to save the output images
        filter_type{str} -- filter to be run on images, 'gray' or 'sepia'
    Returns:
        None -- modified images saved in designated output folder; errors are
        logged rather than raised
    """
    # read all files from our input path
    read_status, read_output = read_files(input_folder)
    if read_status:  # no errors (read_files returned: True, [arrays, files])
        filters_input, file_names = read_output  # assign [arrays, files] to variables
    else:  # we hit one of our errors (read_files returned: False, "some message")
        logging.error(read_output)  # log our returned message as an error
        return  # don't run any more code from below
    # run correct filter type
    if filter_type == "gray":
        filtered_photos_to_save = grayscale_filter(filters_input)
    elif filter_type == "sepia":
        filtered_photos_to_save = sepia_filter(filters_input)
    else:
        # Previously an unrecognized filter_type crashed below with
        # UnboundLocalError; report it explicitly instead.
        logging.error("Unknown filter type: {}".format(filter_type))
        return
    save_status, message = save_files(filtered_photos_to_save, file_names, output_folder, filter_type)
    if save_status:
        logging.info(message)
    else:
        logging.error(message)
| [
11748,
18931,
198,
6738,
12351,
13,
10379,
1010,
1330,
1635,
198,
6738,
15095,
1330,
15095,
198,
11748,
269,
85,
17,
628,
198,
4299,
1100,
62,
16624,
7,
43551,
62,
6978,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4149,
2939,... | 2.808869 | 1,308 |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ResponseCache(object):
    """
    Abstract base for response caches used by Basecamp 3 API calls.

    Subclass this to plug in your own caching backend. The cache has one job:
    remember the ETag and Last-Modified headers that came back with a response
    and replay them on the next identical request. When Basecamp 3 answers
    "304 Not Modified" (with an empty body), the previously cached
    requests.Response can simply be returned again.

    requests.Response objects are picklable, so implementations are free to
    persist them anywhere -- Redis, a file on disk, a database, etc.
    """

    @abc.abstractmethod
    def get_cached_headers(self, method, url):
        """
        Look up the conditional-request headers cached for this exact request
        METHOD and URL, returned as tuple(ETAG, LAST_MODIFIED).

        :param method: the HTTP method in all caps (i.e. 'GET', 'POST', 'PUT')
        :type method: str
        :param url: the URL of the request
        :type url: str
        :return: the headers as a 2-element tuple; one or both elements may be
            `None` when that header has not been cached before.
        :rtype: tuple(str)
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def get_cached_response(self, method, url):
        """
        Fetch the requests.Response previously stored for this METHOD and URL.

        :param method: the HTTP method in all caps (i.e. 'GET', 'POST', 'PUT')
        :type method: str
        :param url: the URL of the request
        :type url: str
        :return: the Response cached the last time this endpoint was called
        :rtype: requests.Response
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def set_cached(self, response):
        """
        Store this Response object in the cache.

        :param response: the Response object from a successful call, to be
            retrieved later
        :type response: requests.Response
        """
        raise NotImplementedError()
| [
11748,
450,
66,
198,
11748,
2237,
628,
198,
31,
19412,
13,
2860,
62,
4164,
330,
31172,
7,
39305,
13,
24694,
48526,
8,
198,
4871,
18261,
30562,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
46228,
428,
1398,
284,
225... | 2.838174 | 723 |
from typing import Optional
from storage import IR, BLK, Control_Area, Track_Section, Railway
# Maps a control-area number (key) to the numeric base ID (value) used for
# that area; the trailing comment on each entry names the station/area.
CONTROL_AREA_IDS = {
    1: 1000, # Skuhrov
    2: 2000, # Odbocka Skuhrov
    3: 4000, # Lesna
    4: 5000, # Skaly
    5: 3000, # Hrad
    6: 10000, # Ivancice
    7: 9000, # Metro
    8: 8000, # Podhradi
    9: 6000, # Brizky
    10: 11000, # Namest
    11: 20000, # Depo
}
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
6143,
1330,
14826,
11,
9878,
42,
11,
6779,
62,
30547,
11,
17762,
62,
16375,
11,
32130,
198,
198,
10943,
5446,
3535,
62,
12203,
32,
62,
14255,
796,
1391,
198,
220,
220,
220,
352,
25,
8576,
11... | 2.025 | 200 |
#!/usr/bin/env python
import os
import argparse
from Bio import SeqIO
from tral.sequence import sequence
from tral.hmm import hmm
__all__ = [
"TRFinder",
]
if __name__ == "__main__":
    args = parser()
    finder = TRFinder(fasta_file=args.fasta, output_dir=args.outdir)
    for record in finder.sequences:
        repeat_list = finder.detect_in_sequence(record)
        if not repeat_list:
            continue
        # Filter cascade: keep repeats that are statistically significant
        # (pvalue), well conserved (divergence), repeated often enough
        # (n_effective, min) and with short enough units (l_effective, max).
        repeat_list = finder.filter(repeat_list=repeat_list, criterion="pvalue", threshold=0.05)
        repeat_list = finder.filter(repeat_list=repeat_list, criterion="divergence", threshold=0.1)
        repeat_list = finder.filter(repeat_list=repeat_list, criterion="n_effective", threshold=2.5, filter_type="min")
        repeat_list = finder.filter(repeat_list=repeat_list, criterion="l_effective", threshold=3, filter_type="max")
        if len(repeat_list.repeats) == 0:
            continue
        clustered_list = finder.cluster(repeat_list)
        # create filename
        # NOTE(review): assumes FASTA ids look like "db|ACCESSION|name"
        # (UniProt style) -- confirm for other input sources.
        seq_name = record.id.split("|")[1]
        output_pickle_file = os.path.join(finder.output_dir, seq_name + ".pkl")
        output_tsv_file = os.path.join(finder.output_dir, seq_name + ".tsv")
        # save TR-file
        clustered_list.write(output_format="pickle", file=output_pickle_file)
        clustered_list.write(output_format="tsv", file=output_tsv_file)
        print("Found {} TR(s) in protein {} (after filtering and clustering)".format(len(clustered_list.repeats), seq_name))
    finder.merge_repeat_files()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
198,
6738,
16024,
1330,
1001,
80,
9399,
198,
198,
6738,
491,
282,
13,
43167,
1330,
8379,
198,
6738,
491,
282,
13,
71,
3020,
1330,
289,
3020,... | 2.459936 | 624 |
from setuptools import setup
# Packaging metadata for the scrapy-fake-useragent middleware.
setup(
    name='scrapy-fake-useragent',
    version='1.2.5',
    description='Use a random User-Agent provided by fake-useragent for every request',
    long_description=open('README.rst').read(),
    keywords='scrapy proxy user-agent web-scraping',
    license='New BSD License',
    author="Alexander Afanasyev / Alfonso de la Guarda",
    author_email='alfonsodg@gmail.com',
    url='https://github.com/alfonsodg/scrapy-fake-useragent',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Scrapy',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    packages=[
        'scrapy_fake_useragent',
    ],
    # NOTE(review): pinned exactly to 0.1.15, presumably to match the git
    # dependency link below -- confirm before upgrading.
    install_requires=['fake-useragent==0.1.15'],
    dependency_links=['git+ssh://git@github.com/alfonsodg/fake-useragent@0.1.15#egg=fake-useragent-0.1.15'],
    # install_requires=['package @ git+ssh://git@github.com/alfonsodg/fake-useragent@0.1.15#egg=fake-useragent-0.1.15'],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
1416,
2416,
88,
12,
30706,
12,
7220,
25781,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
17,
13,
20,
3256,
198,
220,
220,
220,
6764,
11639,
... | 2.489749 | 439 |
from .util import *
from .TestApp import *
from .features import *
from .ingest import *
from .dim_reduce import *
from .classifier import *
from .initialize import *
from .Model import *
from .create_temporary import *
from .__main__ import main
| [
6738,
764,
22602,
1330,
1635,
198,
6738,
764,
14402,
4677,
1330,
1635,
198,
6738,
764,
40890,
1330,
1635,
198,
6738,
764,
278,
395,
1330,
1635,
198,
6738,
764,
27740,
62,
445,
7234,
1330,
1635,
198,
6738,
764,
4871,
7483,
1330,
1635,
... | 3.430556 | 72 |
from collections import deque, namedtuple
from tempfile import TemporaryFile as tf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.patches import Circle, ConnectionPatch
from ..viz import Monitor
from .vertex import Vertex
from .edge import Edge
| [
6738,
17268,
1330,
390,
4188,
11,
3706,
83,
29291,
198,
6738,
20218,
7753,
1330,
46042,
8979,
355,
48700,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
6738,
2603,
2... | 2.776786 | 112 |
import mock
import time
from raven.utils.testutils import TestCase
from raven.base import Client
from raven.transport.threaded import ThreadedHTTPTransport
from raven.utils.urlparse import urlparse
| [
11748,
15290,
198,
11748,
640,
198,
6738,
37735,
13,
26791,
13,
9288,
26791,
1330,
6208,
20448,
198,
198,
6738,
37735,
13,
8692,
1330,
20985,
198,
6738,
37735,
13,
7645,
634,
13,
16663,
276,
1330,
14122,
276,
40717,
8291,
634,
198,
6738... | 3.865385 | 52 |
""" PLT Calculator
PLT calculator functions.
"""
import pandas as pd
import numpy as np
from aggregationtools import ep_curve, PLT
def calculate_oep_curve(plt, number_of_simulations):
    """ Compute the Occurrence Exceedance Probability (OEP) curve for a PLT.

    Parameters
    ----------
    plt : pandas dataframe containing PLT
    number_of_simulations :
        Number of simulation periods. Important to supply as cannot assume
        that the max number of periods is the number of simulation periods

    Returns
    -------
    EPCurve :
        An exceedance probability curve for the occurrence of a single event in a given year
    """
    filled = _fill_plt_empty_periods(plt, number_of_simulations)
    # Largest loss per period; periods without events count as zero loss.
    per_period_max = filled.groupby('PeriodId').max().fillna(0).sort_values(by=['Loss'])
    per_period_max = _calculate_probabilities_for_period_losses(per_period_max)
    return ep_curve.EPCurve(per_period_max, ep_type=ep_curve.EPType.OEP)
def calculate_aep_curve(plt, number_of_simulations):
    """ Compute the Aggregate Exceedance Probability (AEP) curve for a PLT.

    Parameters
    ----------
    plt : pandas dataframe containing PLT
    number_of_simulations :
        Number of simulation periods. Important to supply as cannot assume
        that the max number of periods is the number of simulation periods

    Returns
    -------
    EPCurve :
        An exceedance probability curve for the aggregate losses in a given year
    """
    filled = _fill_plt_empty_periods(plt, number_of_simulations)
    # Total loss per period; periods without events count as zero loss.
    per_period_sum = filled.groupby('PeriodId').sum().fillna(0).sort_values(by=['Loss'])
    per_period_sum = _calculate_probabilities_for_period_losses(per_period_sum)
    return ep_curve.EPCurve(per_period_sum, ep_type=ep_curve.EPType.AEP)
def group_plts(plt1, plt2=None):
    """ Combine two PLTs into one, keeping event-level granularity.

    Parameters
    ----------
    plt1 : pandas dataframe containing PLT
    plt2 : pandas dataframe containing PLT

    Returns
    -------
    plt :
        A pandas dataframe containing a PLT
    """
    if plt2 is None:
        combined = plt1.plt
        simulations = plt1.simulations
    else:
        if plt1.simulations != plt2.simulations:
            raise Exception('Please provide PLTs with the same number of simulations to be grouped.')
        combined = pd.concat([plt1.plt, plt2.plt], axis=0)
        simulations = plt1.simulations
    # Sum losses for rows describing the same event occurrence.
    merged = combined.groupby(['PeriodId',
                               'EventId',
                               'EventDate',
                               'Weight'], observed=True).sum().reset_index()
    return PLT(merged, number_of_simulations=simulations)
def roll_up_plts(plt1, plt2=None):
    """ Roll up two PLTs into one at period granularity.

    Parameters
    ----------
    plt1 : pandas dataframe containing PLT
    plt2 : pandas dataframe containing PLT

    Returns
    -------
    plt :
        A pandas dataframe containing a PLT
    """
    if plt2 is None:
        combined = plt1.plt
        simulations = plt1.simulations
    else:
        if plt1.simulations != plt2.simulations:
            raise Exception('Please provide PLTs with the same number of simulations to be rolled up.')
        combined = pd.concat([plt1.plt, plt2.plt], axis=0)
        simulations = plt1.simulations
    # Collapse all events within a period into a single row.
    rolled = combined.groupby(['PeriodId', 'Weight'], as_index=False).sum()
    return PLT(rolled, number_of_simulations=simulations)
| [
37811,
9297,
51,
43597,
198,
6489,
51,
28260,
5499,
13,
198,
37811,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
46500,
31391,
1330,
2462,
62,
22019,
303,
11,
9297,
51,
628,
198,
4299,
1528... | 2.458918 | 1,497 |
#test.py
import os
import torch
from tqdm import tqdm
import pdb
from sklearn.metrics import accuracy_score
cwd = os.getcwd()
| [
2,
9288,
13,
9078,
198,
198,
11748,
28686,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
279,
9945,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
198,
198,
66,
16993,
796,
28686,
13,... | 2.723404 | 47 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.fields import AutoCreatedField, AutoLastModifiedField
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
2746,
62,
26791,
13,
25747,
1330,
11160,
41972,
15878,
11,
11160,
5956,
5841,
1431,
158... | 3.674419 | 43 |
# Generated by Django 3.1.6 on 2021-03-09 06:07
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
3070,
12,
2931,
9130,
25,
2998,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import os
from flakon import SwaggerBlueprint
HERE = os.path.dirname(__file__)
YML = os.path.join(HERE, '..', 'static', 'api.yaml')
api = SwaggerBlueprint('API', __name__, swagger_spec=YML)
@api.operation('getUserIds')
| [
11748,
28686,
198,
6738,
781,
461,
261,
1330,
2451,
7928,
14573,
4798,
628,
198,
39,
9338,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
198,
56,
5805,
796,
28686,
13,
6978,
13,
22179,
7,
39,
9338,
11,
705,
492,
325... | 2.534091 | 88 |
from django.core.exceptions import MultipleObjectsReturned
from django.test import TestCase
from model_mommy import mommy
import mox
from .models import Experience, Rating, Review, SubjectiveMixin
from .exptests.models import Reviewed_Item
#Add to settings file
#TESTING = 'test' in sys.argv
#if TESTING:
# INSTALLED_APPS += ['experiences.exptests',]
| [
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
20401,
10267,
82,
13615,
276,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
2746,
62,
32542,
1820,
1330,
1995,
1820,
198,
11748,
285,
1140,
198,
198,
6738,
764,
275... | 3.121739 | 115 |
import os
import secrets
basedir = os.path.abspath(os.path.dirname(__file__))
| [
11748,
28686,
198,
11748,
13141,
198,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
628
] | 2.666667 | 30 |
#!/usr/bin/env python3
# encoding: utf-8
title = 'CH2NH2 PES'
description = """
Calculations done using ARC at the following levels of theory:
opt: wb97xd/6-311++g(d,p)
freq: wb97xd/6-311++g(d,p)
sp: ccsd(t)-f12/cc-pvqz-f12
1D rotor scans: wb97xd/6-311++g(d,p)
"""
modelChemistry = "CCSD(T)-F12/cc-pvqz-f12"
useHinderedRotors = True
useBondCorrections = False
species('CH2NH2', 'yaml_files/CH2NH2.yml',
collisionModel = TransportData(sigma=(3.626,'angstrom'), epsilon=(481.8,'J/mol')),
energyTransferModel = SingleExponentialDown(alpha0=(133,'cm^-1'), T0=(300,'K'), n=0.85), # C3H4/N2
)
species('CH3NH', 'yaml_files/CH3NH.yml',
collisionModel = TransportData(sigma=(3.626,'angstrom'), epsilon=(481.8,'J/mol')),
energyTransferModel = SingleExponentialDown(alpha0=(133,'cm^-1'), T0=(300,'K'), n=0.85), # C3H4/N2
)
species('CH2NH', 'yaml_files/CH2NH.yml',
collisionModel = TransportData(sigma=(3.690,'angstrom'), epsilon=(417.0,'J/mol')),
energyTransferModel = SingleExponentialDown(alpha0=(133,'cm^-1'), T0=(300,'K'), n=0.85), # C3H4/N2
)
species('H', 'yaml_files/H.yml',
collisionModel = TransportData(sigma=(2.050,'angstrom'), epsilon=(145.0,'J/mol')),
energyTransferModel = SingleExponentialDown(alpha0=(133,'cm^-1'), T0=(300,'K'), n=0.85), # C3H4/N2
)
species(
label = 'Ar',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(133,'cm^-1'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,2.64613e-14,-3.72536e-17,1.7192e-20,-2.44483e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3802.52,'K')), NASAPolynomial(coeffs=[2.5,1.04239e-10,-3.81845e-14,6.18592e-18,-3.73869e-22,-745,4.3663], Tmin=(3802.52,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState('CH2NH_to_CH2NH2','yaml_files/CH2NH_to_CH2NH2.yml')
transitionState('CH2NH_to_CH3NH','yaml_files/CH2NH_to_CH3NH.yml')
transitionState('CH3NH_to_CH2NH2','yaml_files/CH3NH_to_CH2NH2.yml')
reaction(
label = 'CH3NH = CH2NH2',
reactants = ['CH3NH'],
products = ['CH2NH2'],
transitionState = 'CH3NH_to_CH2NH2',
tunneling = 'Eckart',
)
reaction(
label = 'CH2NH + H = CH2NH2',
reactants = ['CH2NH', 'H'],
products = ['CH2NH2'],
transitionState = 'CH2NH_to_CH2NH2',
tunneling = 'Eckart',
)
reaction(
label = 'CH2NH + H = CH3NH',
reactants = ['CH2NH', 'H'],
products = ['CH3NH'],
transitionState = 'CH2NH_to_CH3NH',
tunneling = 'Eckart',
)
network(
label = 'CH2NH2',
isomers = ['CH2NH2', 'CH3NH'],
reactants = [('CH2NH', 'H')],
bathGas = {'Ar': 1}
)
pressureDependence(
label='CH2NH2',
Tmin=(500.0,'K'), Tmax=(2500.0,'K'), Tcount=25,
Pmin=(0.01,'bar'), Pmax=(100.0,'bar'), Pcount=15,
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'chemically-significant eigenvalues', # use the CSE method which is more expensive, less robust, yet more accurate, see: http://reactionmechanismgenerator.github.io/RMG-Py/theory/pdep/methods.html#the-chemically-signficant-eigenvalues-method
interpolationModel = ('chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
21004,
25,
3384,
69,
12,
23,
201,
198,
201,
198,
7839,
796,
705,
3398,
17,
33863,
17,
350,
1546,
6,
201,
198,
11213,
796,
37227,
201,
198,
9771,
3129,
602,
1760,
1262,
... | 2.115406 | 1,785 |
import re
import platform
import requests
import toolz.curried as _
import larc
import larc.common as __
log = larc.logging.new_log(__name__)
ip_re = r'\d+\.\d+\.\d+\.\d+'
float_re = r'[+-]?(?:[0-9]*[.])?[0-9]+'
getoutput = larc.shell.getoutput(echo=False)
@_.curry
win_mac_conv = mac_conv('-')
macos_mac_conv = mac_conv(':')
# ----------------------------------------------------------------------
# ARP
# ----------------------------------------------------------------------
arp_output_macos = _.partial(getoutput, 'arp -a')
arp_macos_re = re.compile(
fr'^(?P<name>[?.\w-]*)\s+\((?P<ip>{ip_re})\) at (?P<mac>.*?) on .*$'
)
arp_output_win = _.partial(getoutput, 'arp -a')
arp_win_re = re.compile(
fr'^\s+(?P<ip>{ip_re})\s+(?P<mac>.*?)\s+\w+\s*$'
)
get_arp_macos = get_arp(arp_output_macos, arp_macos_re, macos_mac_conv)
get_arp_win = get_arp(arp_output_win, arp_win_re, win_mac_conv)
# ----------------------------------------------------------------------
# ICMP ping
# ----------------------------------------------------------------------
@_.curry
ping_re_macos = {
'tick': re_map(re.compile(
fr'\d+ bytes from (?P<ip>{ip_re}): icmp_seq=\d+ ttl=\d+'
fr' time=(?P<ms>\d+(?:\.\d+)?) ms'
), {'ms': float}),
'totals': re_map(re.compile(
r'(?P<sent>\d+) packets transmitted,'
r' (?P<received>\d+) packets received,'
r' (?P<lost>\d+(?:\.\d+))% packet loss'
), {'sent': int, 'received': int, 'lost': float}),
'stats': re_map(re.compile(
fr'round-trip min/avg/max/stddev ='
fr' (?P<min>{float_re})/'
fr'(?P<avg>{float_re})/'
fr'(?P<max>{float_re})/'
fr'(?P<std>{float_re}) ms'
), {'min': float, 'avg': float, 'max': float, 'std': float}),
}
ping_re_win = {
'stats': re_map(re.compile(
r' Minimum = (?P<min>\d+(?:\.\d+)?)ms,'
r' Maximum = (?P<max>\d+(?:\.\d+)?)ms,'
r' Average = (?P<avg>\d+(?:\.\d+)?)ms',
), {'min': float, 'max': float, 'avg': float}),
'totals': re_map(re.compile(
r'Packets: Sent = (?P<sent>\d+(?:\.\d+)?),'
r' Received = (?P<received>\d+(?:\.\d+)?),'
r' Lost = (?P<lost>\d+(?:\.\d+)?) (0% loss),'
), {'sent': float, 'received': float, 'lost': float}),
}
get_ping_stats_macos = _.partial(
get_ping_data, ping_output_macos, ping_re_macos['stats'], ping_stats_data
)
get_ping_stats_win = _.partial(
get_ping_data, ping_output_win, ping_re_win['stats'], ping_stats_data
)
ping_output, ping_re = {
'Windows': (ping_output_win, ping_re_win),
'Darwin': (ping_output_macos, ping_re_macos),
}[platform.system()]
# ----------------------------------------------------------------------
# End-user functions
# ----------------------------------------------------------------------
get_arp = {
'Windows': get_arp_win,
'Darwin': get_arp_macos,
}[platform.system()]
| [
11748,
302,
201,
198,
11748,
3859,
201,
198,
201,
198,
11748,
7007,
201,
198,
11748,
2891,
89,
13,
22019,
2228,
355,
4808,
201,
198,
11748,
300,
5605,
201,
198,
11748,
300,
5605,
13,
11321,
355,
11593,
201,
198,
201,
198,
6404,
796,
... | 2.143472 | 1,394 |
import os
from importd import d
__version__ = '0.2.1'
d(
DEBUG='RESSL_DEBUG' in os.environ,
INSTALLED_APPS=['djangosecure'],
MIDDLEWARE_CLASSES=['djangosecure.middleware.SecurityMiddleware'],
SECURE_SSL_REDIRECT=True,
SECURE_PROXY_SSL_HEADER=(
os.environ.get('RESSL_PROXY_PROTOCOL', 'HTTP_X_FORWARDED_PROTOCOL'),
'https'
),
ALLOWED_HOSTS=[
host.strip()
for host in os.environ.get('RESSL_ALLOWED_HOSTS', '').split(',')
],
)
# just for gunicorn
application = d
if __name__ == "__main__":
d.main()
| [
11748,
28686,
198,
6738,
1330,
67,
1330,
288,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
17,
13,
16,
6,
198,
198,
67,
7,
198,
220,
220,
220,
16959,
11639,
49,
7597,
43,
62,
30531,
6,
287,
28686,
13,
268,
2268,
11,
198,
220,
2... | 2.054348 | 276 |
"""Logging setup."""
import logging
class ColoredFormatter(logging.Formatter):
"""Colored formatter."""
prefix = "[%(asctime)s: %(levelname)s/%(name)s]:"
default = f"{prefix} %(message)s"
error_fmt = f"\x1b[31m{prefix}\x1b[0m %(message)s"
warning_fmt = f"\x1b[33m{prefix}\x1b[0m %(message)s"
info_fmt = f"\x1b[32m{prefix}\x1b[0m %(message)s"
debug_fmt = f"\x1b[34m{prefix}\x1b[0m %(message)s"
def __init__(self, fmt=default):
"""Initialize."""
logging.Formatter.__init__(self, fmt)
def format(self, record):
"""Format record."""
format_orig = self._style._fmt
if record.levelno == logging.DEBUG:
self._style._fmt = ColoredFormatter.debug_fmt
elif record.levelno == logging.INFO:
self._style._fmt = ColoredFormatter.info_fmt
elif record.levelno == logging.WARNING:
self._style._fmt = ColoredFormatter.warning_fmt
elif record.levelno == logging.ERROR:
self._style._fmt = ColoredFormatter.error_fmt
result = logging.Formatter.format(self, record)
self._style._fmt = format_orig
return result
def setup_logger():
"""Set up Strider logger."""
logger = logging.getLogger("binder")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(ColoredFormatter())
logger.addHandler(handler)
| [
37811,
11187,
2667,
9058,
526,
15931,
198,
11748,
18931,
628,
198,
4871,
1623,
1850,
8479,
1436,
7,
6404,
2667,
13,
8479,
1436,
2599,
198,
220,
220,
220,
37227,
5216,
1850,
1296,
1436,
526,
15931,
628,
220,
220,
220,
21231,
796,
12878,
... | 2.260938 | 640 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from ... import models
class DeviceOperations:
"""DeviceOperations async operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
async def connect(
self, transport_type, connection_string, ca_certificate=None, *, custom_headers=None, raw=False, **operation_config):
"""Connect to the azure IoT Hub as a device.
:param transport_type: Transport to use. Possible values include:
'amqp', 'amqpws', 'mqtt', 'mqttws', 'http'
:type transport_type: str
:param connection_string: connection string
:type connection_string: str
:param ca_certificate:
:type ca_certificate: ~e2erestapi.models.Certificate
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectResponse or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.ConnectResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.connect.metadata['url']
path_format_arguments = {
'transportType': self._serialize.url("transport_type", transport_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['connectionString'] = self._serialize.query("connection_string", connection_string, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if ca_certificate is not None:
body_content = self._serialize.body(ca_certificate, 'Certificate')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
connect.metadata = {'url': '/device/connect/{transportType}'}
async def disconnect(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Disconnect the device.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.disconnect.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
disconnect.metadata = {'url': '/device/{connectionId}/disconnect'}
async def create_from_connection_string(
self, transport_type, connection_string, ca_certificate=None, *, custom_headers=None, raw=False, **operation_config):
"""Create a device client from a connection string.
:param transport_type: Transport to use. Possible values include:
'amqp', 'amqpws', 'mqtt', 'mqttws', 'http'
:type transport_type: str
:param connection_string: connection string
:type connection_string: str
:param ca_certificate:
:type ca_certificate: ~e2erestapi.models.Certificate
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectResponse or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.ConnectResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.create_from_connection_string.metadata['url']
path_format_arguments = {
'transportType': self._serialize.url("transport_type", transport_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['connectionString'] = self._serialize.query("connection_string", connection_string, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if ca_certificate is not None:
body_content = self._serialize.body(ca_certificate, 'Certificate')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_from_connection_string.metadata = {'url': '/device/createFromConnectionString/{transportType}'}
async def create_from_x509(
self, transport_type, x509, *, custom_headers=None, raw=False, **operation_config):
"""Create a device client from X509 credentials.
:param transport_type: Transport to use. Possible values include:
'amqp', 'amqpws', 'mqtt', 'mqttws', 'http'
:type transport_type: str
:param x509:
:type x509: object
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectResponse or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.ConnectResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.create_from_x509.metadata['url']
path_format_arguments = {
'transportType': self._serialize.url("transport_type", transport_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(x509, 'object')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_from_x509.metadata = {'url': '/device/createFromX509/{transportType}'}
async def create_from_symmetric_key(
self, transport_type, device_id, hostname, symmetric_key, *, custom_headers=None, raw=False, **operation_config):
"""Create a device client from a symmetric key.
:param transport_type: Transport to use. Possible values include:
'amqp', 'amqpws', 'mqtt', 'mqttws', 'http'
:type transport_type: str
:param device_id:
:type device_id: str
:param hostname: name of the host to connect to
:type hostname: str
:param symmetric_key: key to use for connection
:type symmetric_key: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ConnectResponse or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.ConnectResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.create_from_symmetric_key.metadata['url']
path_format_arguments = {
'transportType': self._serialize.url("transport_type", transport_type, 'str'),
'deviceId': self._serialize.url("device_id", device_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['hostname'] = self._serialize.query("hostname", hostname, 'str')
query_parameters['symmetricKey'] = self._serialize.query("symmetric_key", symmetric_key, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_from_symmetric_key.metadata = {'url': '/device/createFromSymmetricKey/{deviceId}/{transportType}'}
async def connect2(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Connect the device.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.connect2.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
connect2.metadata = {'url': '/device/{connectionId}/connect2'}
async def reconnect(
self, connection_id, force_renew_password=None, *, custom_headers=None, raw=False, **operation_config):
"""Reconnect the device.
:param connection_id: Id for the connection
:type connection_id: str
:param force_renew_password: True to force SAS renewal
:type force_renew_password: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.reconnect.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if force_renew_password is not None:
query_parameters['forceRenewPassword'] = self._serialize.query("force_renew_password", force_renew_password, 'bool')
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
reconnect.metadata = {'url': '/device/{connectionId}/reconnect'}
async def disconnect2(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Disconnect the device.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.disconnect2.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
disconnect2.metadata = {'url': '/device/{connectionId}/disconnect2'}
async def destroy(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Disconnect and destroy the device client.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.destroy.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
destroy.metadata = {'url': '/device/{connectionId}/destroy'}
async def enable_methods(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Enable methods.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.enable_methods.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
enable_methods.metadata = {'url': '/device/{connectionId}/enableMethods'}
async def wait_for_method_and_return_response(
self, connection_id, method_name, request_and_response, *, custom_headers=None, raw=False, **operation_config):
"""Wait for a method call, verify the request, and return the response.
This is a workaround to deal with SDKs that only have method call
operations that are sync. This function responds to the method with
the payload of this function, and then returns the method parameters.
Real-world implemenatations would never do this, but this is the only
same way to write our test code right now (because the method handlers
for C, Java, and probably Python all return the method response instead
of supporting an async method call).
:param connection_id: Id for the connection
:type connection_id: str
:param method_name: name of the method to handle
:type method_name: str
:param request_and_response:
:type request_and_response:
~e2erestapi.models.MethodRequestAndResponse
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.wait_for_method_and_return_response.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str'),
'methodName': self._serialize.url("method_name", method_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(request_and_response, 'MethodRequestAndResponse')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
wait_for_method_and_return_response.metadata = {'url': '/device/{connectionId}/waitForMethodAndReturnResponse/{methodName}'}
async def enable_c2d_messages(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Enable c2d messages.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.enable_c2d_messages.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
enable_c2d_messages.metadata = {'url': '/device/{connectionId}/enableC2dMessages'}
async def send_event(
self, connection_id, event_body, *, custom_headers=None, raw=False, **operation_config):
"""Send an event.
:param connection_id: Id for the connection
:type connection_id: str
:param event_body:
:type event_body: ~e2erestapi.models.EventBody
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.send_event.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(event_body, 'EventBody')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
send_event.metadata = {'url': '/device/{connectionId}/event'}
async def wait_for_c2d_message(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Wait for a c2d message.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EventBody or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.EventBody or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.wait_for_c2d_message.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EventBody', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
wait_for_c2d_message.metadata = {'url': '/device/{connectionId}/c2dMessage'}
async def enable_twin(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Enable device twins.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.enable_twin.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
enable_twin.metadata = {'url': '/device/{connectionId}/enableTwin'}
async def get_twin(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Get the device twin.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Twin or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.Twin or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.get_twin.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Twin', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_twin.metadata = {'url': '/device/{connectionId}/twin'}
async def patch_twin(
self, connection_id, twin, *, custom_headers=None, raw=False, **operation_config):
"""Updates the device twin.
:param connection_id: Id for the connection
:type connection_id: str
:param twin:
:type twin: ~e2erestapi.models.Twin
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.patch_twin.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(twin, 'Twin')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
patch_twin.metadata = {'url': '/device/{connectionId}/twin'}
async def wait_for_desired_properties_patch(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""Wait for the next desired property patch.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Twin or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.Twin or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.wait_for_desired_properties_patch.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Twin', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
wait_for_desired_properties_patch.metadata = {'url': '/device/{connectionId}/twinDesiredPropPatch'}
async def get_connection_status(
self, connection_id, *, custom_headers=None, raw=False, **operation_config):
"""get the current connection status.
:param connection_id: Id for the connection
:type connection_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.get_connection_status.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_connection_status.metadata = {'url': '/device/{connectionId}/connectionStatus'}
async def wait_for_connection_status_change(
self, connection_id, connection_status, *, custom_headers=None, raw=False, **operation_config):
"""wait for the current connection status to change and return the changed
status.
:param connection_id: Id for the connection
:type connection_id: str
:param connection_status: Desired connection status. Possible values
include: 'connected', 'disconnected'
:type connection_status: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.wait_for_connection_status_change.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['connectionStatus'] = self._serialize.query("connection_status", connection_status, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
wait_for_connection_status_change.metadata = {'url': '/device/{connectionId}/connectionStatusChange'}
async def get_storage_info_for_blob(
self, connection_id, blob_name, *, custom_headers=None, raw=False, **operation_config):
"""Get storage info for uploading into blob storage.
:param connection_id: Id for the connection
:type connection_id: str
:param blob_name: name of blob for blob upload
:type blob_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BlobStorageInfo or ClientRawResponse if raw=true
:rtype: ~e2erestapi.models.BlobStorageInfo or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.get_storage_info_for_blob.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['blobName'] = self._serialize.query("blob_name", blob_name, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BlobStorageInfo', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_storage_info_for_blob.metadata = {'url': '/device/{connectionId}/storageInfoForBlob'}
async def notify_blob_upload_status(
self, connection_id, correlation_id, is_success, status_code, status_description, *, custom_headers=None, raw=False, **operation_config):
"""notify iothub about blob upload status.
:param connection_id: Id for the connection
:type connection_id: str
:param correlation_id: correlation id for blob upload
:type correlation_id: str
:param is_success: True if blob upload was successful
:type is_success: bool
:param status_code: status code for blob upload
:type status_code: str
:param status_description: human readable descripton of the status for
blob upload
:type status_description: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.notify_blob_upload_status.metadata['url']
path_format_arguments = {
'connectionId': self._serialize.url("connection_id", connection_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['correlationId'] = self._serialize.query("correlation_id", correlation_id, 'str')
query_parameters['isSuccess'] = self._serialize.query("is_success", is_success, 'bool')
query_parameters['statusCode'] = self._serialize.query("status_code", status_code, 'str')
query_parameters['statusDescription'] = self._serialize.query("status_description", status_description, 'str')
# Construct headers
header_parameters = {}
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = await self._client.async_send(request, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
notify_blob_upload_status.metadata = {'url': '/device/{connectionId}/blobUploadStatus'}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
6127,
7560,
416,
5413,
357,
49,
8,
11160,
19452,
6127,
35986,
13,
198,
2,
19179,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
262,
2438,
318,
198,
2,
16935,
515,
13,
... | 2.568524 | 18,738 |
from . import unittest, numpy
from shapely.geometry import Point, asPoint
from shapely.errors import DimensionError
| [
6738,
764,
1330,
555,
715,
395,
11,
299,
32152,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6252,
11,
355,
12727,
198,
6738,
5485,
306,
13,
48277,
1330,
34024,
12331,
628,
198
] | 3.6875 | 32 |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 10:04:26 2016
PCA source code
@author: liudiwei
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#计算均值,要求输入数据为numpy的矩阵格式,行表示样本数,列表示特征
#计算方差,传入的是一个numpy的矩阵格式,行表示样本数,列表示特征
#标准化,传入的是一个numpy的矩阵格式,行表示样本数,列表示特征
"""
参数:
- XMat:传入的是一个numpy的矩阵格式,行表示样本数,列表示特征
- k:表示取前k个特征值对应的特征向量
返回值:
- finalData:参数一指的是返回的低维矩阵,对应于输入参数二
- reconData:参数二对应的是移动坐标轴后的矩阵
"""
#简单测试
#数据来源:http://www.cnblogs.com/jerrylead/archive/2011/04/18/2020209.html
#根据数据集data.txt
if __name__ == "__main__":
finalData, reconMat = main()
plotBestFit(finalData, reconMat) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
3158,
2579,
838,
25,
3023,
25,
2075,
1584,
198,
5662,
32,
2723,
2438,
198,
31,
9800,
25,
7649,
47928,
42990,
198,
37811,
198,
198,
11748,
... | 1.042071 | 618 |
"""Unusually poor sorting algorithms that work (eventually)."""
from random import shuffle
from itertools import permutations
from algs.sorting import check_sorted
def random_sort(A):
"""
Randomly shuffle A until it is sorted.
This can take arbitrarily long and may never actually
produce the sorted answer. However, with non-zero
probability it might generate the answer.
"""
while not check_sorted(A):
shuffle(A)
def permutation_sort(A):
"""
Generates all permutation of A until one is sorted.
Guaranteed to sort the values in A.
"""
for attempt in permutations(A):
if check_sorted(attempt):
A[:] = attempt[:] # copy back into A
return
| [
37811,
3118,
23073,
3595,
29407,
16113,
326,
670,
357,
15596,
935,
21387,
15931,
198,
6738,
4738,
1330,
36273,
198,
6738,
340,
861,
10141,
1330,
9943,
32855,
198,
6738,
435,
14542,
13,
82,
24707,
1330,
2198,
62,
82,
9741,
198,
198,
4299... | 2.852713 | 258 |
"""
File holds self contained TRPO agent.
"""
import torch
import gym
from numpy.random import choice
from copy import deepcopy
from torch.nn.utils.convert_parameters import parameters_to_vector
from torch.nn.utils.convert_parameters import vector_to_parameters
class TRPOAgent:
"""Continuous TRPO agent."""
def __call__(self, state):
"""
Peforms forward pass on the NN and parameterized distribution.
Parameters
----------
state : torch.Tensor
Tensor passed into NN and distribution.
Returns
-------
Action choice for each action dimension.
"""
state = torch.as_tensor(state, dtype=torch.float32, device=self.device)
# Parameterize distribution with policy, sample action
normal_dist = self.distribution(self.policy(state), self.logstd.exp())
action = normal_dist.sample()
# Save information
self.buffers['actions'].append(action)
self.buffers['log_probs'].append(normal_dist.log_prob(action))
self.buffers['states'].append(state)
return action.cpu().numpy()
def kl(self, new_policy, new_std, states, grad_new=True):
"""Compute KL divergence between current policy and new one.
Parameters
----------
new_policy : TRPOAgent
new_std : torch.Tensor
states : torch.Tensor
States to compute KL divergence over.
grad_new : bool, optional
Enable gradient of new policy.
"""
mu1 = self.policy(states)
log_sigma1 = self.logstd
mu2 = new_policy(states)
log_sigma2 = new_std
# Detach other as gradient should only be w.r.t. to one
if grad_new:
mu1, log_sigma1 = mu1.detach(), log_sigma1.detach()
else:
mu2, log_sigma2 = mu2.detach(), log_sigma2.detach()
# Compute KL over all states
kl_matrix = ((log_sigma2 - log_sigma1) + 0.5 * (log_sigma1.exp().pow(2)
+ (mu1 - mu2).pow(
2)) / log_sigma2.exp().pow(2) - 0.5)
# Sum over action dim, average over all states
return kl_matrix.sum(1).mean()
def fisher_vector_direct(self, vector, states):
"""Computes the fisher vector product through direct method.
The FVP can be determined by first taking the gradient of KL
divergence w.r.t. the parameters and the dot product of this
with the input vector, then a gradient over this again w.r.t.
the parameters.
"""
vector = vector.clone().requires_grad_()
# Gradient of KL w.r.t. network param
self.policy.zero_grad()
kl_divergence = self.kl(self.policy, self.logstd, states)
grad_kl = torch.autograd.grad(kl_divergence, self.policy.parameters(),
create_graph=True)
grad_kl = torch.cat([grad.view(-1) for grad in grad_kl])
# Gradient of the gradient vector dot product w.r.t. param
grad_vector_dot = grad_kl.dot(vector)
fisher_vector_product = torch.autograd.grad(grad_vector_dot,
self.policy.parameters())
fisher_vector_product = torch.cat([out.view(-1) for out in
fisher_vector_product]).detach()
# Apply CG dampening and return fisher vector product
return fisher_vector_product + self.cg_dampening * vector.detach()
def conjugate_gradient(self, b, states):
"""
Solve Ax = b for A as FIM and b as initial gradient.
Source:
https://github.com/ikostrikov/pytorch-trpo/blob/master/trpo.py
Slight modifications to original, all credit to original.
"""
p = b.clone()
r = b.clone().double()
x = torch.zeros(*p.shape, device=self.device).double()
rdotr = r.dot(r)
for _ in range(self.cg_iteration):
fvp = self.fisher_vector_direct(p, states).double()
v = rdotr / p.double().dot(fvp)
x += v * p.double()
r -= v * fvp
new_rdotr = r.dot(r)
mu = new_rdotr / rdotr
p = (r + mu * p.double()).float()
rdotr = new_rdotr
if rdotr < self.cg_tolerance:
break
return x.float() | [
37811,
198,
8979,
6622,
2116,
7763,
7579,
16402,
5797,
13,
198,
37811,
198,
11748,
28034,
198,
11748,
11550,
198,
6738,
299,
32152,
13,
25120,
1330,
3572,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
28034,
13,
20471,
13,
26791,
13,
1... | 2.162003 | 2,037 |
from django.core import management
from theraq.celery import app as celery_app
@celery_app.task
| [
6738,
42625,
14208,
13,
7295,
1330,
4542,
198,
198,
6738,
262,
3766,
13,
7015,
88,
1330,
598,
355,
18725,
1924,
62,
1324,
628,
198,
31,
7015,
88,
62,
1324,
13,
35943,
198
] | 3.09375 | 32 |
#!/usr/bin/env python
import multiprocessing
import resource
max_cpu_cores = multiprocessing.cpu_count()
print(f"Max CPU cores for multiprocessing: {max_cpu_cores}")
max_open_files = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
print(f"Max Open Files/Sockets for asyncio IO: {max_open_files:,}")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
18540,
305,
919,
278,
198,
11748,
8271,
198,
198,
9806,
62,
36166,
62,
66,
2850,
796,
18540,
305,
919,
278,
13,
36166,
62,
9127,
3419,
198,
4798,
7,
69,
1,
11518,
9135,
21758,... | 2.743119 | 109 |
from __future__ import absolute_import
import yaml
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.plugins.filter.core import to_nice_yaml
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
331,
43695,
198,
198,
6738,
9093,
856,
13,
48277,
1330,
28038,
856,
12331,
198,
6738,
9093,
856,
13,
21412,
62,
26791,
13,
19412,
1330,
4731,
62,
19199,
198,
6738,
9093,
... | 3.465517 | 58 |
from .lexer import Lexer
from .parse import Parser
from .slimish_jinja import SlimishExtension
| [
6738,
764,
2588,
263,
1330,
17210,
263,
198,
6738,
764,
29572,
1330,
23042,
263,
198,
6738,
764,
82,
2475,
680,
62,
18594,
6592,
1330,
34199,
680,
11627,
3004,
198
] | 3.275862 | 29 |
import discord
from discord.ext import commands
from bot_key import bot_key
client = commands.Bot(command_prefix = '.')
@client.event
client.run(bot_key) | [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
10214,
62,
2539,
1330,
10214,
62,
2539,
198,
198,
16366,
796,
9729,
13,
20630,
7,
21812,
62,
40290,
796,
705,
2637,
8,
198,
198,
31,
16366,
13,
15596,
198,
198,
16366,
... | 3.183673 | 49 |
"""Provides classes that will read and parse Material definitions and related information in RSE game formats."""
import logging
from typing import List
from RainbowFileReaders.R6Constants import RSEGameVersions, RSEMaterialFormatConstants
from RainbowFileReaders.MathHelpers import normalize_color, unnormalize_color
from RainbowFileReaders.CXPMaterialPropertiesReader import get_cxp_definition, CXPMaterialProperties
from FileUtilities.BinaryConversionUtilities import BinaryFileDataStructure, SizedCString, BinaryFileReader
log = logging.getLogger(__name__)
class RSEMaterialListHeader(BinaryFileDataStructure):
"""Reads and stores information in the header of a material list"""
class RSEMaterialDefinition(BinaryFileDataStructure):
"""Reads, stores and provides functionality to use material information stored in RSE game formats"""
def get_material_game_version(self) -> str:
"""Returns the game this type of material is used in"""
sizeWithoutStrings = self.size
sizeWithoutStrings -= self.material_name.string_length
sizeWithoutStrings -= self.version_string.string_length
sizeWithoutStrings -= self.texture_name.string_length
#check if it's a rainbow six file, or rogue spear file
#Pylint disabled R1705 because stylistically i prefer this way here so i can extend it easier
if sizeWithoutStrings == RSEMaterialFormatConstants.RSE_MATERIAL_SIZE_NO_STRINGS_RAINBOW_SIX or self.versionNumber is None: # pylint: disable=R1705
# Rainbow Six files typically have material sizes this size, or contain no version number
return RSEGameVersions.RAINBOW_SIX
else:
#It's probably a Rogue Spear file
#Material sizes in rogue spear files seem to be very inconsistent, so there needs to be a better detection method for future versions of the file
#Actually, material sizes in rogue spear appear consistently as 69 if you just remove the texturename string length
sizeWithoutStrings = self.size
sizeWithoutStrings -= self.texture_name.string_length
if sizeWithoutStrings == RSEMaterialFormatConstants.RSE_MATERIAL_SIZE_NO_STRINGS_ROGUE_SPEAR:
return RSEGameVersions.ROGUE_SPEAR
return RSEGameVersions.UNKNOWN
def add_CXP_information(self, CXPDefinitions):
"""Takes a list of CXPMaterialProperties, and adds matching information"""
cxp = get_cxp_definition(CXPDefinitions, self.texture_name.string)
self.CXPMaterialProperties = cxp
| [
37811,
15946,
1460,
6097,
326,
481,
1100,
290,
21136,
14633,
17336,
290,
3519,
1321,
287,
371,
5188,
983,
17519,
526,
15931,
198,
11748,
18931,
198,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
19909,
8979,
5569,
364,
13,
49,
21,
34184... | 3.061979 | 839 |
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
import edutalk
BASE_DIR = os.path.dirname(__file__)
REQUIRES, DEP_LINKS = get_dep_links()
setup(
name='edutalk',
version=edutalk.version,
author='The EduTalk Team',
author_email='edutalk@pcs.cs.nctu.edu.tw',
url='https://gitlab.com/IoTtalk/edutalk/',
packages=find_packages(exclude=['docs']),
entry_points={
'console_scripts': ('edutalk=edutalk.cli:main',),
},
data_files=[
('share/edutalk', ['share/edutalk.ini.sample']),
],
install_requires=REQUIRES,
dependency_links=DEP_LINKS,
tests_require=get_test_requires(),
cmdclass={'test': PyTest},
platforms=['Linux', 'FreeBSD'],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| [
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
6738,
900,
37623,
10141,
13,
21812,
13,
9288,
1330,
1332,
355,
6208,
21575,
198,
198,
11748,
1225,
315,
971,
198,
198,
33,
11159,
... | 2.444724 | 398 |
import sys
# download necessary NLTK data
import nltk
nltk.download(['punkt', 'wordnet'])
# import libraries
import numpy as np
import pandas as pd
import pickle
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.multioutput import MultiOutputClassifier
def load_data(database_filepath):
"""
Function load_data takes in a database filepath and reads the data from it into dataframes
Parameters:
database_filepath: Filepath of the database
Returns:
X: The messages that we want to classify
y: The categories that we want to classify the messages into
category_names: The names of the categories
"""
# load data from database
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table('Messages', engine)
df = df[df.message.notnull()]
df = df.dropna()
# Build the X, y and category_names variables
X = df['message']
y = df.drop(['id','message','original','genre'], axis=1)
category_names = list(df.columns)
return X, y, category_names
def tokenize(text):
"""
Function tokenize takes in text and tokenizes it
Parameters:
text: A text string
Returns:
clean_tokens: The tokenized version of the text
"""
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
def build_model():
"""
Function build_model builds the pipeline
Parameters:
N/A
Returns:
pipeline: The pipeline to be used
"""
# Build the machine learning pipeline
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier()))
])
parameters = {'clf__estimator__max_depth':[2,4,6,8,10],'clf__estimator__n_estimators':[50,100,250]}
# Create the GridSearchCV object
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
def evaluate_model(model, X_test, y_test, category_names):
"""
Function evaluate_model takes in a model, test variables and category names and predicts
based upon them and tests the model
Parameters:
model: The model to be used
X_test: The messages in the test dataset
y_test: The categories in the test dataset
category_names: The category names
Returns:
N/A
"""
# predict on test data
y_pred = model.predict(X_test)
# test the model
col_idx = 0
for col_name in y_test:
print('column: ', col_name)
print(classification_report(y_test[col_name], y_pred[:,col_idx]))
col_idx = col_idx + 1
def save_model(model, model_filepath):
"""
Function save_model takes in a model and a filepath and saves the model as
a pickle file at the filepath
Parameters:
model: The model to be saved
model_filepath: The filepath for the model to be saved to
Returns:
N/A
"""
# Save to file in the current working directory
with open(model_filepath, 'wb') as file:
pickle.dump(model, file)
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
198,
2,
4321,
3306,
22879,
51,
42,
1366,
198,
11748,
299,
2528,
74,
198,
77,
2528,
74,
13,
15002,
7,
17816,
30354,
83,
3256,
705,
4775,
3262,
6,
12962,
198,
198,
2,
1330,
12782,
198,
11748,
299,
32152,
355,
45941,... | 2.813646 | 1,363 |
from finetune.lm_entailment import LanguageModelEntailment
from finetune.lm_multipurpose import LanguageModelGeneralAPI
from finetune.lm_classifier import LanguageModelClassifier | [
6738,
957,
316,
1726,
13,
75,
76,
62,
298,
603,
434,
1330,
15417,
17633,
14539,
603,
434,
198,
6738,
957,
316,
1726,
13,
75,
76,
62,
16680,
541,
333,
3455,
1330,
15417,
17633,
12218,
17614,
198,
6738,
957,
316,
1726,
13,
75,
76,
6... | 3.490196 | 51 |
# SPDX-License-Identifier: CC0-1.0
#
# Written in 2021 by Noralf Trønnes <noralf@tronnes.org>
#
# To the extent possible under law, the author(s) have dedicated all copyright and related and
# neighboring rights to this software to the public domain worldwide. This software is
# distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with this software.
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
import pytest
import gpiod
import sys
import errno
from pathlib import Path
import subprocess
import time
gpio_unavail = (24, 23, 1, 0)
# the gpio's are connected through a 330 ohm resistor
gpio_pairs = [(2, 3),
(6, 7),
(8, 9),
(10, 11),
(12, 13),
(14, 15),
(27, 28),
]
gpio_pairs_flipped = [(pair[1], pair[0]) for pair in gpio_pairs]
gpio_pairs_all = gpio_pairs + gpio_pairs_flipped
@pytest.fixture(scope='module')
@pytest.mark.parametrize('gpio', gpio_unavail)
@pytest.mark.parametrize('pair', gpio_pairs_all, ids=[f'{pair[1]}->{pair[0]}' for pair in gpio_pairs_all])
@pytest.mark.parametrize('pair', gpio_pairs_all, ids=[f'{pair[1]}->{pair[0]}' for pair in gpio_pairs_all])
# Tests using an external source
# GP22 is connected to GPIO27 on the Pi4 through a 330 ohm resistor
ext_gpio_num = 27
@pytest.fixture(scope='module')
@pytest.fixture
edges = (gpiod.LINE_REQ_EV_BOTH_EDGES, gpiod.LINE_REQ_EV_RISING_EDGE, gpiod.LINE_REQ_EV_FALLING_EDGE)
edges_ids = ['BOTH', 'RISING', 'FALLING']
@pytest.mark.parametrize('edge', edges, ids=edges_ids)
# Pulse the gpio so fast that the Pico detects both a fall and a rise event before its interrupt routine is called
@pytest.mark.parametrize('edge', edges, ids=edges_ids)
@pytest.mark.parametrize('edge', edges, ids=edges_ids)
# Make sure the device runs out of slots and the events array is maxed out, then make sure it has recovered
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
12624,
15,
12,
16,
13,
15,
198,
2,
198,
2,
22503,
287,
33448,
416,
5414,
1604,
833,
24172,
77,
2516,
1279,
13099,
1604,
31,
83,
1313,
2516,
13,
2398,
29,
198,
2,
198,
2,
1675,
262,
... | 2.494937 | 790 |
from flask_restx import fields
from flask_restx.fields import MarshallingError
from flask_restx.marshalling import marshal
from .common import ns
from .tracks import track, track_full
from .playlists import playlist_model, full_playlist_model
activity_model = ns.model(
"activity",
{
"timestamp": fields.String(allow_null=True),
"item_type": ItemType,
"item": ActivityItem,
},
)
activity_model_full = ns.model(
"activity_full",
{
"timestamp": fields.String(allow_null=True),
"item_type": ItemType,
"item": FullActivityItem,
},
)
| [
6738,
42903,
62,
2118,
87,
1330,
7032,
198,
6738,
42903,
62,
2118,
87,
13,
25747,
1330,
9786,
9221,
12331,
198,
6738,
42903,
62,
2118,
87,
13,
76,
5406,
9221,
1330,
22397,
282,
198,
6738,
764,
11321,
1330,
36545,
198,
6738,
764,
46074... | 2.594017 | 234 |
from __future__ import annotations
from abc import abstractmethod
from datetime import date, datetime
from typing import Any, Generic, Optional, TypeVar, Union
from goodboy.errors import Error
from goodboy.messages import DEFAULT_MESSAGES, MessageCollectionType, type_name
from goodboy.schema import Rule, SchemaWithUtils
D = TypeVar("D")
class DateBase(Generic[D], SchemaWithUtils):
"""
Abstract base class for Date/DateTime schemas, should not be used directly. Use
:class:`Date` or :class:`DateTime` instead.
"""
@abstractmethod
@abstractmethod
class Date(DateBase[date]):
"""
Accept ``datetime.date`` values.
When type casting enabled, strings are converted to ``datetime.date`` using
``format`` option as strptime format.
:param allow_none: If true, value is allowed to be ``None``.
:param messages: Override error messages.
:param rules: Custom validation rules.
:param earlier_than: Accept only values earlier than option value.
:param earlier_or_equal_to: Accept only values earlier than or equal to option
value.
:param later_than: Accept only values later than option value.
:param later_or_equal_to: Accept only values later than or equal to option value.
:param format: date format for type casting. See
`strftime() and strptime() Behavior <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`_
for details.
:param allowed: Allow only certain values.
""" # noqa: E501
class DateTime(DateBase[datetime]):
"""
Accept ``datetime.datetime`` values.
When type casting enabled, strings are converted to ``datetime.datetime`` using
``format`` option as strptime format.
:param allow_none: If true, value is allowed to be ``None``.
:param messages: Override error messages.
:param rules: Custom validation rules.
:param earlier_than: Accept only values earlier than option value.
:param earlier_or_equal_to: Accept only values earlier than or equal to option
value.
:param later_than: Accept only values later than option value.
:param later_or_equal_to: Accept only values later than or equal to option value.
:param format: datetime format for type casting. See
`strftime() and strptime() Behavior <https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`_
for details.
:param allowed: Allow only certain values.
""" # noqa: E501
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
6738,
19720,
1330,
4377,
11,
42044,
11,
32233,
11,
5994,
19852,
11,
4479,
198,
198,
6738,
922,
... | 3.155019 | 787 |
from sys import stdin, exit, argv
import os
from PyQt4 import QtCore, QtGui, QtNetwork
global port
port = 20320
if __name__ == "__main__":
app = QtGui.QApplication(argv)
receiver = Receiver()
sender = Sender()
sender.show()
exit( app.exec_())
| [
198,
6738,
25064,
1330,
14367,
259,
11,
8420,
11,
1822,
85,
198,
11748,
28686,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
26245,
220,
628,
220,
198,
20541,
2493,
198,
634,
796,
1160,
19504,
198,
... | 2.358333 | 120 |
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext, ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import Orderable, RichText
from mezzanine.forms import fields
from mezzanine.pages.models import Page
class Form(Page, RichText):
"""
A user-built form.
"""
button_text = models.CharField(_("Button text"), max_length=50,
default=ugettext("Submit"))
response = RichTextField(_("Response"))
send_email = models.BooleanField(_("Send email to user"), default=True,
help_text=_("To send an email to the email address supplied in "
"the form upon submission, check this box."))
email_from = models.EmailField(_("From address"), blank=True,
help_text=_("The address the email will be sent from"))
email_copies = models.CharField(_("Send email to others"), blank=True,
help_text=_("Provide a comma separated list of email addresses "
"to be notified upon form submission. Leave blank to "
"disable notifications."),
max_length=200)
email_subject = models.CharField(_("Subject"), max_length=200, blank=True)
email_message = models.TextField(_("Message"), blank=True,
help_text=_("Emails sent based on the above options will contain "
"each of the form fields entered. You can also enter "
"a message here that will be included in the email."))
need_payment = models.BooleanField(_('User need to pay'), default=False, help_text=_('Forms must be confirmed with a payment. IMPORTANT : You need to add an email field to the form!'))
amount = models.PositiveIntegerField(_('Amount'), default=0, help_text=_('In CHF'))
maximum_payable_forms = models.PositiveIntegerField(_('Maximum payed form entries'), default=0, help_text=_('Only used with payment'))
final_confirmation_message = models.TextField(_('Final confirmation message'), help_text=_("Final text after the user has paid for the form"), blank=True)
final_confirmation_email = models.TextField(_('Final confirmation email'), help_text=_("Message for the email to send to the user when he has paid for the form. Leave blank to not send a email."), blank=True)
final_confirmation_subject = models.CharField(_('Final confirmation subject'), max_length=200, help_text=_("Subject for the email to send to the user when he has paid for the form"), blank=True)
def can_start_payment(self):
"""Return true if the user can pay"""
if self.entries.filter(payment__is_valid=True).count() >= self.maximum_payable_forms:
return False
return True
class FieldManager(models.Manager):
"""
Only show visible fields when displaying actual form..
"""
@python_2_unicode_compatible
class Field(Orderable):
"""
A field for a user-built form.
"""
form = models.ForeignKey("Form", related_name="fields")
label = models.CharField(_("Label"),
max_length=settings.FORMS_LABEL_MAX_LENGTH)
field_type = models.IntegerField(_("Type"), choices=fields.NAMES)
required = models.BooleanField(_("Required"), default=True)
visible = models.BooleanField(_("Visible"), default=True)
choices = models.CharField(_("Choices"), max_length=4000, blank=True,
help_text=_("Comma separated options where applicable. If an option "
"itself contains commas, surround the option with `backticks`."))
default = models.CharField(_("Default value"), blank=True,
max_length=settings.FORMS_FIELD_MAX_LENGTH)
placeholder_text = models.CharField(_("Placeholder Text"), blank=True,
max_length=100, editable=settings.FORMS_USE_HTML5)
help_text = models.CharField(_("Help text"), blank=True, max_length=400)
objects = FieldManager()
def get_choices(self):
"""
Parse a comma separated choice string into a list of choices taking
into account quoted choices.
"""
choice = ""
(quote, unquote) = ("`", "`")
quoted = False
for char in self.choices:
if not quoted and char == quote:
quoted = True
elif quoted and char == unquote:
quoted = False
elif char == "," and not quoted:
choice = choice.strip()
if choice:
yield choice, choice
choice = ""
else:
choice += char
choice = choice.strip()
if choice:
yield choice, choice
def is_a(self, *args):
"""
Helper that returns ``True`` if the field's type is given in any arg.
"""
return self.field_type in args
class FormEntry(models.Model):
"""
An entry submitted via a user-built form.
"""
form = models.ForeignKey("Form", related_name="entries")
entry_time = models.DateTimeField(_("Date/time"))
def get_payment(self):
"""Return the payment object"""
p, _ = Payment.objects.get_or_create(entry=self)
return p
def is_payment_valid(self):
"""Return true if the form has a valid payment"""
return self.get_payment().is_valid
class FieldEntry(models.Model):
"""
A single field value for a form entry submitted via a user-built form.
"""
entry = models.ForeignKey("FormEntry", related_name="fields")
field_id = models.IntegerField()
value = models.CharField(max_length=settings.FORMS_FIELD_MAX_LENGTH,
null=True)
class Payment(models.Model):
"""A payment for a form"""
entry = models.ForeignKey(FormEntry)
is_valid = models.BooleanField(default=False)
started = models.BooleanField(default=False)
redirect_url = models.CharField(max_length=255, null=True)
def reference(self):
"""Return a reference for the payment"""
return "Form-ID-%s@mezzanine-payment-%s" % (self.entry.form.pk, self.pk, )
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
21015,
62,
17,
62,
46903,
1098,
62,
38532,
198,
6738,
42625,
1420... | 2.695804 | 2,288 |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 18 15:56:56 2016
@author: ttshimiz
"""
import numpy as np
import pandas as pd
import astropy.units as u
import astropy.constants as c
import astropy.io.fits as fits
import astropy.modeling as apy_mod
import astropy.convolution as apy_conv
import astropy.coordinates as apy_coord
from astropy.stats import sigma_clipped_stats, sigma_clip
from astropy.wcs import WCS
from spectral_cube import SpectralCube
import aplpy
import matplotlib.pyplot as plt
import lines
import multiprocessing
import time
import peakutils
HOME = '/Users/ttshimiz/'
def read_data(fn, scale=True):
"""
Reads in SINFONI FITS cube, cleans up the header, and returns a
spectral-cube object.
Parameters
----------
fn = string; FITS file names
Returns
-------
cube = spectral_cube.SpectralCube object
"""
if scale:
data = fits.getdata(fn)*1e-17
else:
data = fits.getdata(fn)
header = fits.getheader(fn)
# Check the spectral axis units and values
# Switch to meters if the unit is micron
cunit3 = header['CUNIT3']
crval3 = header['CRVAL3']
cdelt3 = header['CDELT3']
if cunit3 == 'MICRON':
cunit3 = 'meter'
crval3 = crval3*10**-6
cdelt3 = cdelt3*10**-6
header['CUNIT3'] = cunit3
header['CRVAL3'] = crval3
header['CDELT3'] = cdelt3
wcs = WCS(header)
# Check now the cdelt3 value in the WCS object
try:
if wcs.wcs.cd[2, 2] != cdelt3:
wcs.wcs.cd[2, 2] = cdelt3
except AttributeError:
pass
cube = SpectralCube(data=data, wcs=wcs, read_beam=False, meta={'BUNIT':'W / (m2 micron)'})
# Convert to microns
cube = cube.with_spectral_unit(u.micron)
return cube
def cont_fit_single(x, spectrum, degree=1, errors=None, exclude=None):
"""
Function to fit the continuum of a single spectrum with a polynomial.
"""
if errors is None:
errors = np.ones(len(spectrum))
if exclude is not None:
x = x[~exclude]
spectrum = spectrum[~exclude]
errors = errors[~exclude]
cont = apy_mod.models.Polynomial1D(degree=degree)
# Use the endpoints of the spectrum to guess at zeroth and first order
# parameters
y1 = spectrum[0]
y2 = spectrum[-1]
x1 = x[0]
x2 = x[-1]
cont.c1 = (y2-y1)/(x2-x1)
cont.c0 = y1 - cont.c1*x1
# Initialize the main fitter and the fitter that implements outlier removal using
# sigma clipping. Default is to do 5 iterations removing all 3-sigma outliers
fitter = apy_mod.fitting.LevMarLSQFitter()
or_fitter = apy_mod.fitting.FittingWithOutlierRemoval(fitter, sigma_clip, niter=5, sigma=3.0)
filtered_data, cont_fit = or_fitter(cont, x, spectrum)
return cont_fit
def remove_cont(cube, degree=1, exclude=None):
"""
Function to loop through all of the spectra in a cube and subtract out the continuum
"""
xsize = cube.shape[1]
ysize = cube.shape[2]
nparams = degree+1
fit_params = np.zeros((nparams, xsize, ysize))
spec_ax = cube.spectral_axis.value
data_cont_remove = np.zeros(cube.shape)
for i in range(xsize):
for j in range(ysize):
spec = cube[:, i, j].value/10**(-17)
if np.any(~np.isnan(spec)):
cont = cont_fit_single(spec_ax, spec, degree=degree, exclude=exclude)
for n in range(nparams):
fit_params[n, i, j] = cont.parameters[n]
data_cont_remove[:, i, j] = (spec - cont(spec_ax))*10**(-17)
else:
fit_params[:,i, j] = np.nan
data_cont_remove[:, i, j] = np.nan
cube_cont_remove = SpectralCube(data=data_cont_remove, wcs=cube.wcs,
meta={'BUNIT':cube.unit.to_string()})
cube_cont_remove = cube_cont_remove.with_spectral_unit(cube.spectral_axis.unit)
return cube_cont_remove, fit_params
def calc_local_rms(cube, exclude=None):
"""
Function to calculate the local rms of the spectrum around the line.
Assumes the continuum has been subtracted already.
Excludes the region around the line center +/- 'region'
"""
xsize = cube.shape[1]
ysize = cube.shape[2]
flux_unit = cube.unit
spec_ax = cube.spectral_axis
#ind_use = ((spec_ax < (line_center+region)) & (spec_ax > (line_center-region)))
local_rms = np.zeros((xsize, ysize))*flux_unit
for i in range(xsize):
for j in range(ysize):
spec = cube[:, i, j].value
if exclude is not None:
local_rms[i, j] = np.std(spec[~exclude])*flux_unit
else:
local_rms[i, j] = np.std(spec)*flux_unit
return local_rms
def calc_line_params(fit_params, line_centers, fit_params_mc=None, inst_broad=0):
"""
Function to determine the integrated line flux, velocity, and linewidth
Assumes the units on the amplitude are W/m^2/micron and the units on the
mean and sigma are micron as well.
If there are parameters from a Monte Carlo session, use these to determine errors
on the flux, velocity, and velocity dispersion.
"""
line_params = {}
for k in fit_params.keys():
lc = line_centers[k]
line_params[k] = {}
amp = fit_params[k]['amplitude']
line_mean = fit_params[k]['mean']
line_sigma = fit_params[k]['sigma']
if line_mean.unit != u.micron:
print('Warning: Units on the line mean and sigma are not in microns.'
'Integrated line flux will not be correct.')
# Integrated flux is just a Gaussian integral from -inf to inf
int_flux = np.sqrt(2*np.pi)*amp*np.abs(line_sigma)
if fit_params_mc is not None:
amp_mc = fit_params_mc[k]['amplitude']
mean_mc = fit_params_mc[k]['mean']
sigma_mc = fit_params_mc[k]['sigma']
int_flux_mc = np.sqrt(2*np.pi)*amp_mc*np.abs(sigma_mc)
# Convert the line mean and line sigma to km/s if not already
if line_mean.unit.physical_type != 'speed':
velocity = line_mean.to(u.km/u.s, equivalencies=u.doppler_optical(lc))
veldisp = (line_mean+line_sigma).to(u.km/u.s, equivalencies=u.doppler_optical(line_mean))
if fit_params_mc is not None:
velocity_mc = mean_mc.to(u.km/u.s, equivalencies=u.doppler_optical(lc))
veldisp_mc = (mean_mc+sigma_mc).to(u.km/u.s, equivalencies=u.doppler_optical(mean_mc))
else:
velocity = line_mean.to(u.km/u.s)
veldisp = line_sigma.to(u.km/u.s)
if fit_params_mc is not None:
velocity_mc = mean_mc.to(u.km/u.s)
veldisp_mc = sigma_mc.to(u.km/u.s)
line_params[k]['int_flux'] = int_flux
line_params[k]['velocity'] = velocity
# Subtract off instrumental broadening
phys_veldisp = np.sqrt(veldisp**2 - inst_broad**2)
phys_veldisp[veldisp < inst_broad] = 0.*u.km/u.s
line_params[k]['veldisp'] = phys_veldisp
if fit_params_mc is not None:
int_flux_err = np.zeros(int_flux.shape)*int_flux.unit
vel_err = np.zeros(velocity.shape)*velocity.unit
veldisp_err = np.zeros(veldisp.shape)*veldisp.unit
for i in range(int_flux.shape[0]):
for j in range(int_flux.shape[1]):
int_flux_err[i, j] = np.nanstd(int_flux_mc[:, i, j].value)*int_flux.unit
vel_err[i, j] = np.nanstd(velocity_mc[:, i, j].value)*velocity.unit
veldisp_err[i, j] = np.nanstd(veldisp_mc[:, i, j].value)*veldisp.unit
line_params[k]['int_flux_err'] = int_flux_err
line_params[k]['velocity_err'] = vel_err
phys_veldisp_err = veldisp*veldisp_err/phys_veldisp
line_params[k]['veldisp_err'] = phys_veldisp_err
return line_params
def plot_line_params(line_params, header=None, vel_min=-200., vel_max=200.,
vdisp_max=300., flux_max=None, mask=None, flux_scale='arcsinh',
subplots=True):
"""
Function to plot the line intensity, velocity, and velocity dispersion in one figure
"""
int_flux_hdu = fits.PrimaryHDU()
velocity_hdu = fits.PrimaryHDU()
veldisp_hdu = fits.PrimaryHDU()
if (header is not None):
header['WCSAXES'] = 2
header['NAXIS'] = 2
try:
header.remove('CDELT3')
except KeyError:
pass
try:
header.remove('CRVAL3')
except KeyError:
pass
try:
header.remove('CUNIT3')
except KeyError:
pass
try:
header.remove('CRPIX3')
except KeyError:
pass
try:
header.remove('CTYPE3')
except KeyError:
pass
int_flux_hdu.header = header
velocity_hdu.header = header
veldisp_hdu.header = header
int_flux_hdu.data = line_params['int_flux'].value.copy()
velocity_hdu.data = line_params['velocity'].value.copy()
veldisp_hdu.data = line_params['veldisp'].value.copy()
if mask is not None:
int_flux_hdu.data[mask] = np.nan
velocity_hdu.data[mask] = np.nan
veldisp_hdu.data[mask] = np.nan
if subplots:
fig = plt.figure(figsize=(18,6))
ax_int = aplpy.FITSFigure(int_flux_hdu, figure=fig, subplot=(1,3,1))
ax_vel = aplpy.FITSFigure(velocity_hdu, figure=fig, subplot=(1,3,2))
ax_vdp = aplpy.FITSFigure(veldisp_hdu, figure=fig, subplot=(1,3,3))
else:
fig_int = plt.figure(figsize=(6,6))
fig_vel = plt.figure(figsize=(6,6))
fig_vdp = plt.figure(figsize=(6,6))
ax_int = aplpy.FITSFigure(int_flux_hdu, figure=fig_int)
ax_vel = aplpy.FITSFigure(velocity_hdu, figure=fig_vel)
ax_vdp = aplpy.FITSFigure(veldisp_hdu, figure=fig_vdp)
#int_mn, int_med, int_sig = sigma_clipped_stats(line_params['int_flux'].value, iters=100)
#vel_mn, vel_med, vel_sig = sigma_clipped_stats(line_params['velocity'].value[np.abs(line_params['velocity'].value) < 1000.], iters=100)
#vdp_mn, vdp_med, vdp_sig = sigma_clipped_stats(line_params['veldisp'].value, iters=100)
if flux_max is None:
flux_max = np.nanmax(int_flux_hdu.data)
ax_int.show_colorscale(cmap='cubehelix', stretch=flux_scale, vmin=0, vmid=-flux_max/1000., vmax=flux_max)
ax_vel.show_colorscale(cmap='RdBu_r', vmin=vel_min, vmax=vel_max)
ax_vdp.show_colorscale(cmap='inferno', vmin=0, vmax=vdisp_max)
ax_int.set_nan_color('k')
ax_vel.set_nan_color('k')
ax_vdp.set_nan_color('k')
ax_int.show_colorbar()
ax_vel.show_colorbar()
ax_vdp.show_colorbar()
ax_int.colorbar.set_axis_label_text(r'Flux [W m$^{-2}$]')
ax_vel.colorbar.set_axis_label_text(r'Velocity [km s$^{-1}$]')
ax_vdp.colorbar.set_axis_label_text(r'$\sigma_{\rm v}$ [km s$^{-1}$]')
ax_int.set_axis_labels_ydisp(-30)
if subplots:
ax_vel.hide_yaxis_label()
ax_vel.hide_ytick_labels()
ax_vdp.hide_yaxis_label()
ax_vdp.hide_ytick_labels()
else:
ax_vel.set_axis_labels_ydisp(-30)
ax_vdp.set_axis_labels_ydisp(-30)
if subplots:
fig.subplots_adjust(wspace=0.3)
return fig, [ax_int, ax_vel, ax_vdp]
else:
return [fig_int, fig_vel, fig_vdp], [ax_int, ax_vel, ax_vdp]
def create_line_ratio_map(line1, line2, header, cmap='cubehelix',
line1_name=None, line2_name=None):
"""
Function to create a line ratio map. Map will be line1/line2.
"""
lr_hdu = fits.PrimaryHDU()
header['WCSAXES'] = 2
header['NAXIS'] = 2
header.remove('CDELT3')
header.remove('CRVAL3')
header.remove('CUNIT3')
header.remove('CRPIX3')
header.remove('CTYPE3')
lr_hdu.header = header
lr_hdu.data = line1/line2
lr_fig = aplpy.FITSFigure(lr_hdu)
lr_mn, lr_med, lr_sig = sigma_clipped_stats(line1/line2, iters=100)
lr_fig.show_colorscale(cmap=cmap, vmin=0.0, vmax=lr_med+2*lr_sig)
lr_fig.show_colorbar()
if ((line1_name is not None) & (line2_name is not None)):
lr_fig.colorbar.set_axis_label_text(line1_name+'/'+line2_name)
lr_fig.set_axis_labels_ydisp(-30)
return lr_fig
def create_model(line_centers, amp_guess=None,
center_guess=None, width_guess=None,
center_limits=None, width_limits=None,
center_fixed=None, width_fixed=None):
"""
Function that allows for the creation of a generic model for a spectral region.
Each line specified in 'line_names' must be included in the file 'lines.py'.
Defaults for the amplitude guesses will be 1.0 for all lines.
Defaults for the center guesses will be the observed wavelengths.
Defaults for the line widths will be 100 km/s for narrow lines and 1000 km/s for the
broad lines.
All lines are considered narrow unless the name has 'broad' attached to the end of the name.
"""
nlines = len(line_centers.keys())
line_names = line_centers.keys()
# Create the default amplitude guesses for the lines if necessary
if amp_guess is None:
amp_guess = {l:1.0 for l in line_names}
# Create arrays to hold the default line center and width guesses
if center_guess is None:
center_guess = {l:0*u.km/u.s for l in line_names}
if width_guess is None:
width_guess = {l:100.*u.km/u.s for l in line_names}
# Loop through each line and create a model
mods = []
for i,l in enumerate(line_names):
# Equivalency to convert to/from wavelength from/to velocity
opt_conv = u.doppler_optical(line_centers[l])
# Convert the guesses for the line center and width to micron
center_guess_i = center_guess[l].to(u.micron, equivalencies=opt_conv)
if u.get_physical_type(width_guess[l].unit) == 'speed':
width_guess_i = width_guess[l].to(u.micron, equivalencies=u.doppler_optical(center_guess_i)) - center_guess_i
elif u.get_physical_type(width_guess[l].unit) == 'length':
width_guess_i = width_guess[i].to(u.micron)
center_guess_i = center_guess_i.value
width_guess_i = width_guess_i.value
# Create the single Gaussian line model for the emission line
mod_single = apy_mod.models.Gaussian1D(mean=center_guess_i, amplitude=amp_guess[l],
stddev=width_guess_i, name=l)
# Set the constraints on the parameters if necessary
mod_single.amplitude.min = 0 # always an emission line
if center_limits is not None:
if center_limits[l][0] is not None:
mod_single.mean.min = center_limits[l][0].to(u.micron, equivalencies=opt_conv).value
if center_limits[l][1] is not None:
mod_single.mean.max = center_limits[l][1].to(u.micron, equivalencies=opt_conv).value
if width_limits is not None:
if width_limits[l][0] is not None:
mod_single.stddev.min = width_limits[l][0].to(u.micron, equivalencies=opt_conv).value - line_centers[l].value
else:
mod_single.stddev.min = 0 # can't have negative width
if width_limits[l][1] is not None:
mod_single.stddev.max = width_limits[l][1].to(u.micron, equivalencies=opt_conv).value - line_centers[l].value
else:
mod_single.stddev.min = 0
# Set the fixed parameters
if center_fixed is not None:
mod_single.mean.fixed = center_fixed[l]
if width_fixed is not None:
mod_single.stddev.fixed = width_fixed[l]
# Add to the model list
mods.append(mod_single)
# Create the combined model by adding all of the models together
if nlines == 1:
final_model = mods[0]
else:
final_model = mods[0]
for m in mods[1:]:
final_model += m
return final_model
def cubefit(cube, model, skip=None, exclude=None, line_centers=None,
auto_guess=False, guess_type=None, guess_region=None,
calc_uncert=False, nmc=100., rms=None, parallel=False, cores=None):
"""
Function to loop through all of the spectra in a cube and fit a model.
"""
xsize = cube.shape[1]
ysize = cube.shape[2]
flux_unit = cube.unit
spec_ax = cube.spectral_axis
lam = spec_ax.to(u.micron).value
spec_ax_unit = cube.spectral_axis.unit
residuals = np.zeros(cube.shape)
fit_params = {}
if skip is None:
skip = np.zeros((xsize, ysize), dtype=np.bool)
print "Starting 'cubefit' with {0} spectral points and {1}x{2} image shape" .format(cube.shape[0], xsize, ysize)
print "Total number of spaxels to fit: {0}/{1}".format(np.int(np.sum(~skip)), xsize*ysize)
if calc_uncert:
fit_params_mc = {}
if hasattr(model, 'submodel_names'):
for n in model.submodel_names:
fit_params[n] = {'amplitude': np.zeros((xsize, ysize))*flux_unit*np.nan,
'mean': np.zeros((xsize, ysize))*spec_ax_unit*np.nan,
'sigma': np.zeros((xsize, ysize))*spec_ax_unit*np.nan}
if calc_uncert:
fit_params_mc[n] = {'amplitude': np.zeros((nmc, xsize, ysize))*flux_unit*np.nan,
'mean': np.zeros((nmc, xsize, ysize))*spec_ax_unit*np.nan,
'sigma': np.zeros((nmc, xsize, ysize))*spec_ax_unit*np.nan}
else:
fit_params[model.name] = {'amplitude': np.zeros((xsize, ysize))*flux_unit*np.nan,
'mean': np.zeros((xsize, ysize))*spec_ax_unit*np.nan,
'sigma': np.zeros((xsize, ysize))*spec_ax_unit*np.nan}
if calc_uncert:
fit_params_mc[model.name] = {'amplitude': np.zeros((nmc, xsize, ysize))*flux_unit*np.nan,
'mean': np.zeros((nmc, xsize, ysize))*spec_ax_unit*np.nan,
'sigma': np.zeros((nmc, xsize, ysize))*spec_ax_unit*np.nan}
print "Lines being fit: {0}".format(fit_params.keys())
if calc_uncert:
print "Calculating uncertainties using MC simulation with {0} iterations.".format(nmc)
else:
print "No calculation of uncertainties."
print "Starting fitting..."
for i in range(xsize):
for j in range(ysize):
spec = cube[:, i, j].value/10**(-17)
if calc_uncert:
rms_i = rms[i, j].value/10**(-17)
else:
rms_i = None
if (np.any(~np.isnan(spec)) & ~skip[i, j]):
if auto_guess:
# Use the bounds on the line center as the guess region for each line
if guess_type == 'limits':
if hasattr(model, 'submodel_names'):
for k in fit_params.keys():
min_lam = model[k].mean.min
max_lam = model[k].mean.max
guess_region_line = (lam >= min_lam) & (lam <= max_lam)
ind_max = np.argmax(spec[guess_region_line])
wave_max = lam[guess_region_line][ind_max]
flux_max = spec[guess_region_line][ind_max]
model[k].mean = wave_max
model[k].amplitude = flux_max
else:
min_lam = model.mean.min
max_lam = model.mean.max
guess_region_line = (lam >= min_lam) & (lam <= max_lam)
ind_max = np.argmax(spec[guess_region_line])
wave_max = lam[guess_region_line][ind_max]
flux_max = spec[guess_region_line][ind_max]
model.mean = wave_max
model.amplitude = flux_max
elif guess_type == 'peak-find':
if guess_region is None:
guess_region = np.ones(len(spec), dtype=np.bool)
model = findpeaks(spec, lam, model, guess_region, line_centers)
fit_results = specfit(lam, spec, model, exclude=exclude,
calc_uncert=calc_uncert, nmc=nmc, rms=rms_i,
parallel=parallel)
print "Pixel {0},{1} fit successfully.".format(i,j)
if calc_uncert:
best_fit = fit_results[0]
err_fits = fit_results[1]
else:
best_fit = fit_results
if hasattr(model, 'submodel_names'):
for n in model.submodel_names:
fit_params[n]['amplitude'][i,j] = best_fit[n].amplitude.value*flux_unit*10**(-17)
fit_params[n]['mean'][i,j] = best_fit[n].mean.value*spec_ax_unit
fit_params[n]['sigma'][i,j] = best_fit[n].stddev.value*spec_ax_unit
if calc_uncert:
mc_amps = np.array([err_fits[k][n].amplitude.value for k in range(nmc)])
mc_mean = np.array([err_fits[k][n].mean.value for k in range(nmc)])
mc_sig = np.array([err_fits[k][n].stddev.value for k in range(nmc)])
fit_params_mc[n]['amplitude'][:,i,j] = mc_amps*flux_unit*10**(-17)
fit_params_mc[n]['mean'][:,i,j] = mc_mean*spec_ax_unit
fit_params_mc[n]['sigma'][:,i,j] = mc_sig*spec_ax_unit
else:
fit_params[model.name]['amplitude'][i,j] = best_fit.amplitude.value*flux_unit*10**(-17)
fit_params[model.name]['mean'][i,j] = best_fit.mean.value*spec_ax_unit
fit_params[model.name]['sigma'][i,j] = best_fit.stddev.value*spec_ax_unit
if calc_uncert:
mc_amps = np.array([err_fits[k].amplitude.value for k in range(nmc)])
mc_mean = np.array([err_fits[k].mean.value for k in range(nmc)])
mc_sig = np.array([err_fits[k].stddev.value for k in range(nmc)])
fit_params_mc[model.name]['amplitude'][:,i,j] = mc_amps*flux_unit*10**(-17)
fit_params_mc[model.name]['mean'][:,i,j] = mc_mean*spec_ax_unit
fit_params_mc[model.name]['sigma'][:,i,j] = mc_sig*spec_ax_unit
residuals[:,i,j] = (spec - best_fit(spec_ax.to(u.micron).value))*10**(-17)
else:
print "Pixel {0},{1} skipped.".format(i,j)
residuals[:,i,j] = spec*10**(-17)
resid_cube = SpectralCube(data=residuals, wcs=cube.wcs,
meta={'BUNIT':cube.unit.to_string()})
resid_cube = resid_cube.with_spectral_unit(cube.spectral_axis.unit)
if calc_uncert:
return [fit_params, resid_cube, fit_params_mc]
else:
return [fit_params, resid_cube]
def specfit(x, fx, model, exclude=None, calc_uncert=False, rms=None, nmc=100,
parallel=False, cores=None):
"""
Function to fit a single spectrum with a model.
Option to run a Monte Carlo simulation to determine the uncertainties using
either a simple for-loop or parallel.
"""
if exclude is not None:
x = x[~exclude]
fx = fx[~exclude]
bestfit = specfit_single(x, fx, model)
if (calc_uncert) & (~parallel):
rand_fits = []
for i in range(nmc):
rand_spec = np.random.randn(len(fx))*rms + fx
rand_fit_i = specfit_single(x, rand_spec, model)
rand_fits.append(rand_fit_i)
elif (calc_uncert) & (parallel):
rand_fits = specfit_parallel(x, fx, model, rms, nmc=nmc, cores=cores)
if calc_uncert:
return [bestfit, rand_fits]
else:
return bestfit
def skip_pixels(cube, rms, sn_thresh=3.0, spec_use=None):
"""
Function to determine which pixels to skip based on a user defined S/N threshold.
Returns an NxM boolean array where True indicates a pixel to skip.
The signal used is the maximum value in the spectrum.
If the maximum value is a NaN then that pixel is also skipped.
"""
if spec_use is None:
spec_max = cube.max(axis=0)
sig_to_noise = spec_max/rms
skip = (sig_to_noise.value < sn_thresh) | (np.isnan(sig_to_noise.value))
else:
xsize = cube.shape[1]
ysize = cube.shape[2]
skip = np.zeros((xsize, ysize), dtype=np.bool)
for x in range(xsize):
for y in range(ysize):
s = cube[:,x,y]
s_n = np.max(s[spec_use])/rms[x,y]
skip[x,y] = (s_n.value < sn_thresh) | (np.isnan(s_n.value))
return skip
def prepare_cube(cube, slice_center, velrange=[-4000., 4000.]*u.km/u.s):
"""
Function to slice the cube and extract a specific spectral region
based on a user-defined central wavelength and velocity range.
"""
# Slice first based on velocity
slice = cube.with_spectral_unit(unit=u.km/u.s, velocity_convention='optical',
rest_value=slice_center).spectral_slab(velrange[0], velrange[1])
# Convert back to wavelength
slice = slice.with_spectral_unit(unit=u.micron, velocity_convention='optical',
rest_value=slice_center)
return slice
def write_files(results, header, savedir='', suffix=''):
"""
Writes out all of the results to FITS files.
"""
key_remove = ['CDELT3', 'CRPIX3', 'CUNIT3', 'CTYPE3', 'CRVAL3']
# Write out the continuum-subtracted spectral cube and the residuals
results['continuum_sub'].write(savedir+'continuum_sub'+suffix+'.fits', format='fits', overwrite=True)
results['residuals'].write(savedir+'residuals'+suffix+'.fits', format='fits', overwrite=True)
# Write out the best parameters for the continuum
hdu_cont_params =fits.PrimaryHDU(data=results['cont_params'], header=header)
hdu_cont_params.header.remove('WCSAXES')
for k in key_remove:
hdu_cont_params.header.remove(k)
hdu_cont_params.header.remove('BUNIT')
fits.HDUList([hdu_cont_params]).writeto(savedir+'cont_params'+suffix+'.fits', overwrite=True)
# Write out the pixels that were fit or skipped
hdu_skip =fits.PrimaryHDU(data=np.array(results['fit_pixels'], dtype=int), header=header)
hdu_skip.header['WCSAXES'] = 2
for k in key_remove:
hdu_skip.header.remove(k)
hdu_skip.header.remove('BUNIT')
fits.HDUList([hdu_skip]).writeto(savedir+'skippix'+suffix+'.fits', overwrite=True)
# Write out the rms estimate
hdu_rms =fits.PrimaryHDU(data=np.array(results['rms'].value), header=header)
hdu_rms.header['WCSAXES'] = 2
for k in key_remove:
hdu_rms.header.remove(k)
fits.HDUList([hdu_rms]).writeto(savedir+'rms'+suffix+'.fits', overwrite=True)
# For each line fit, write out both the best fit gaussian parameters
# and physical line parameters
lines = results['fit_params'].keys()
# See if an uncertainty estimate was made
unc_exist = results.has_key('fit_params_mc')
for l in lines:
gauss_params = results['fit_params'][l]
hdu_amp = fits.PrimaryHDU(data=gauss_params['amplitude'].value, header=header)
hdu_cent = fits.ImageHDU(data=gauss_params['mean'].value, header=header)
hdu_sig = fits.ImageHDU(data=gauss_params['sigma'].value, header=header)
line_params = results['line_params'][l]
hdu_flux = fits.PrimaryHDU(data=line_params['int_flux'].value, header=header)
hdu_vel = fits.ImageHDU(data=line_params['velocity'].value, header=header)
hdu_vdisp = fits.ImageHDU(data=line_params['veldisp'].value, header=header)
if unc_exist:
gauss_params_mc = results['fit_params_mc'][l]
hdu_amp_mc = fits.PrimaryHDU(data=gauss_params_mc['amplitude'].value, header=header)
hdu_cent_mc = fits.ImageHDU(data=gauss_params_mc['mean'].value, header=header)
hdu_sig_mc = fits.ImageHDU(data=gauss_params_mc['sigma'].value, header=header)
hdu_flux_err = fits.ImageHDU(data=line_params['int_flux_err'].value, header=header)
hdu_vel_err = fits.ImageHDU(data=line_params['velocity_err'].value, header=header)
hdu_vdisp_err = fits.ImageHDU(data=line_params['veldisp_err'].value, header=header)
hdu_amp.header['EXTNAME'] = 'amplitude'
hdu_cent.header['EXTNAME'] = 'line center'
hdu_sig.header['EXTNAME'] = 'sigma'
hdu_flux.header['EXTNAME'] = 'int flux'
hdu_vel.header['EXTNAME'] = 'velocity'
hdu_vdisp.header['EXTNAME'] = 'velocity dispersion'
hdu_amp.header['WCSAXES'] = 2
hdu_cent.header['WCSAXES'] = 2
hdu_sig.header['WCSAXES'] = 2
hdu_flux.header['WCSAXES'] = 2
hdu_vel.header['WCSAXES'] = 2
hdu_vdisp.header['WCSAXES'] = 2
if unc_exist:
hdu_amp_mc.header['EXTNAME'] = 'MC amplitude'
hdu_cent_mc.header['EXTNAME'] = 'MC line center'
hdu_sig_mc.header['EXTNAME'] = 'MC sigma'
hdu_flux_err.header['EXTNAME'] = 'int flux error'
hdu_vel_err.header['EXTNAME'] = 'velocity error'
hdu_vdisp_err.header['EXTNAME'] = 'velocity dispersion error'
hdu_amp.header['WCSAXES'] = 2
hdu_cent.header['WCSAXES'] = 2
hdu_sig.header['WCSAXES'] = 2
hdu_flux.header['WCSAXES'] = 2
hdu_vel.header['WCSAXES'] = 2
hdu_vdisp.header['WCSAXES'] = 2
for k in key_remove:
hdu_amp.header.remove(k)
hdu_cent.header.remove(k)
hdu_sig.header.remove(k)
hdu_flux.header.remove(k)
hdu_vel.header.remove(k)
hdu_vdisp.header.remove(k)
if unc_exist:
hdu_amp_mc.header.remove(k)
hdu_cent_mc.header.remove(k)
hdu_sig_mc.header.remove(k)
hdu_flux_err.header.remove(k)
hdu_vel_err.header.remove(k)
hdu_vdisp_err.header.remove(k)
hdu_cent.header['BUNIT'] = 'micron'
hdu_sig.header['BUNIT'] = 'micron'
hdu_flux.header['BUNIT'] = 'W m-2'
hdu_vel.header['BUNIT'] = 'km s-1'
hdu_vdisp.header['BUNIT'] = 'km s-1'
if unc_exist:
hdu_cent_mc.header['BUNIT'] = 'micron'
hdu_sig_mc.header['BUNIT'] = 'micron'
hdu_flux_err.header['BUNIT'] = 'W m-2'
hdu_vel_err.header['BUNIT'] = 'km s-1'
hdu_vdisp_err.header['BUNIT'] = 'km s-1'
gauss_list = fits.HDUList([hdu_amp, hdu_cent, hdu_sig])
gauss_list.writeto(savedir+l+'_gauss_params'+suffix+'.fits', overwrite=True)
if unc_exist:
gauss_mc_list = fits.HDUList([hdu_amp_mc, hdu_cent_mc, hdu_sig_mc])
gauss_mc_list.writeto(savedir+l+'_gauss_params_mc'+suffix+'.fits', overwrite=True)
line_list = fits.HDUList([hdu_flux, hdu_vel, hdu_vdisp, hdu_flux_err, hdu_vel_err, hdu_vdisp_err])
line_list.writeto(savedir+l+'_line_params'+suffix+'.fits', overwrite=True)
else:
line_list = fits.HDUList([hdu_flux, hdu_vel, hdu_vdisp])
line_list.writeto(savedir+l+'_line_params'+suffix+'.fits', overwrite=True)
return 0
def read_line_params(file):
"""
Function to read a FITS file containing the physical line params and return a
dictionary with the same structure as what is output of calc_line_params.
"""
hdu_list = fits.open(file)
line_params = {}
line_params['int_flux'] = hdu_list['int flux'].data*u.W/u.m**2/u.micron
line_params['velocity'] = hdu_list['velocity'].data*u.km/u.s
line_params['veldisp'] = hdu_list['velocity dispersion'].data*u.km/u.s
if len(hdu_list) == 6:
line_params['int_flux_err'] = hdu_list['int flux error'].data*u.W/u.m**2/u.micron
line_params['velocity_err'] = hdu_list['velocity error'].data*u.km/u.s
line_params['veldisp_err'] = hdu_list['velocity dispersion error'].data*u.km/u.s
return line_params
def read_fit_params(files, line_names):
"""
Function to read the FITS file(s) for the fit parameters of a spectral model.
"""
if type(line_names) == str:
line_names = [line_names]
files = [files]
fit_params = {n:{'amplitude':[], 'mean':[], 'sigma':[]} for n in line_names}
for i, f in enumerate(files):
hdu_list = fits.open(f)
fit_params[line_names[i]]['amplitude'] = hdu_list[0].data*u.W/u.m**2/u.micron
fit_params[line_names[i]]['mean'] = hdu_list[1].data*u.micron
fit_params[line_names[i]]['sigma'] = hdu_list[2].data*u.micron
return fit_params
def fitpa(vel, vel_err=None, xoff=None, yoff=None, mask=None):
"""
Function to fit the kinematic position angle using fit_kinematic_pa from Michele Cappelari
"""
from fit_kinematic_pa import fit_kinematic_pa
x, y = np.meshgrid(range(vel.shape[1]), range(vel.shape[0]))
if vel_err is None:
vel_err = vel*0 + 10.
if mask is None:
mask = np.isnan(vel)
xx = x.flatten()
yy = y.flatten()
vel_flat = vel.flatten()
vel_err_flat = vel_err.flatten()
mask_flat = mask.flatten()
xx = xx[~mask_flat]
yy = yy[~mask_flat]
vel_flat = vel_flat[~mask_flat]
vel_err_flat = vel_err_flat[~mask_flat]
if xoff is None:
xoff = vel.shape[0]/2
if yoff is None:
yoff = vel.shape[1]/2
xx = xx-xoff
yy = yy-yoff
angBest, angErr, vSyst = fit_kinematic_pa(xx, yy, vel_flat, dvel=vel_err_flat)
return angBest, angErr, vSyst
def find_cont_center(cube, lamrange, plot=False):
"""
Function to fit a 2D Gaussian to the image of a user-defined continuum
"""
slice = cube.spectral_slab(lamrange[0], lamrange[1])
int = slice.moment(0) # Zeroth moment is just the integral along the spectrum
img = int.value/np.nanmean(int.value)
xx, yy = np.meshgrid(range(img.shape[1]), range(img.shape[0]))
guess_x = img.shape[1]/2
guess_y = img.shape[0]/2
img_cut = img[guess_y-15:guess_y+15, guess_x-15:guess_x+15]
xx_cut = xx[guess_y-15:guess_y+15, guess_x-15:guess_x+15]
yy_cut = yy[guess_y-15:guess_y+15, guess_x-15:guess_x+15]
gauss_mod = apy_mod.models.Gaussian2D(x_mean=guess_x, y_mean=guess_y,
x_stddev=3.0, y_stddev=3.0)
fitter = apy_mod.fitting.LevMarLSQFitter()
best_fit = fitter(gauss_mod, xx_cut, yy_cut, img_cut)
center = [best_fit.x_mean.value, best_fit.y_mean.value]
if plot:
hdu = fits.PrimaryHDU(data=int.value, header=int.header)
fig = aplpy.FITSFigure(hdu)
fig.show_colorscale(cmap='cubehelix', stretch='linear')
ra, dec = fig.pixel2world(center[0]+1, center[1]+1)
fig.show_markers(ra, dec, marker='+', c='k', s=100, lw=1.0)
fig.show_colorbar()
fig.add_label(0.05, 0.95,
'Continuum = {0:0.3f} - {1:0.3f} micron'.format(lamrange[0].value, lamrange[1].value),
relative=True, color='r', size=14, horizontalalignment='left')
fig.add_label(0.05, 0.90,
'Pixel = [{0:0.2f},{1:0.2f}]'.format(center[0], center[1]),
relative=True, color='r', size=14, horizontalalignment='left')
fig.add_label(0.05, 0.85,
'RA, DEC = [{0:0.4f},{1:0.4f}]'.format(ra, dec),
relative=True, color='r', size=14, horizontalalignment='left')
return center, fig
else:
return center
def findpeaks(spec, lam, model, guess_region, line_centers):
"""
Function to find the peaks in a spectral region by smoothing with a Gaussian and
then using a peak finding algorithm. Then use the found peaks to adjust the initial
guesses for the model.
"""
# Make a copy of the model
mod = model.copy()
# Determine the number of lines being fit within the model
nlines = len(line_centers.keys())
# Smooth the spectrum with a 2 pixel wide Gaussian kernel to get rid of high frequency noise
gauss_kern = apy_conv.Gaussian1DKernel(2.0)
smoothed_spec = apy_conv.convolve(spec,gauss_kern)
# Find the peaks with peakutils
ind_peaks = peakutils.indexes(smoothed_spec[guess_region])
peak_waves = lam[guess_region][ind_peaks]
peak_flux = spec[guess_region][ind_peaks]
# Sort the peaks by flux and take the top N as estimates for the lines in the model
# Need to sort in descending order so I use the negative of the peak fluxes
ind_sort = np.argsort(-peak_flux)
peak_waves_sort = peak_waves[ind_sort]
peak_flux_sort = peak_flux[ind_sort]
if nlines > 1:
lc = np.array([line_centers[k].value for k in line_centers.keys()])
ln = np.array([k for k in line_centers.keys()])
# Sort the lines being fit by wavelength
ind_sort_lc = np.argsort(lc)
lc_sort = lc[ind_sort_lc]
ln_sort = ln[ind_sort_lc]
if len(ind_peaks) >= nlines:
peak_waves_use = peak_waves_sort[0:nlines]
peak_flux_use = peak_flux_sort[0:nlines]
# Now sort the useable peaks by wavelength and associate with the modeled lines
ind_sort_use = np.argsort(peak_waves_use)
peak_waves_use_sort = peak_waves_use[ind_sort_use]
peak_flux_use_sort = peak_flux_use[ind_sort_use]
for g, n in enumerate(ln_sort):
mod[n].mean = peak_waves_use_sort[g]
mod[n].amplitude = peak_flux_use_sort[g]
elif len(ind_peaks) > 0:
# Need to figure out which line each peak is associated with
# Use the difference between the peak wavelength and the line center
# Whichever line is closest to the peak is the one we'll assume the peak
# is associated with.
for l, w in enumerate(peak_waves_sort):
closest_line = ln[np.argmin(np.abs(lc-w))]
mod[closest_line].mean = w
mod[closest_line].amplitude = peak_flux_sort[l]
else:
# If there is only one line then just take the strongest peak
mod.mean = peak_waves_sort[0]
mod.amplitude = peak_flux_sort[0]
return mod
def calc_pixel_distance(header, center_coord):
"""Function to calculate the distance of each pixel from the central coordinate (center_coord).
header = astropy.io.fits Header object for the cube or image. Must contain valid WCS keywords
center_coord = astropy.coordinates.SkyCoord object with the RA and DEC of the position
that each pixel's distance from will be calculated.
"""
# Setup the arrays for the pixel X and Y positions
nx = header['NAXIS1']
ny = header['NAXIS2']
xx, yy = np.meshgrid(range(nx), range(ny))
# Setup the WCS object, remove the third axis if it exists
if header['NAXIS'] == 3:
header['NAXIS'] = 2
header['WCSAXES'] = 2
header.remove('NAXIS3')
header.remove('CRPIX3')
header.remove('CDELT3')
header.remove('CUNIT3')
header.remove('CTYPE3')
header.remove('CRVAL3')
dummy_wcs = WCS(header)
# Convert the pixel positions to RA and DEC
ras, decs = dummy_wcs.all_pix2world(xx, yy, 0)
world_coords = apy_coord.SkyCoord(ra=ras*u.deg, dec=decs*u.deg, frame='fk5')
# Calculate the separation
seps = center_coord.separation(world_coords).to(u.arcsec)
# Calculate the position angle using just the pixel position
# Need to convert the center position to pixels
centerx, centery = dummy_wcs.all_world2pix(center_coord.ra, center_coord.dec, 0)
dx = xx - centerx
dy = yy - centery
#ind1 = (dy > 0)
#ind2 = (dy < 0)
#ind3 = (dx > 0) & (dy > 0)
#pa = np.zeros(dx.shape)
pa = -np.arctan(dx/dy)*180./np.pi
#pa[ind2] = -np.arctan(dx[ind2]/dy[ind2])*180./np.pi
#pa[ind3] = 360.-np.arctan(dx[ind3]/dy[ind3])*180./np.pi
return seps, pa
def create_rgb_image(rfile, gfile, bfile, scale=1e21, stretch=12, Q=0.1,
sn_cut=3.0):
"""
Function to create an RGB image from 3 emission line flux maps.
Uses astropy.visualization.make_lupton_rgb to generate the RGB values.
Uses aplpy to create and show the figure.
Parameters
----------
rfile/gfile/bfile = strings for the FITS files containing the line maps corresponding
to each color
scale = float or 3 element array with values to multiply the integrated fluxes by.
stretch = float for the linear stretch of the image
Q = float for the asinh softening parameter
sn_cut = S/N threshold of each line, all pixels with S/N < sn_cut will be set to 0
For more information on stretch and Q see the documentation for make_lupton_rgb and
Lupton (2004).
Returns
-------
fig = aplpy.FITSFigure object
"""
from astropy.visualization import make_lupton_rgb
hdu_r = fits.open(rfile)
hdu_g = fits.open(gfile)
hdu_b = fits.open(bfile)
image_r = hdu_r['int flux'].data
image_r_err = hdu_r['int flux error'].data
image_g = hdu_g['int flux'].data
image_g_err = hdu_g['int flux error'].data
image_b = hdu_b['int flux'].data
image_b_err = hdu_b['int flux error'].data
ind_r = (image_r/image_r_err < sn_cut) | np.isnan(image_r)
ind_g = (image_g/image_g_err < sn_cut) | np.isnan(image_g)
ind_b = (image_b/image_b_err < sn_cut) | np.isnan(image_b)
image_r[ind_r] = 0
image_g[ind_g] = 0
image_b[ind_b] = 0
if np.isscalar(scale):
image_r = image_r*scale
image_g = image_g*scale
image_b = image_b*scale
else:
image_r = image_r*scale[0]
image_g = image_g*scale[1]
image_b = image_b*scale[2]
rgb = make_lupton_rgb(image_r, image_g, image_b, filename='rgb.png', Q=Q, stretch=stretch)
fig = aplpy.FITSFigure(hdu_r)
fig.show_rgb('rgb.png', interpolation='gaussian', vertical_flip=True)
return fig
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
5267,
1248,
1315,
25,
3980,
25,
3980,
1584,
198,
198,
31,
9800,
25,
256,
912,
38400,
528,
198,
37811,
198,
198,
11748,
299,
32152,
355,
... | 2.067194 | 20,612 |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import sys
import time
import polyaxon_sdk
from polyaxon_sdk.rest import ApiException
from urllib3.exceptions import HTTPError
from polyaxon import settings
from polyaxon.client import RunClient
from polyaxon.client.decorators import can_log_events, check_no_op, check_offline
from polyaxon.constants import UNKNOWN
from polyaxon.containers.contexts import (
CONTEXT_MOUNT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT,
)
from polyaxon.env_vars.getters import (
get_collect_artifact,
get_collect_resources,
get_log_level,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.polyaxonfile import OperationSpecification
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import LoggedEventSpec, V1Event, get_asset_path
from polyaxon.tracking.events import EventFileWriter, events_processors
from polyaxon.tracking.events.writer import ResourceFileWriter
from polyaxon.utils.env import get_run_env
from polyaxon.utils.path_utils import get_path_extension
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
2864,
12,
42334,
12280,
897,
261,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
... | 3.27112 | 509 |
"""Functions for saving and loading ProbFlow objects"""
import base64
import cloudpickle
__all__ = [
"dumps",
"loads",
"dump",
"load",
]
def dumps(obj):
"""Serialize a probflow object to a json-safe string.
Note
----
This removes the compiled ``_train_fn`` attribute of a |Model| which is
either a |TensorFlow| or |PyTorch| compiled function to perform a single
training step. Cloudpickle can't serialize it, and after de-serializing
will just JIT re-compile if needed.
"""
if hasattr(obj, "_train_fn"):
delattr(obj, "_train_fn")
return base64.b64encode(cloudpickle.dumps(obj)).decode("utf8")
def loads(s):
"""Deserialize a probflow object from string"""
return cloudpickle.loads(base64.b64decode(s.encode("utf8")))
def dump(obj, filename):
"""Serialize a probflow object to file
Note
----
This removes the compiled ``_train_fn`` attribute of a |Model| which is
either a |TensorFlow| or |PyTorch| compiled function to perform a single
training step. Cloudpickle can't serialize it, and after de-serializing
will just JIT re-compile if needed.
"""
if hasattr(obj, "_train_fn"):
delattr(obj, "_train_fn")
with open(filename, "wb") as f:
cloudpickle.dump(obj, f)
def load(filename):
"""Deserialize a probflow object from file"""
with open(filename, "rb") as f:
return cloudpickle.load(f)
| [
37811,
24629,
2733,
329,
8914,
290,
11046,
30873,
37535,
220,
5563,
37811,
198,
198,
11748,
2779,
2414,
198,
198,
11748,
6279,
27729,
293,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
67,
8142,
1600,
198,
220,
220,
220,... | 2.685874 | 538 |
#!/usr/bin/env python3
import socket
HOST = '' # Use default address
PORT = 42424 # Port to listen on (non-privileged ports are > 1023)
Number = -1
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print ("Starting server")
while True:
try:
conn, addr = s.accept()
with conn:
print('Connected by', addr)
Number += 1
conn.settimeout(1)
with open("memory" + ("" if Number == 0 else str(Number)) + ".bin", "wb") as file:
while True:
data = conn.recv(4096)
if not data:
break
file.write(data)
except socket.timeout:
print('Connection timed out') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17802,
198,
198,
39,
10892,
796,
10148,
220,
220,
220,
220,
220,
220,
1303,
5765,
4277,
2209,
198,
15490,
796,
48252,
1731,
220,
220,
220,
1303,
4347,
284,
6004,
319,
... | 1.889135 | 451 |
from pbxproj.pbxsections import *
from pbxproj.pbxextensions import FileOptions
import plistlib
| [
6738,
279,
65,
87,
1676,
73,
13,
40842,
87,
23946,
1330,
1635,
198,
6738,
279,
65,
87,
1676,
73,
13,
40842,
87,
2302,
5736,
1330,
9220,
29046,
198,
11748,
458,
396,
8019,
628,
198,
197,
197,
198
] | 2.72973 | 37 |
# Problem Statement: https://www.hackerrank.com/challenges/zipped/problem
N, X = map(int, input().split())
l = [map(float, input().split()) for _ in range(X)]
for scores in zip(*l):
print(sum(scores)/X) | [
2,
20647,
21983,
25,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
89,
3949,
14,
45573,
198,
198,
45,
11,
1395,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
75,
796,
685,
8899,
7,
22468,
11,
5128... | 2.77027 | 74 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: versioner.py
# Author: Anicka Burova <anicka.burova@gmail.com>
# Date: 13.07.2017
# Last Modified Date: 03.08.2017
# Last Modified By: Anicka Burova <anicka.burova@gmail.com>
#
# versioner.py
# Copyright (c) 2017 Anicka Burova <anicka.burova@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from optparse import OptionParser
import os
import re
usage = "usage: %prog [options] file"
version = "0.5.0.0"
version_text = "%prog {}".format(version)
opt = OptionParser(usage = usage, version = version_text)
opt.add_option ("-l","--language"
,action = "store"
,dest = "language", default = 0
,help = "manualy select the language")
opt.add_option ("-s","--show"
,action = "store_true"
,dest = "show", default = False
,help = "show the current version of the file")
opt.add_option ("","--major"
,action = "store_true"
,dest = "major", default = False
,help = "upgrade major version")
opt.add_option ("","--minor"
,action = "store_true"
,dest = "minor", default = False
,help = "upgrade minor version")
opt.add_option ("","--maintenance","--main"
,action = "store_true"
,dest = "maintenance", default = False
,help = "upgrade maintenance version")
opt.add_option ("","--build"
,action = "store_true"
,dest = "build", default = False
,help = "upgrade build version")
opt.add_option ("-e","--no-error"
,action = "store_true"
,dest = "no_error", default = False
,help = "no version is not considered as error")
opt.add_option ("-v","--version-only"
,action = "store_true"
,dest = "version_only", default = False
,help = "if showing, show only the current version")
(options, args) = opt.parse_args()
try:
options.file_path = args[0]
except:
# try .versionrc file
try:
with open(".versionrc", "r") as f:
m = re.compile("MAIN_VERSION_FILE=(.*)").match(f.read())
if m:
options.file_path = m.group(1)
else:
raise "no file path"
except:
sys.stderr.write("No input file!\n")
exit(2)
if not os.path.isfile(options.file_path):
sys.stderr.write("{} not exists!\n".format(options.file_path))
exit(3)
if options.language:
lan = Language.parse(options.language)
if lan == Language.Unknown:
sys.stderr.write("Incorrect language, available languages: {}\n".format(Language.languages()))
exit(1)
options.language = lan
else:
if options.file_path == "Cargo.toml":
options.language = Language.Rust
elif options.file_path == "package.yaml":
options.language = Language.Haskell
elif options.file_path == "engine.cfg":
options.language = Language.Godot
elif options.file_path == "project.godot":
options.language = Language.Godot3
elif options.file_path == "setup.py":
options.language = Language.PythonSetup
else:
_, ext = os.path.splitext(options.file_path)
exts = {
".py" : Language.Python,
".cabal" : Language.Haskell,
".hpp" : Language.Cpp,
".cpp" : Language.Cpp,
".go" : Language.Go,
}
options.language = exts.get(ext, Language.Unknown)
if options.language == Language.Unknown:
sys.stderr.write("Unknown language, cannot parse the file\n")
if options.no_error:
exit(0)
exit(4)
program_version_re = {
Language.Python : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""),
Language.PythonSetup: re.compile("\s*version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\"\s*,\s*"),
Language.Godot : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""),
Language.Godot3 : re.compile("config/Version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""),
Language.Cpp : re.compile("string\s+version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""),
Language.Haskell : re.compile("version\s*:\s*(\d+)\.(\d+)\.(\d+).(\d+)"),
Language.Rust : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+)\""),
Language.Go : re.compile("const PackageVersion\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""),
}
program_version_update = {
Language.Python : "version = \"{}.{}.{}.{}\"",
Language.PythonSetup: "version = \"{}.{}.{}.{}\",",
Language.Go : "const PackageVersion = \"{}.{}.{}.{}\"",
Language.Godot : "config/Version=\"{}.{}.{}.{}\"",
Language.Cpp : "string version = \"{}.{}.{}.{}\"",
Language.Haskell : "version: {}.{}.{}.{}",
Language.Rust : "version = \"{}.{}.{}\"",
}
current_version = get_version(options)
old_version = current_version
if not current_version:
if options.no_error:
exit(0)
else:
exit(10)
upgraded = False
if options.major:
t,m,_,_,_ = current_version
current_version = (t, m + 1, 0, 0, 0)
upgraded = True
if options.minor:
t,m,n,_,_ = current_version
current_version = (t, m , n + 1, 0, 0)
upgraded = True
if options.maintenance:
t,m,n,a,_ = current_version
current_version = (t, m , n, a + 1, 0)
upgraded = True
if options.build:
t,m,n,a,b = current_version
current_version = (t, m , n, a, b + 1)
upgraded = True
if options.show:
_,m,n,a,b = current_version
_,om,on,oa,ob = old_version
if options.version_only:
sys.stdout.write("{}.{}.{}.{}\n".format(m,n,a,b))
else:
if upgraded:
sys.stdout.write("Version has been upgraded from '{}.{}.{}.{}' to '{}.{}.{}.{}'\n".format(om,on,oa,ob,m,n,a,b))
else:
sys.stdout.write("Current version is '{}.{}.{}.{}'\n".format(m,n,a,b))
exit(0)
orig, major, minor, maintenance, build = current_version
updated = program_version_update[options.language].format(major, minor, maintenance, build)
text = None
with open(options.file_path,"r") as f:
text = f.read()
text = text.replace(orig, updated)
if upgraded:
_,m,n,a,b = current_version
_,om,on,oa,ob = old_version
with open(".version_upgrade", "w") as f:
f.write("OLD_VERSION={}.{}.{}.{}\n".format(om,on,oa,ob))
f.write("NEW_VERSION={}.{}.{}.{}\n".format(m,n,a,b))
sys.stderr.write("Version has been upgraded from '{}.{}.{}.{}' to '{}.{}.{}.{}'\n".format(om,on,oa,ob,m,n,a,b))
sys.stdout.write(text)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
9220,
25,
2196,
263,
13,
9078,
198,
2,
6434,
25,
1052,
29873,
347,
1434,
6862,
1279,
272,
29873,
13,
65,
1434,
6... | 2.266706 | 3,382 |
print(howSum(7, [2,3]))
print(howSum(7, [5,3,4,7]))
print(howSum(7, [2,4]))
print(howSum(8, [2,3,5]))
print(howSum(300, [7,14])) | [
198,
4798,
7,
4919,
13065,
7,
22,
11,
685,
17,
11,
18,
60,
4008,
198,
4798,
7,
4919,
13065,
7,
22,
11,
685,
20,
11,
18,
11,
19,
11,
22,
60,
4008,
198,
4798,
7,
4919,
13065,
7,
22,
11,
685,
17,
11,
19,
60,
4008,
198,
4798,
... | 1.697368 | 76 |
from dash.dependencies import Input, Output
from app.models import EventFrame, LookupValue
from app.dashes.components import collapseExpand, elementTemplateDropdown, enterpriseDropdown, eventFrameAttributeTemplatesDropdown, eventFramesDropdown, \
eventFrameTemplateDropdown, refreshInterval, siteDropdown
| [
6738,
14470,
13,
45841,
3976,
1330,
23412,
11,
25235,
198,
6738,
598,
13,
27530,
1330,
8558,
19778,
11,
6803,
929,
11395,
198,
6738,
598,
13,
67,
7465,
13,
5589,
3906,
1330,
9807,
16870,
392,
11,
5002,
30800,
26932,
2902,
11,
13953,
2... | 4.065789 | 76 |
#!/usr/bin/env python
import click
import configparser
import json
import requests
from datetime import datetime, timedelta
from dateutil import parser
@click.command()
@click.option('-w', '--workspace', required=True, help='Workspace alias to look up URL and and access key pair from config e.g. acme.')
@click.option('-s', '--start-date', required=True, help='Start date of usage period in ISO format YYYYMMDD.')
@click.option('-e', '--end-date', required=True, help='End date of usage period in ISO format YYYYMMDD.')
def get_workspace_usage(workspace, start_date, end_date):
"""This program gets workspace usage for a given time period"""
print('Please wait. This may take some time...\n')
config = configparser.ConfigParser()
config.read("workspaces.conf")
workspace_config = config[workspace]
# Set GraphQL API Params
turbot_workspace_url = workspace_config.get('workspaceURL')
turbot_access_key = workspace_config.get('accessKey')
turbot_secret = workspace_config.get('secret')
turbot_workspace_certificate_verification = workspace_config.getboolean('verifySSL', fallback=True)
if not turbot_workspace_certificate_verification:
requests.packages.urllib3.disable_warnings()
api_url = "{}/api/v5/graphql".format(turbot_workspace_url)
query = '''
query ControlUsages($filter: [String!]!, $paging: String) {
controlUsagesByControlType(filter: $filter, paging: $paging) {
items {
type {
trunk {
title
}
uri
}
summary {
activeControlDays
inactiveControls
states {
alarm
invalid
error
ok
tbd
skipped
}
}
}
paging {
next
}
}
}
'''
date_range_to_query = get_date_range(start_date, end_date)
active_control_days_non_turbot = 0
active_control_days_turbot = 0
usage_output_file = '{}_{}_{}_control_usage_breakdown.csv'.format(workspace, start_date, end_date)
with open(usage_output_file, 'w') as breakdown_outfile:
breakdown_outfile.write('Period,Title,Control Type URI,ActiveControlDays,InactiveControls,Alarm,Error,Invalid,OK,Skipped,TBD\n')
for date_to_query in date_range_to_query:
print("Querying data for {}".format(parser.parse(date_to_query).strftime('%Y-%m-%d')))
has_more = True
next_token = None
workspace_usage_items = []
while has_more:
# Build variables with paging token from previous run
query_timestamp = parser.parse(date_to_query).strftime('%Y%m%d2359')
variables = {'filter': 'timestamp:{} limit:300'.format(query_timestamp), 'paging': next_token}
# Fetch raw data
response = requests.post(
api_url,
auth=(turbot_access_key, turbot_secret),
verify=turbot_workspace_certificate_verification,
headers={
'content-type': "application/json",
'cache-control': "no-cache"
},
json={'query': query, 'variables': variables}
)
if not response.ok:
print(response.text)
response.raise_for_status()
# Convert the response JSON into a Python object
raw_workspace_usage = json.loads(response.text)
response.close()
next_token = raw_workspace_usage['data']['controlUsagesByControlType']['paging']['next']
has_more = True if next_token else False
# Concatenate results into one
raw_workspace_usage_items = raw_workspace_usage['data']['controlUsagesByControlType']['items']
workspace_usage_items = workspace_usage_items + raw_workspace_usage_items
# Sort by title (we'd like to sort by the control trunk title, but we're getting a null trunk
# if the control type has been deleted - so URI will do for now)
workspace_usage_items.sort(key=lambda x: x['type']['uri'])
# Initialise stats
active_turbot_control_days_for_period = 0
active_non_turbot_control_days_for_period = 0
inactive_turbot_control_days_for_period = 0
inactive_non_turbot_control_days_for_period = 0
turbot_control_usage_by_uri = {}
non_turbot_control_usage_by_uri = {}
for workspace_usage_item in workspace_usage_items:
workspace_usage_summary = workspace_usage_item['summary']
active_control_days_total = workspace_usage_summary['activeControlDays']
inactive_controls_total = workspace_usage_summary['inactiveControls']
states_summary = workspace_usage_summary['states']
alarm_total = states_summary['alarm']
error_total = states_summary['error']
invalid_total = states_summary['invalid']
ok_total = states_summary['ok']
skipped_total = states_summary['skipped']
tbd_total = states_summary['tbd']
control_type_uri = workspace_usage_item['type']['uri']
mod_title = workspace_usage_item['type']['trunk']['title'] if workspace_usage_item['type']['trunk'] else workspace_usage_item['type']['uri']
is_turbot_mod = 'tmod:@turbot/turbot' in control_type_uri
totals = {
'activeControlDays': active_control_days_total,
'inactiveControls': inactive_controls_total,
'alarm': alarm_total,
'error': error_total,
'invalid': invalid_total,
'ok': ok_total,
'skipped': skipped_total,
'tbd': tbd_total,
}
# Increment active/inactive totals
if is_turbot_mod:
active_turbot_control_days_for_period += active_control_days_total
inactive_turbot_control_days_for_period += inactive_controls_total
turbot_control_usage_by_uri[control_type_uri] = {
'title': mod_title,
'summary': totals
}
else:
active_non_turbot_control_days_for_period += active_control_days_total
inactive_non_turbot_control_days_for_period += inactive_controls_total
non_turbot_control_usage_by_uri[control_type_uri] = {
'title': mod_title,
'summary': totals
}
breakdown_outfile.write('{},{},{},{},{},{},{},{},{},{},{}\n'.format(date_to_query, mod_title, control_type_uri, active_control_days_total, inactive_controls_total, alarm_total, error_total, invalid_total, ok_total, skipped_total, tbd_total))
active_control_days_non_turbot += active_non_turbot_control_days_for_period
active_control_days_turbot += active_turbot_control_days_for_period
summary_output_file = '{}_{}_{}_control_usage_summary.txt'.format(workspace, start_date, end_date)
with open(summary_output_file, 'w') as summary_outfile:
padded_active_control_days_non_turbot = str(active_control_days_non_turbot).rjust(7, ' ')
padded_active_control_days_turbot = str(active_control_days_turbot).rjust(7, ' ')
summary_outfile.write('Active Non-Turbot Control Days (billable)\n')
summary_outfile.write('-------------------\n')
summary_outfile.write('{}\n\n'.format(padded_active_control_days_non_turbot))
summary_outfile.write('Active Turbot Control Days\n')
summary_outfile.write('---------------\n')
summary_outfile.write('{}\n'.format(padded_active_control_days_turbot))
print('')
print('Active Non-Turbot Control Days (billable)')
print('-----------------------')
print(padded_active_control_days_non_turbot)
print('')
print('')
print('Active Turbot Control Days')
print('-------------------')
print(padded_active_control_days_turbot)
print('')
if __name__ == '__main__':
get_workspace_usage() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
3904,
198,
11748,
4566,
48610,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
3128,
22602,
1330,
30751,
198,
198,
31,
1... | 2.165034 | 3,878 |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 31 16:24:19 2020
@author: lucas Rocini
"""
#modelo sequencial
from keras.models import Sequential
#camada de convolução, pooling, flatten e rede neural densa
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
#normalização das características e acelerar o desempenho da rede neural
from keras.layers.normalization import BatchNormalization
#gerar imagens adicionais
from keras.preprocessing.image import ImageDataGenerator
#numpy
import numpy as np
#trabalhar com imagens (para analisar)
from keras.preprocessing import image
"""
#######################
# CONVOLUÇÃO E PROCESSAMENTO DE IMAGEM
#######################
"""
#definição da rede neural
model = Sequential()
#OPERADOR DE CONVOLUÇÃO
#adicionar primeira camada de convolução
#32 = número de feature maps
# (3,3) = filtro da convolução
#input_shape = converte a dimensão da imagem, e canais RBG
#relu = função de ativação, retira os valores negativos, partes mais escuras da imagem
model.add(Conv2D(32, (3,3), input_shape = (64,64,3), activation = 'relu'))
#NORMALIZATION
#pega o mapa de características gerado pelo kernel, e normaliza para entre 0 e 1
#acelera processamento
model.add(BatchNormalization())
#POOLING
#camada de pooling, matriz de 4px para pegar o maior valor (características mais importantes)
model.add(MaxPooling2D(pool_size = (2,2)))
#2ª camada de convolução
model.add(Conv2D(32, (3,3), input_shape = (64,64,3), activation = 'relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size = (2,2)))
#FLATTENING
#flatten = transforma a matriz em um vetor para passar como entrada da rede neural
model.add(Flatten())
"""
#######################
# REDE NEURAL
#######################
"""
#cria primeira camada da rede neural
#units = quantidade de neurônios
model.add(Dense(units = 128, activation='relu'))
#dropout
#0.2 = vai zerar 20% das entradas
model.add(Dropout(0.2))
#camada oculta
model.add(Dense(units = 128, activation='relu'))
model.add(Dropout(0.2))
#camada de saída
#1 = uma saída somente, pois a classificação é binária (gato OU cachorro)
model.add(Dense(units = 1, activation='sigmoid'))
model.compile(optimizer = 'adam', loss = 'binary_crossentropy',
metrics = ['accuracy'])
"""
#######################
# DATA AUGMENTATION
#######################
"""
#rescale = normaliza os dados
#rotation_range = o grau que será feita uma rotação na imagem
#horizontal_flip = realizar giros horizontais nas imagens
#shear_range = muda os pixels para outra direção
#height_shift_range = realiza a faixa de mudança de altura
augmentation_imagens_treinamento = ImageDataGenerator(rescale = 1./255,
rotation_range = 7,
horizontal_flip = True,
shear_range = 0.2,
height_shift_range = 0.07,
zoom_range = 0.2)
#rescale feito para que os dados de teste estejam na mesma escala
augmentation_imagens_teste = ImageDataGenerator(rescale = 1./255,
rotation_range = 7,
horizontal_flip = True,
shear_range = 0.2,
height_shift_range = 0.07,
zoom_range = 0.2)
#
#carrega imagens do diretório e gera imagens com o data augmentation
#target_size = escalona as imagens para o tamanho
#class_mode = quantidade de classes à serem lidas
dataset_imagens_treinamento = augmentation_imagens_treinamento.flow_from_directory('dataset/training_set',
target_size = (64,64),
batch_size = 32,
class_mode = 'binary'
)
dataset_imagens_teste = augmentation_imagens_teste.flow_from_directory('dataset/test_set',
target_size = (64,64),
batch_size = 32,
class_mode = 'binary'
)
"""
#######################
# TREINAMENTO
#######################
"""
#steps_per_epoch = quantidade de imagens à serem utilizadas, quanto maior melhor
#recomenda-se usar a quantidade de imagens e dividir pelo batch_size, para nao dar overflow
model.fit_generator(dataset_imagens_treinamento,
steps_per_epoch= 320/32,
epochs = 20,
validation_data = dataset_imagens_teste,
validation_steps = 110/32
)
"""
###########################
# PREVISÃO INDIVIDUAL
###########################
"""
#carrega imagem
imagem_teste = image.load_img('dataset/test_set/PotesGarrafas/PoteGarrafa10000.jpg', target_size=(64,64))
#transforma imagem em array
imagem_teste = image.img_to_array(imagem_teste)
#normalizacao
imagem_teste /= 255
#expande as dimensões do vetor
#insere
imagem_teste = np.expand_dims(imagem_teste, axis = 0)
previsao = model.predict(imagem_teste)
print("Previsão:",previsao)
print(previsao*100,"% Maquinario")
print((100-(previsao*100)),"% PotesGarrafas")
if (previsao > 0.5):
print("Provavelmente Maquinario")
else:
print ("Provavelmente PotesGarrafas")
#exibe as classes e os índices utilizados
#print(dataset_imagens_treinamento.class_indices)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
7031,
2556,
3261,
1467,
25,
1731,
25,
1129,
12131,
201,
198,
201,
198,
31,
9800,
25,
17115,
292,
13545,
5362,
201,
198,
37811,
201,
1... | 1.947064 | 3,117 |
#
# @file TestKineticLaw.py
# @brief SBML KineticLaw unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestKineticLaw.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| [
2,
198,
2,
2488,
7753,
220,
220,
220,
6208,
49681,
5139,
16966,
13,
9078,
198,
2,
2488,
65,
3796,
220,
220,
18056,
5805,
16645,
5139,
16966,
4326,
5254,
198,
2,
198,
2,
2488,
9800,
220,
48663,
3972,
45538,
17716,
84,
357,
37906,
113... | 3.681604 | 424 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from numpy import argmax, array, asarray, average
# ============= local library imports ==========================
from ararpy import ALPHAS
from ararpy.stats import calculate_mswd, validate_mswd, calculate_weighted_mean
def calculate_plateau_age(ages, errors, k39, kind='inverse_variance', method='fleck 1977', options=None):
"""
ages: list of ages
errors: list of corresponding 1sigma errors
k39: list of 39ArK signals
return age, error
"""
# print 'ages=array({})'.format(ages)
# print 'errors=array({})'.format(errors)
# print 'k39=array({})'.format(k39)
if options is None:
options = {}
ages = asarray(ages)
errors = asarray(errors)
k39 = asarray(k39)
force_steps = options.get('force_steps', False)
if force_steps:
sstep, estep = force_steps
sstep, estep = sstep.upper(), estep.upper()
if not sstep:
sidx = 0
else:
sidx = ALPHAS.index(sstep)
n = ages.shape[0] - 1
if not estep:
eidx = n
else:
eidx = ALPHAS.index(estep)
sidx, eidx = min(sidx, eidx), min(max(sidx, eidx), n)
pidx = (sidx, eidx) if sidx < n else None
else:
p = Plateau(ages=ages,
errors=errors,
signals=k39,
nsteps=options.get('nsteps', 3),
gas_fraction=options.get('gas_fraction', 50))
pidx = p.find_plateaus(method)
if pidx:
sx = slice(pidx[0], pidx[1] + 1)
plateau_ages = ages[sx]
if kind == 'vol_fraction':
weights = k39[sx]
wm, we = average(plateau_ages, weights=weights)
else:
plateau_errors = errors[sx]
wm, we = calculate_weighted_mean(plateau_ages, plateau_errors)
return wm, we, pidx
# ============= EOF =============================================
| [
2,
38093,
25609,
855,
198,
2,
15069,
1946,
14757,
9847,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.584559 | 1,088 |
"""
File: blur.py
-------------------------------
This file shows the original image(smiley-face.png)
first, and then its blurred image. The blur algorithm
uses the average RGB values of a pixel's nearest neighbors.
"""
from simpleimage import SimpleImage
def blur(img):
"""
This function will blur the original image
---------------------------------------------
:param img: SimpleImage, the original image
:return: SimpleImage, the blurred image
"""
new_img = SimpleImage.blank(img.width, img.height)
for x in range(img.width):
for y in range(img.height):
img_p = img.get_pixel(x, y)
new_img_p = new_img.get_pixel(x, y)
if x == 0 and y == 0:
# The pixel at the upper left corner
img_p1 = img.get_pixel(x, y + 1)
img_p2 = img.get_pixel(x + 1, y)
img_p3 = img.get_pixel(x + 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red) // 4
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green) // 4
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue) // 4
elif x == 0 and y == new_img.height - 1:
# The pixel at the bottom left corner
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x + 1, y)
img_p3 = img.get_pixel(x + 1, y - 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red) // 4
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green) // 4
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue) // 4
elif x == new_img.width - 1 and y == 0:
# The pixel at the upper right corner
img_p1 = img.get_pixel(x, y + 1)
img_p2 = img.get_pixel(x - 1, y)
img_p3 = img.get_pixel(x - 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red) // 4
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green) // 4
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue) // 4
elif x == new_img.width - 1 and y == new_img.height - 1:
# The pixel at the bottom right corner
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x - 1, y)
img_p3 = img.get_pixel(x - 1, y - 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red) // 4
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green) // 4
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue) // 4
elif x == 0 and y < new_img.height - 1:
# Pixels on the left edge
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x, y + 1)
img_p3 = img.get_pixel(x + 1, y)
img_p4 = img.get_pixel(x + 1, y - 1)
img_p5 = img.get_pixel(x + 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red + img_p4.red + img_p5.red) // 6
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green + img_p4.green
+ img_p5.green) // 6
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue + img_p4.blue + img_p5.blue) // 6
elif x == new_img.width - 1 and y < new_img.height - 1:
# Pixels on the right edge
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x, y + 1)
img_p3 = img.get_pixel(x - 1, y)
img_p4 = img.get_pixel(x - 1, y - 1)
img_p5 = img.get_pixel(x - 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red + img_p4.red + img_p5.red) // 6
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green + img_p4.green
+ img_p5.green) // 6
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue + img_p4.blue + img_p5.blue) // 6
elif x < new_img.width - 1 and y == 0:
# Pixels on the upper edge
img_p1 = img.get_pixel(x, y + 1)
img_p2 = img.get_pixel(x - 1, y)
img_p3 = img.get_pixel(x - 1, y + 1)
img_p4 = img.get_pixel(x + 1, y)
img_p5 = img.get_pixel(x + 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red + img_p4.red + img_p5.red) // 6
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green + img_p4.green
+ img_p5.green) // 6
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue + img_p4.blue + img_p5.blue) // 6
elif x < new_img.width - 1 and y == new_img.height - 1:
# Pixels on the bottom edge
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x - 1, y)
img_p3 = img.get_pixel(x - 1, y - 1)
img_p4 = img.get_pixel(x + 1, y)
img_p5 = img.get_pixel(x + 1, y - 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red + img_p4.red + img_p5.red) // 6
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green + img_p4.green
+ img_p5.green) // 6
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue + img_p4.blue + img_p5.blue) // 6
else:
# Pixels in the middle
img_p1 = img.get_pixel(x, y - 1)
img_p2 = img.get_pixel(x, y + 1)
img_p3 = img.get_pixel(x - 1, y)
img_p4 = img.get_pixel(x - 1, y - 1)
img_p5 = img.get_pixel(x - 1, y + 1)
img_p6 = img.get_pixel(x + 1, y)
img_p7 = img.get_pixel(x + 1, y - 1)
img_p8 = img.get_pixel(x + 1, y + 1)
new_img_p.red = (img_p.red + img_p1.red + img_p2.red + img_p3.red + img_p4.red + img_p5.red +
img_p6.red + img_p7.red + img_p8.red) // 9
new_img_p.green = (img_p.green + img_p1.green + img_p2.green + img_p3.green + img_p4.green +
img_p5.green + img_p6.green + img_p7.green + img_p8.green) // 9
new_img_p.blue = (img_p.blue + img_p1.blue + img_p2.blue + img_p3.blue + img_p4.blue + img_p5.blue +
img_p6.blue + img_p7.blue + img_p8.blue) // 9
return new_img
def main():
"""
This program will show the original image and the blurred image
"""
old_img = SimpleImage("images/smiley-face.png")
old_img.show()
blurred_img = blur(old_img)
for i in range(5):
blurred_img = blur(blurred_img)
blurred_img.show()
if __name__ == '__main__':
main()
| [
37811,
198,
8979,
25,
23671,
13,
9078,
198,
1783,
24305,
198,
1212,
2393,
2523,
262,
2656,
2939,
7,
5796,
9618,
12,
2550,
13,
11134,
8,
198,
11085,
11,
290,
788,
663,
38258,
2939,
13,
383,
23671,
11862,
198,
2664,
262,
2811,
25228,
... | 1.745967 | 4,153 |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for bilinear sampling.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import tensorflow as tf
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
def bilinear_wrapper(imgs, coords):
"""Wrapper around bilinear sampling function, handles arbitrary input sizes.
Args:
imgs: [..., H_s, W_s, C] images to resample
coords: [..., H_t, W_t, 2], source pixel locations from which to copy
Returns:
[..., H_t, W_t, C] images after bilinear sampling from input.
"""
# The bilinear sampling code only handles 4D input, so we'll need to reshape.
init_dims = imgs.get_shape().as_list()[:-3:]
end_dims_img = imgs.get_shape().as_list()[-3::]
end_dims_coords = coords.get_shape().as_list()[-3::]
prod_init_dims = init_dims[0]
for ix in range(1, len(init_dims)):
prod_init_dims *= init_dims[ix]
imgs = tf.reshape(imgs, [prod_init_dims] + end_dims_img)
coords = tf.reshape(
coords, [prod_init_dims] + end_dims_coords)
imgs_sampled = tfa.image.resampler(imgs, coords)
imgs_sampled = tf.reshape(
imgs_sampled, init_dims + imgs_sampled.get_shape().as_list()[-3::])
return imgs_sampled
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,... | 2.806502 | 646 |
import os
import tensorflow as tf
import numpy as np
import joblib
file_url = "https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt"
file_name = os.path.join(os.getcwd(), "shakespeare.txt")
tf.keras.utils.get_file(file_name, file_url)
with open(file_name, 'r') as file:
text = file.read()
# creates letter id per distinct letter in text
tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)
tokenizer.fit_on_texts(text)
max_depth = len(tokenizer.word_index) # number of distinct letters
dataset_size = tokenizer.document_count # number of total letters
train_size = dataset_size * 9 // 10
[encoded] = np.array(tokenizer.texts_to_sequences([text])) - 1
# since Char-RNN model is stateful a sequence in a batch
# should exactly start where the previous batch's sequence which has same index number left off
batch_size = 32
n_steps = 200
window_length = n_steps + 1
encoded_parts = np.array_split(encoded[:train_size], batch_size)
datasets = list()
for part in encoded_parts:
dataset = tf.data.Dataset.from_tensor_slices(part)
dataset = dataset.window(size=window_length, shift=n_steps, drop_remainder=True)
# since window method of dataset returns a new dataset per window
# the dataset object becomes a dataset of datasets
# the inner datasets should be converted to regular tensors
# following code line does exactly this
dataset = dataset.flat_map(lambda window: window.batch(window_length))
datasets.append(dataset)
dataset = tf.data.Dataset.zip(tuple(datasets)).map(lambda *windows: tf.stack(windows))
dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:])) # creates labels
dataset = dataset.map(lambda X_batch, y_batch: (tf.one_hot(X_batch, depth=max_depth), y_batch))
dataset = dataset.prefetch(1)
model = tf.keras.Sequential()
# creation of stateful Char-RNN model
model.add(tf.keras.layers.GRU(256, return_sequences=True, stateful=True,
activation="tanh", dropout=0.2, recurrent_dropout=0.3, batch_input_shape=[batch_size, None, max_depth]))
model.add(tf.keras.layers.GRU(256, return_sequences=True, stateful=True, activation="tanh", dropout=0.2, recurrent_dropout=0.3))
model.add(tf.keras.layers.GRU(128, return_sequences=True, stateful=True, activation="tanh", dropout=0.2, recurrent_dropout=0.3))
model.add(tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(max_depth, activation="softmax")))
# the states should be reset
# at the beginning of each epoch
# due to the model is stateful
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
model.fit(dataset, epochs=50, callbacks=[ResetStatesCallback()])
model.save(os.path.join(os.getcwd(), "models", "stateful"))
model.save_weights(os.path.join(os.getcwd(), "weights", "shakespearean"))
joblib.dump(tokenizer, "tokenizer.save") | [
11748,
28686,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1693,
8019,
220,
628,
198,
7753,
62,
6371,
796,
366,
5450,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
74,
5117,
10036,
14,
1... | 2.821675 | 1,015 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^qq/login/$',views.QQurlView.as_view()),
url(r'^oauth_callback$',views.QQopenidView.as_view()),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
38227,
14,
38235,
32624,
3256,
33571,
13,
48,
48,
6371,
7680,
13,
... | 2.316456 | 79 |
# -*- coding: utf-8 -*-
from pymongo import MongoClient, errors
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
11,
8563,
628
] | 2.708333 | 24 |
from context import categorize
import unittest
from testfixtures import tempdir, compare, TempDirectory
import os
import json
FileOs = categorize.FileOs
class test_FileOs(unittest.TestCase):
"""
Ensure file object can get set and output paths to files
"""
if __name__ == "__main__":
unittest.main() | [
6738,
4732,
1330,
17851,
1096,
198,
11748,
555,
715,
395,
198,
6738,
1332,
69,
25506,
1330,
20218,
15908,
11,
8996,
11,
24189,
43055,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
8979,
16748,
796,
17851,
1096,
13,
8979,
16748,
198,
1... | 2.972727 | 110 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Splunk specific dependencies
import sys, os
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators, splunklib_logger as logger
# Command specific dependencies
import requests
from requests.auth import HTTPDigestAuth
import json
# TODO's
# - paramMap auf payload umstellen -> aber backward compatible
# - Error handling auf raise XY umstellen
# - https://www.tutorialspoint.com/python/python_exceptions.htm
# - Add logging via logger
# - Do not delete older builds
@Configuration(type='reporting')
dispatch(curlCommand, sys.argv, sys.stdin, sys.stdout, __name__) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
13341,
2954,
2176,
20086,
198,
11748,
25064,
11,
28686,
198,
6738,
4328,
2954,
8019,
13,
12947,
9503,
1746,
133... | 3.343434 | 198 |
import datetime
from django.db.models import QuerySet
from django.shortcuts import render, get_object_or_404, redirect
from cms.models import Record
from cms.forms import RecordForm
def life_logs(request):
"""活動日一覧"""
today: datetime = datetime.date.today()
if not Record.objects.filter(date=today).exists():
Record(date=today).save()
records: QuerySet = Record.objects.all().order_by('id')
return render(request, 'cms/logs.html.haml', {'records': records})
def edit_record(request, record_id=None):
"""活動日の編集"""
if record_id:
record: Record = get_object_or_404(Record, pk=record_id)
else:
record: Record = Record()
if request.method == 'POST':
form = RecordForm(request.POST, instance=record)
if form.is_valid():
record = form.save(commit=False)
record.save()
return redirect('cms:life_logs')
else:
form = RecordForm(instance=record)
return render(request, 'cms/edit_record.html.haml', dict(form=form, record_id=record_id))
def del_record(request, record_id):
"""活動日の削除"""
# return HttpResponse('活動記録の削除')
record = get_object_or_404(Record, pk=record_id)
record.delete()
return redirect('cms:life_logs')
| [
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
43301,
7248,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
198,
6738,
269,
907,
13,
27530,
1330,
1... | 2.345083 | 539 |
from dautil import options
import pandas as pd
import numpy as np
from dautil import log_api
printer = log_api.Printer()
print(pd.describe_option('precision'))
print(pd.describe_option('max_rows'))
printer.print('Initial precision', pd.get_option('precision'))
printer.print('Initial max_rows', pd.get_option('max_rows'))
# Random pi's, should use random state if possible
np.random.seed(42)
df = pd.DataFrame(np.pi * np.random.rand(6, 2))
printer.print('Initial df', df)
options.set_pd_options()
printer.print('df with different options', df)
options.reset_pd_options()
printer.print('df after reset', df)
| [
6738,
288,
2306,
346,
1330,
3689,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
288,
2306,
346,
1330,
2604,
62,
15042,
198,
198,
1050,
3849,
796,
2604,
62,
15042,
13,
6836,
3849,
3419,
198,
4798,... | 2.846512 | 215 |
import re
| [
11748,
302,
628
] | 3.666667 | 3 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from django.utils import timezone
from taiga.base import response
from taiga.base.decorators import detail_route
from taiga.base.api import ReadOnlyListViewSet
from taiga.mdrender.service import render as mdrender
from . import permissions
from . import serializers
from . import services
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
34,
8,
1946,
12,
5304,
843,
4364,
3738,
2724,
71,
1279,
8461,
37686,
31,
8461,
37686,
13,
27305,
29,
198,
2,
15069,
357,
34,
8,
1946,
12,
5304,
4804,
... | 3.465116 | 387 |
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy
x_min = 40.0
x_step = 20.0
x_max = 300.0
# pop per year at start
starting_pop_per_year = numpy.linspace(x_min, x_max, 1001)
xticks = numpy.arange(0.0, x_max + x_step * 0.5, x_step)
starting_pop_per_month = starting_pop_per_year / 12.0
required_time = numpy.zeros_like(starting_pop_per_month)
for i in range(10):
#level = max(i - 1, 0)
level_pop_per_month = i # level * 4% per level * 3 per year per level / 12 months per year
curr_rate = starting_pop_per_month + level_pop_per_month
required_time += 100.0 / curr_rate
ymax = (numpy.max(required_time) // 12 + 1) * 12.0
yticks = numpy.arange(0.0, ymax + 18.0, 12.0)
plt.plot(starting_pop_per_year, required_time)
plt.xlabel('Starting growth per year (flat + chance * 3)')
plt.xticks(xticks)
plt.yticks(yticks)
plt.ylabel('Months to city')
plt.xlim(xmin=x_min)
plt.ylim(ymin=0, ymax = ymax)
plt.grid(True)
plt.show()
| [
11748,
2603,
29487,
8019,
355,
285,
489,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
299,
32152,
201,
198,
201,
198,
87,
62,
1084,
796,
2319,
13,
15,
201,
198,
87,
62,
9662,
796,
1160,
13,
1... | 2.198675 | 453 |
from django.test import TestCase
# Create your tests here.
# def test_handles_post_requests(self):
# response = self.client.post('/')
# self.assertIn('a list item', response.content) | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
2,
13610,
534,
5254,
994,
13,
628,
220,
220,
220,
1303,
825,
1332,
62,
4993,
829,
62,
7353,
62,
8897,
3558,
7,
944,
2599,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
288... | 2.72 | 75 |
import math
import numpy as np
import basis.robot_math as rm
import modeling.mesh_tools as mt
if __name__ == '__main__':
'''
author: weiwei
date: 20201207osaka
'''
# mt.convert_to_stl("robotiq_arg2f_85_base_link.stl", "robotiq_arg2f_85_base_link_cvt.stl", rotmat=rm.rotmat_from_axangle([0,1,0], -math.pi/2))
# mt.convert_to_stl("robotiq_arg2f_85_inner_knuckle.stl", "robotiq_arg2f_85_inner_knuckle_cvt.stl", scale_ratio=.001)
# mt.convert_to_stl("robotiq_arg2f_85_outer_finger.stl", "robotiq_arg2f_85_outer_finger_cvt.stl", scale_ratio=.001)
mt.convert_to_stl("robotiq_arg2f_85_inner_finger.stl", "robotiq_arg2f_85_inner_finger_cvt2.stl", scale_ratio=.001) | [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4308,
13,
305,
13645,
62,
11018,
355,
42721,
198,
11748,
21128,
13,
76,
5069,
62,
31391,
355,
45079,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
... | 2.050595 | 336 |
from x_rebirth_station_calculator.station_data.station_base import Ware
names = {'L044': 'Newtonian V Crushers',
'L049': 'Neutronenbrecher V'}
NewtonianVCrushers = Ware(names)
| [
6738,
2124,
62,
260,
24280,
62,
17529,
62,
9948,
3129,
1352,
13,
17529,
62,
7890,
13,
17529,
62,
8692,
1330,
28593,
198,
198,
14933,
796,
1391,
6,
43,
43977,
10354,
705,
3791,
1122,
666,
569,
15170,
7084,
3256,
198,
220,
220,
220,
2... | 2.428571 | 77 |
from django.conf import settings
from django.db import models
# Create your models here.
from tags.models import Tag
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
198,
6738,
15940,
13,
27530,
1330,
17467,
628,
198
] | 3.666667 | 33 |
{
'targets': [{
"includes": [
"auto.gypi"
],
'conditions': [
['OS == "mac"', {
'sources': [
'src/macos-screen.mm',
'src/screen.cc'
],
'include_dirs': [
'System/Library/Frameworks/ApplicationServices.framework/Headers',
],
'link_settings': {
'libraries': [
'-framework ApplicationServices',
'-framework AppKit',
]
}
}],
['OS == "linux"', {
'link_settings': {
'libraries': [
'-lX11',
]
},
'sources': [
'src/linux-screen.cc',
'src/screen.cc',
'src/xdisplay.cc',
]
}],
["OS=='win'", {
'sources': [
'src/windows-screen.cc',
'src/screen.cc'
],
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [
'/MD',
'/LD'
]
}
},
'ldflags': [
'-Wl,-rpath,<(module_root_dir)',
],
"cflags": [
"-std=c++11",
"-stdlib=libc++"
]
}]
],
}],
"includes": [
"auto-top.gypi"
]
}
| [
90,
198,
197,
470,
853,
1039,
10354,
685,
90,
198,
197,
197,
1,
42813,
1298,
685,
198,
197,
197,
197,
1,
23736,
13,
1360,
14415,
1,
198,
197,
197,
4357,
628,
197,
197,
6,
17561,
1756,
10354,
685,
198,
197,
197,
197,
17816,
2640,
... | 1.736568 | 577 |
from api import create_app
from config import Config
application = create_app(Config)
| [
6738,
40391,
1330,
2251,
62,
1324,
198,
6738,
4566,
1330,
17056,
198,
198,
31438,
796,
2251,
62,
1324,
7,
16934,
8,
198
] | 3.954545 | 22 |
from rest_framework.serializers import HyperlinkedModelSerializer
from users.models import User
class UserSerializer(HyperlinkedModelSerializer):
"""Serializer for user model."""
| [
6738,
1334,
62,
30604,
13,
46911,
11341,
1330,
15079,
25614,
17633,
32634,
7509,
198,
198,
6738,
2985,
13,
27530,
1330,
11787,
628,
198,
4871,
11787,
32634,
7509,
7,
38197,
25614,
17633,
32634,
7509,
2599,
198,
220,
220,
220,
37227,
32634... | 3.957447 | 47 |
from django.test import Client, TestCase
from django.urls import reverse
from rest_framework import status
from wagtail.core.models import Site
from wagtail.images.models import Image
from wagtail.images.tests.utils import get_test_image_file
from muni_portal.core.models import (
ServicePage, AdministratorPage, ServicePointPage
)
OFFICE_HOURS_TEST_TEXT = "<div>Office hours text</div>"
| [
6738,
42625,
14208,
13,
9288,
1330,
20985,
11,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
1334,
62,
30604,
1330,
3722,
198,
6738,
266,
363,
13199,
13,
7295,
13,
27530,
1330,
14413,
198,
6738,
266,
363,
... | 3.319328 | 119 |
from flask import Flask
from flask_jwt_sample.auth import configure_jwt
from flask_jwt_sample.bootstrap import init_data
from flask_jwt_sample.index import views as index_views
| [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
73,
46569,
62,
39873,
13,
18439,
1330,
17425,
62,
73,
46569,
198,
6738,
42903,
62,
73,
46569,
62,
39873,
13,
18769,
26418,
1330,
2315,
62,
7890,
198,
6738,
42903,
62,
73,
46569,
62,
3987... | 3.490196 | 51 |
import hail as hl
from hail.expr.expressions import expr_float64, expr_numeric, analyze
from hail.typecheck import typecheck, oneof, sequenceof, nullable
from hail.utils import wrap_to_list, new_temp_file
@typecheck(weight_expr=expr_float64,
           ld_score_expr=expr_numeric,
           chi_sq_exprs=oneof(expr_float64,
                              sequenceof(expr_float64)),
           n_samples_exprs=oneof(expr_numeric,
                                 sequenceof(expr_numeric)),
           n_blocks=int,
           two_step_threshold=int,
           n_reference_panel_variants=nullable(int))
def ld_score_regression(weight_expr,
                        ld_score_expr,
                        chi_sq_exprs,
                        n_samples_exprs,
                        n_blocks=200,
                        two_step_threshold=30,
                        n_reference_panel_variants=None) -> hl.Table:
    r"""Estimate SNP-heritability and level of confounding biases from genome-wide association study
    (GWAS) summary statistics.
    Given a set or multiple sets of GWAS summary statistics, :func:`.ld_score_regression` estimates the heritability
    of a trait or set of traits and the level of confounding biases present in
    the underlying studies by regressing chi-squared statistics on LD scores,
    leveraging the model:
    .. math::
        \mathrm{E}[\chi_j^2] = 1 + Na + \frac{Nh_g^2}{M}l_j
    * :math:`\mathrm{E}[\chi_j^2]` is the expected chi-squared statistic
      for variant :math:`j` resulting from a test of association between
      variant :math:`j` and a trait.
    * :math:`l_j = \sum_{k} r_{jk}^2` is the LD score of variant
      :math:`j`, calculated as the sum of squared correlation coefficients
      between variant :math:`j` and nearby variants. See :func:`ld_score`
      for further details.
    * :math:`a` captures the contribution of confounding biases, such as
      cryptic relatedness and uncontrolled population structure, to the
      association test statistic.
    * :math:`h_g^2` is the SNP-heritability, or the proportion of variation
      in the trait explained by the effects of variants included in the
      regression model above.
    * :math:`M` is the number of variants used to estimate :math:`h_g^2`.
    * :math:`N` is the number of samples in the underlying association study.
    For more details on the method implemented in this function, see:
    * `LD Score regression distinguishes confounding from polygenicity in genome-wide association studies (Bulik-Sullivan et al, 2015) <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4495769/>`__
    Examples
    --------
    Run the method on a matrix table of summary statistics, where the rows
    are variants and the columns are different phenotypes:
    >>> mt_gwas = ld_score_all_phenos_sumstats
    >>> ht_results = hl.experimental.ld_score_regression(
    ...     weight_expr=mt_gwas['ld_score'],
    ...     ld_score_expr=mt_gwas['ld_score'],
    ...     chi_sq_exprs=mt_gwas['chi_squared'],
    ...     n_samples_exprs=mt_gwas['n'])
    Run the method on a table with summary statistics for a single
    phenotype:
    >>> ht_gwas = ld_score_one_pheno_sumstats
    >>> ht_results = hl.experimental.ld_score_regression(
    ...     weight_expr=ht_gwas['ld_score'],
    ...     ld_score_expr=ht_gwas['ld_score'],
    ...     chi_sq_exprs=ht_gwas['chi_squared_50_irnt'],
    ...     n_samples_exprs=ht_gwas['n_50_irnt'])
    Run the method on a table with summary statistics for multiple
    phenotypes:
    >>> ht_gwas = ld_score_one_pheno_sumstats
    >>> ht_results = hl.experimental.ld_score_regression(
    ...     weight_expr=ht_gwas['ld_score'],
    ...     ld_score_expr=ht_gwas['ld_score'],
    ...     chi_sq_exprs=[ht_gwas['chi_squared_50_irnt'],
    ...                        ht_gwas['chi_squared_20160']],
    ...     n_samples_exprs=[ht_gwas['n_50_irnt'],
    ...                      ht_gwas['n_20160']])
    Notes
    -----
    The ``exprs`` provided as arguments to :func:`.ld_score_regression`
    must all be from the same object, either a :class:`~.Table` or a
    :class:`~.MatrixTable`.
    **If the arguments originate from a table:**
    * The table must be keyed by fields ``locus`` of type
      :class:`.tlocus` and ``alleles``, a :class:`.tarray` of
      :py:data:`.tstr` elements.
    * ``weight_expr``, ``ld_score_expr``, ``chi_sq_exprs``, and
      ``n_samples_exprs`` are must be row-indexed fields.
    * The number of expressions passed to ``n_samples_exprs`` must be
      equal to one or the number of expressions passed to
      ``chi_sq_exprs``. If just one expression is passed to
      ``n_samples_exprs``, that sample size expression is assumed to
      apply to all sets of statistics passed to ``chi_sq_exprs``.
      Otherwise, the expressions passed to ``chi_sq_exprs`` and
      ``n_samples_exprs`` are matched by index.
    * The ``phenotype`` field that keys the table returned by
      :func:`.ld_score_regression` will have generic :obj:`int` values
      ``0``, ``1``, etc. corresponding to the ``0th``, ``1st``, etc.
      expressions passed to the ``chi_sq_exprs`` argument.
    **If the arguments originate from a matrix table:**
    * The dimensions of the matrix table must be variants
      (rows) by phenotypes (columns).
    * The rows of the matrix table must be keyed by fields
      ``locus`` of type :class:`.tlocus` and ``alleles``,
      a :class:`.tarray` of :py:data:`.tstr` elements.
    * The columns of the matrix table must be keyed by a field
      of type :py:data:`.tstr` that uniquely identifies phenotypes
      represented in the matrix table. The column key must be a single
      expression; compound keys are not accepted.
    * ``weight_expr`` and ``ld_score_expr`` must be row-indexed
      fields.
    * ``chi_sq_exprs`` must be a single entry-indexed field
      (not a list of fields).
    * ``n_samples_exprs`` must be a single entry-indexed field
      (not a list of fields).
    * The ``phenotype`` field that keys the table returned by
      :func:`.ld_score_regression` will have values corresponding to the
      column keys of the input matrix table.
    This function returns a :class:`~.Table` with one row per set of summary
    statistics passed to the ``chi_sq_exprs`` argument. The following
    row-indexed fields are included in the table:
    * **phenotype** (:py:data:`.tstr`) -- The name of the phenotype. The
      returned table is keyed by this field. See the notes below for
      details on the possible values of this field.
    * **mean_chi_sq** (:py:data:`.tfloat64`) -- The mean chi-squared
      test statistic for the given phenotype.
    * **intercept** (`Struct`) -- Contains fields:
      - **estimate** (:py:data:`.tfloat64`) -- A point estimate of the
        intercept :math:`1 + Na`.
      - **standard_error** (:py:data:`.tfloat64`) -- An estimate of
        the standard error of this point estimate.
    * **snp_heritability** (`Struct`) -- Contains fields:
      - **estimate** (:py:data:`.tfloat64`) -- A point estimate of the
        SNP-heritability :math:`h_g^2`.
      - **standard_error** (:py:data:`.tfloat64`) -- An estimate of
        the standard error of this point estimate.
    Warning
    -------
    :func:`.ld_score_regression` considers only the rows for which both row
    fields ``weight_expr`` and ``ld_score_expr`` are defined. Rows with missing
    values in either field are removed prior to fitting the LD score
    regression model.
    Parameters
    ----------
    weight_expr : :class:`.Float64Expression`
                  Row-indexed expression for the LD scores used to derive
                  variant weights in the model.
    ld_score_expr : :class:`.Float64Expression`
                    Row-indexed expression for the LD scores used as covariates
                    in the model.
    chi_sq_exprs : :class:`.Float64Expression` or :obj:`list` of
                        :class:`.Float64Expression`
                        One or more row-indexed (if table) or entry-indexed
                        (if matrix table) expressions for chi-squared
                        statistics resulting from genome-wide association
                        studies (GWAS).
    n_samples_exprs: :class:`.NumericExpression` or :obj:`list` of
                     :class:`.NumericExpression`
                     One or more row-indexed (if table) or entry-indexed
                     (if matrix table) expressions indicating the number of
                     samples used in the studies that generated the test
                     statistics supplied to ``chi_sq_exprs``.
    n_blocks : :obj:`int`
               The number of blocks used in the jackknife approach to
               estimating standard errors.
    two_step_threshold : :obj:`int`
                         Variants with chi-squared statistics greater than this
                         value are excluded in the first step of the two-step
                         procedure used to fit the model.
    n_reference_panel_variants : :obj:`int`, optional
                                 Number of variants used to estimate the
                                 SNP-heritability :math:`h_g^2`.
    Returns
    -------
    :class:`~.Table`
        Table keyed by ``phenotype`` with intercept and heritability estimates
        for each phenotype passed to the function."""
    # Normalize inputs: a bare expression becomes a one-element list so that
    # the table and matrix-table code paths can be handled uniformly.
    chi_sq_exprs = wrap_to_list(chi_sq_exprs)
    n_samples_exprs = wrap_to_list(n_samples_exprs)
    assert ((len(chi_sq_exprs) == len(n_samples_exprs))
            or (len(n_samples_exprs) == 1))
    __k = 2  # number of covariates, including intercept
    # Recover the source Table/MatrixTable from the first chi-squared
    # expression; all other expressions must come from the same object.
    ds = chi_sq_exprs[0]._indices.source
    analyze('ld_score_regression/weight_expr',
            weight_expr,
            ds._row_indices)
    analyze('ld_score_regression/ld_score_expr',
            ld_score_expr,
            ds._row_indices)
    # format input dataset
    if isinstance(ds, hl.MatrixTable):
        if len(chi_sq_exprs) != 1:
            raise ValueError("""Only one chi_sq_expr allowed if originating
                from a matrix table.""")
        if len(n_samples_exprs) != 1:
            raise ValueError("""Only one n_samples_expr allowed if
                originating from a matrix table.""")
        col_key = list(ds.col_key)
        if len(col_key) != 1:
            raise ValueError("""Matrix table must be keyed by a single
                phenotype field.""")
        analyze('ld_score_regression/chi_squared_expr',
                chi_sq_exprs[0],
                ds._entry_indices)
        analyze('ld_score_regression/n_samples_expr',
                n_samples_exprs[0],
                ds._entry_indices)
        # Rebuild the matrix table with only the internal (dunder-prefixed)
        # fields the regression needs; floors at 1.0 guard the downstream
        # 1/w and 1/x divisions against tiny or zero LD scores.
        ds = ds._select_all(row_exprs={'__locus': ds.locus,
                                       '__alleles': ds.alleles,
                                       '__w_initial': weight_expr,
                                       '__w_initial_floor': hl.max(weight_expr,
                                                                   1.0),
                                       '__x': ld_score_expr,
                                       '__x_floor': hl.max(ld_score_expr,
                                                           1.0)},
                            row_key=['__locus', '__alleles'],
                            col_exprs={'__y_name': ds[col_key[0]]},
                            col_key=['__y_name'],
                            entry_exprs={'__y': chi_sq_exprs[0],
                                         '__n': n_samples_exprs[0]})
        ds = ds.annotate_entries(**{'__w': ds.__w_initial})
        # Drop rows with missing key/weight/LD-score values (see Warning
        # section of the docstring).
        ds = ds.filter_rows(hl.is_defined(ds.__locus)
                            & hl.is_defined(ds.__alleles)
                            & hl.is_defined(ds.__w_initial)
                            & hl.is_defined(ds.__x))
    else:
        assert isinstance(ds, hl.Table)
        for y in chi_sq_exprs:
            analyze('ld_score_regression/chi_squared_expr', y, ds._row_indices)
        for n in n_samples_exprs:
            analyze('ld_score_regression/n_samples_expr', n, ds._row_indices)
        # One synthetic column per phenotype: __y0/__w0/__n0, __y1/__w1/__n1, ...
        ys = ['__y{:}'.format(i) for i, _ in enumerate(chi_sq_exprs)]
        ws = ['__w{:}'.format(i) for i, _ in enumerate(chi_sq_exprs)]
        ns = ['__n{:}'.format(i) for i, _ in enumerate(n_samples_exprs)]
        ds = ds.select(**dict(**{'__locus': ds.locus,
                                 '__alleles': ds.alleles,
                                 '__w_initial': weight_expr,
                                 '__x': ld_score_expr},
                              **{y: chi_sq_exprs[i]
                                 for i, y in enumerate(ys)},
                              **{w: weight_expr for w in ws},
                              **{n: n_samples_exprs[i]
                                 for i, n in enumerate(ns)}))
        ds = ds.key_by(ds.__locus, ds.__alleles)
        # Checkpoint: write and re-read to materialize the select above
        # before fanning out into one table per phenotype.
        table_tmp_file = new_temp_file()
        ds.write(table_tmp_file)
        ds = hl.read_table(table_tmp_file)
        hts = [ds.select(**{'__w_initial': ds.__w_initial,
                            '__w_initial_floor': hl.max(ds.__w_initial,
                                                        1.0),
                            '__x': ds.__x,
                            '__x_floor': hl.max(ds.__x, 1.0),
                            '__y_name': i,
                            '__y': ds[ys[i]],
                            '__w': ds[ws[i]],
                            '__n': hl.int(ds[ns[i]])})
               for i, y in enumerate(ys)]
        mts = [ht.to_matrix_table(row_key=['__locus',
                                           '__alleles'],
                                  col_key=['__y_name'],
                                  row_fields=['__w_initial',
                                              '__w_initial_floor',
                                              '__x',
                                              '__x_floor'])
               for ht in hts]
        # Stitch the per-phenotype matrix tables back together as columns.
        ds = mts[0]
        for i in range(1, len(ys)):
            ds = ds.union_cols(mts[i])
        ds = ds.filter_rows(hl.is_defined(ds.__locus)
                            & hl.is_defined(ds.__alleles)
                            & hl.is_defined(ds.__w_initial)
                            & hl.is_defined(ds.__x))
    mt_tmp_file1 = new_temp_file()
    ds.write(mt_tmp_file1)
    mt = hl.read_matrix_table(mt_tmp_file1)
    if not n_reference_panel_variants:
        M = mt.count_rows()
    else:
        M = n_reference_panel_variants
    # Two-step procedure: step 1 fits intercept + slope on "well-behaved"
    # variants (chi-sq below the threshold); step 2 re-fits the slope on all
    # defined variants with the step-1 intercept held fixed.
    mt = mt.annotate_entries(__in_step1=(hl.is_defined(mt.__y)
                                         & (mt.__y < two_step_threshold)),
                             __in_step2=hl.is_defined(mt.__y))
    mt = mt.annotate_cols(__col_idx=hl.int(hl.scan.count()),
                          __m_step1=hl.agg.count_where(mt.__in_step1),
                          __m_step2=hl.agg.count_where(mt.__in_step2))
    col_keys = list(mt.col_key)
    # Assign each entry to a jackknife block; this is done on the localized
    # (table) representation so scans over entries are possible.
    ht = mt.localize_entries(entries_array_field_name='__entries',
                             columns_array_field_name='__cols')
    ht = ht.annotate(__entries=hl.rbind(
        hl.scan.array_agg(
            lambda entry: hl.scan.count_where(entry.__in_step1),
            ht.__entries),
        lambda step1_indices: hl.map(
            lambda i: hl.rbind(
                hl.int(hl.or_else(step1_indices[i], 0)),
                ht.__cols[i].__m_step1,
                ht.__entries[i],
                lambda step1_idx, m_step1, entry: hl.rbind(
                    hl.map(
                        lambda j: hl.int(hl.floor(j * (m_step1 / n_blocks))),
                        hl.range(0, n_blocks + 1)),
                    lambda step1_separators: hl.rbind(
                        hl.set(step1_separators).contains(step1_idx),
                        hl.sum(
                            hl.map(
                                lambda s1: step1_idx >= s1,
                                step1_separators)) - 1,
                        lambda is_separator, step1_block: entry.annotate(
                            __step1_block=step1_block,
                            __step2_block=hl.if_else(~entry.__in_step1 & is_separator,
                                                     step1_block - 1,
                                                     step1_block))))),
            hl.range(0, hl.len(ht.__entries)))))
    mt = ht._unlocalize_entries('__entries', '__cols', col_keys)
    mt_tmp_file2 = new_temp_file()
    mt.write(mt_tmp_file2)
    mt = hl.read_matrix_table(mt_tmp_file2)
    # initial coefficient estimates
    mt = mt.annotate_cols(__initial_betas=[
        1.0, (hl.agg.mean(mt.__y) - 1.0) / hl.agg.mean(mt.__x)])
    mt = mt.annotate_cols(__step1_betas=mt.__initial_betas,
                          __step2_betas=mt.__initial_betas)
    # step 1 iteratively reweighted least squares
    # (3 fixed IRLS iterations, weights recomputed from the current betas)
    for i in range(3):
        mt = mt.annotate_entries(__w=hl.if_else(
            mt.__in_step1,
            1.0 / (mt.__w_initial_floor * 2.0 * (mt.__step1_betas[0]
                                                 + mt.__step1_betas[1]
                                                 * mt.__x_floor) ** 2),
            0.0))
        mt = mt.annotate_cols(__step1_betas=hl.agg.filter(
            mt.__in_step1,
            hl.agg.linreg(y=mt.__y,
                          x=[1.0, mt.__x],
                          weight=mt.__w).beta))
        # Clamp the implied heritability to [0, 1] and convert back to a slope.
        mt = mt.annotate_cols(__step1_h2=hl.max(hl.min(
            mt.__step1_betas[1] * M / hl.agg.mean(mt.__n), 1.0), 0.0))
        mt = mt.annotate_cols(__step1_betas=[
            mt.__step1_betas[0],
            mt.__step1_h2 * hl.agg.mean(mt.__n) / M])
    # step 1 block jackknife
    # (leave-one-block-out refits; bias-corrected pseudo-values below)
    mt = mt.annotate_cols(__step1_block_betas=hl.agg.array_agg(
        lambda i: hl.agg.filter((mt.__step1_block != i) & mt.__in_step1,
                                hl.agg.linreg(y=mt.__y,
                                              x=[1.0, mt.__x],
                                              weight=mt.__w).beta),
        hl.range(n_blocks)))
    mt = mt.annotate_cols(__step1_block_betas_bias_corrected=hl.map(
        lambda x: n_blocks * mt.__step1_betas - (n_blocks - 1) * x,
        mt.__step1_block_betas))
    mt = mt.annotate_cols(
        __step1_jackknife_mean=hl.map(
            lambda i: hl.mean(
                hl.map(lambda x: x[i],
                       mt.__step1_block_betas_bias_corrected)),
            hl.range(0, __k)),
        __step1_jackknife_variance=hl.map(
            lambda i: (hl.sum(
                hl.map(lambda x: x[i]**2,
                       mt.__step1_block_betas_bias_corrected))
                       - hl.sum(
                hl.map(lambda x: x[i],
                       mt.__step1_block_betas_bias_corrected)) ** 2
                       / n_blocks)
            / (n_blocks - 1) / n_blocks,
            hl.range(0, __k)))
    # step 2 iteratively reweighted least squares
    for i in range(3):
        # NOTE(review): the consecutive "+ +" below is a unary plus
        # (no-op); harmless but looks like a typo worth cleaning up.
        mt = mt.annotate_entries(__w=hl.if_else(
            mt.__in_step2,
            1.0 / (mt.__w_initial_floor
                   * 2.0 * (mt.__step2_betas[0] +
                            + mt.__step2_betas[1]
                            * mt.__x_floor) ** 2),
            0.0))
        # Intercept is fixed at the step-1 estimate; only the slope is re-fit.
        mt = mt.annotate_cols(__step2_betas=[
            mt.__step1_betas[0],
            hl.agg.filter(mt.__in_step2,
                          hl.agg.linreg(y=mt.__y - mt.__step1_betas[0],
                                        x=[mt.__x],
                                        weight=mt.__w).beta[0])])
        mt = mt.annotate_cols(__step2_h2=hl.max(hl.min(
            mt.__step2_betas[1] * M / hl.agg.mean(mt.__n), 1.0), 0.0))
        mt = mt.annotate_cols(__step2_betas=[
            mt.__step1_betas[0],
            mt.__step2_h2 * hl.agg.mean(mt.__n) / M])
    # step 2 block jackknife
    mt = mt.annotate_cols(__step2_block_betas=hl.agg.array_agg(
        lambda i: hl.agg.filter((mt.__step2_block != i) & mt.__in_step2,
                                hl.agg.linreg(y=mt.__y - mt.__step1_betas[0],
                                              x=[mt.__x],
                                              weight=mt.__w).beta[0]),
        hl.range(n_blocks)))
    mt = mt.annotate_cols(__step2_block_betas_bias_corrected=hl.map(
        lambda x: n_blocks * mt.__step2_betas[1] - (n_blocks - 1) * x,
        mt.__step2_block_betas))
    mt = mt.annotate_cols(
        __step2_jackknife_mean=hl.mean(
            mt.__step2_block_betas_bias_corrected),
        __step2_jackknife_variance=(
            hl.sum(mt.__step2_block_betas_bias_corrected ** 2)
            - hl.sum(mt.__step2_block_betas_bias_corrected) ** 2
            / n_blocks) / (n_blocks - 1) / n_blocks)
    # combine step 1 and step 2 block jackknifes
    # (NOTE(review): same benign unary "+ +" pattern as in the step-2 loop)
    mt = mt.annotate_entries(
        __step2_initial_w=1.0 / (mt.__w_initial_floor
                                 * 2.0 * (mt.__initial_betas[0] +
                                          + mt.__initial_betas[1]
                                          * mt.__x_floor) ** 2))
    mt = mt.annotate_cols(
        __final_betas=[
            mt.__step1_betas[0],
            mt.__step2_betas[1]],
        __c=(hl.agg.sum(mt.__step2_initial_w * mt.__x)
             / hl.agg.sum(mt.__step2_initial_w * mt.__x**2)))
    mt = mt.annotate_cols(__final_block_betas=hl.map(
        lambda i: (mt.__step2_block_betas[i] - mt.__c
                   * (mt.__step1_block_betas[i][0] - mt.__final_betas[0])),
        hl.range(0, n_blocks)))
    mt = mt.annotate_cols(
        __final_block_betas_bias_corrected=(n_blocks * mt.__final_betas[1]
                                            - (n_blocks - 1)
                                            * mt.__final_block_betas))
    mt = mt.annotate_cols(
        __final_jackknife_mean=[
            mt.__step1_jackknife_mean[0],
            hl.mean(mt.__final_block_betas_bias_corrected)],
        __final_jackknife_variance=[
            mt.__step1_jackknife_variance[0],
            (hl.sum(mt.__final_block_betas_bias_corrected ** 2)
             - hl.sum(mt.__final_block_betas_bias_corrected) ** 2
             / n_blocks) / (n_blocks - 1) / n_blocks])
    # convert coefficient to heritability estimate
    # (h2 = slope * M / mean(N); standard errors scale by the same factor)
    mt = mt.annotate_cols(
        phenotype=mt.__y_name,
        mean_chi_sq=hl.agg.mean(mt.__y),
        intercept=hl.struct(
            estimate=mt.__final_betas[0],
            standard_error=hl.sqrt(mt.__final_jackknife_variance[0])),
        snp_heritability=hl.struct(
            estimate=(M / hl.agg.mean(mt.__n)) * mt.__final_betas[1],
            standard_error=hl.sqrt((M / hl.agg.mean(mt.__n)) ** 2
                                   * mt.__final_jackknife_variance[1])))
    # format and return results
    ht = mt.cols()
    ht = ht.key_by(ht.phenotype)
    ht = ht.select(ht.mean_chi_sq,
                   ht.intercept,
                   ht.snp_heritability)
    ht_tmp_file = new_temp_file()
    ht.write(ht_tmp_file)
    ht = hl.read_table(ht_tmp_file)
    return ht
| [
11748,
32405,
355,
289,
75,
198,
6738,
32405,
13,
31937,
13,
42712,
507,
1330,
44052,
62,
22468,
2414,
11,
44052,
62,
77,
39223,
11,
16602,
198,
6738,
32405,
13,
4906,
9122,
1330,
2099,
9122,
11,
530,
1659,
11,
8379,
1659,
11,
9242,
... | 1.854615 | 12,491 |
"""
2_optionPriceSpread.py
Created by Luca Camerani at 31/08/2020, University of Milano-Bicocca.
(l.camerani@campus.unimib.it)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from EcoFin.dataDownload.optionsManager import OptionManager
from EcoFin.dataDownload.ticker import Ticker
from EcoFin.options.deepOptionChain import DeepOptionChain
from EcoFin.stat.utils import weightedAvg, weightedStd
# -------------------------[Set-up]-------------------------
ticker = Ticker('MSFT')
# Fixed snapshot time as a Unix epoch (presumably ~2017-04-20) so the
# analysis is reproducible -- TODO confirm against EcoFin's OptionManager docs.
optionManager = OptionManager(ticker, now=1492646400)
# Pick the first expiration at least 30 days out.
exp = optionManager.getExpirationByMaturity(30, method='greater')
optionChain = optionManager.getOptionChain(exp=exp)
# ----------------------------------------------------------
ticker_info = ticker.getInfo()
forwardPrice = optionChain.getForwardPrice()
deepOptionChain = DeepOptionChain(optionChain)
data = deepOptionChain.getDeepOptionChain()
# Per-strike weights: combined call+put open interest, normalized to sum to 1
# (nansum ignores strikes with missing open interest).
data['weights'] = data.loc[:, ['openInterest_call', 'openInterest_put']].sum(axis=1) / \
                  np.nansum(data.loc[:, ['openInterest_call', 'openInterest_put']].to_numpy())
fig, axs = plt.subplots(3, figsize=(15, 8), sharex=True)
fig.suptitle('Option prices spread analysis ({})'.format(ticker_info.longName), fontsize=16)
# chart 1
axs[0].set_title('Option market price curve vs. theorichal prices')
axs[0].plot(data.strike, data['TheoPrice_call'], linestyle="dotted", label='Theoretical call')
axs[0].plot(data.strike, data['TheoPrice_put'], linestyle="dotted", label='Theoretical put')
axs[0].plot(data.strike, data.avgPrice_call, label='$Call_{AVG}$', color='green')
axs[0].plot(data.strike, data.avgPrice_put, label='$Put_{AVG}$', color='red')
axs[0].plot(forwardPrice, 0, markersize=8, marker="^", color='violet')
prices = data[['avgPrice_call', 'avgPrice_put']].to_numpy()
axs[0].vlines(forwardPrice, np.nanmin(prices), np.nanmax(prices),
              linestyles="dashed", color='violet', alpha=.6, label='Forward Price')
axs[0].legend()
axs[0].grid()
# chart 2
axs[1].set_title('Option Price Spread ($OPS$)')
axs[1].plot(data.strike, data['spread_call'], label='$Spread_{CALL}$', color='green', alpha=.5)
axs[1].plot(data.strike, data['spread_put'], label='$Spread_{PUT}$', color='red', alpha=.5)
axs[1].plot(data.strike, data['spreadSummary'], label='$Spread_{SUMMARY}$', linewidth=3, linestyle="dotted")
spreads = data[['spread_call', 'spread_put']].to_numpy()
axs[1].vlines(forwardPrice, np.nanmin(spreads), np.nanmax(spreads),
              linestyles="dashed", color='violet', alpha=.6, label='Forward Price')
axs[1].legend()
axs[1].grid()
# chart 3
lines = []
axs[2].set_title('Weights')
lines.append(axs[2].plot(data.strike, np.abs(data.moneyness), label='$Moneyness$')[0])
lines.append(axs[2].vlines(forwardPrice, 0, np.nanmax(data.moneyness),
                           linestyles="dashed", color='violet', alpha=.6, label='Forward Price'))
axs[2].set(xlabel='Strike')
axs[2].grid()
# NOTE(review): ax_bis aliases axs[2] directly (no twinx()), so the open
# interest bars share the moneyness y-scale -- confirm this is intended.
ax_bis = axs[2]
lines.append(ax_bis.bar(data.strike, data['openInterest_call'],
                        label='Open Interest (Call)', color='green', alpha=.3))
lines.append(ax_bis.bar(data.strike, data['openInterest_put'],
                        label='Open Interest (Put)', color='red', alpha=.3))
# Secondary y-axis for the normalized weights curve.
ax_ter = axs[2].twinx()
lines.append(ax_ter.plot(data.strike, data['weights'],
                         label='Weights', color='blue', alpha=.3)[0])
axs[2].legend(lines, [l.get_label() for l in lines], loc=0)
plt.figtext(.9, 0.02, "{} | {}".format(optionChain.getChainDate(), optionChain.getChainExpiration()),
            ha="right", fontsize=10, bbox={"facecolor": "orange", "alpha": 0.2, "pad": 8})
plt.show()
# Open-interest-weighted mean/std of the spread summary across strikes.
summary = {'Mean': weightedAvg(data['spreadSummary'], data['weights']),
           'Std': weightedStd(data['spreadSummary'], data['weights'])}
print('Summary: {}'.format(summary))
# ----------------------[EXPORT BLOCK]--------------------------------
path = '../Export/[{}]_({})'.format(ticker.ticker, ticker_info.longName)
if not os.path.exists(path):
    os.makedirs(path)
fig.savefig('{}/optionPriceSpread_[{}].png'.format(path, exp))
# ----------------------[EXPORT BLOCK]--------------------------------
| [
37811,
198,
17,
62,
18076,
18124,
44458,
13,
9078,
198,
198,
41972,
416,
7598,
64,
8014,
3216,
379,
3261,
14,
2919,
14,
42334,
11,
2059,
286,
4460,
5733,
12,
33,
291,
420,
6888,
13,
198,
7,
75,
13,
66,
2382,
3216,
31,
43842,
13,
... | 2.640367 | 1,635 |
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import visual_solvers
import linoplib
if __name__ == '__main__':
    # NOTE(review): `generate_animation` is not defined in this file and not
    # visibly imported -- presumably it lives in `visual_solvers` or
    # `linoplib`; confirm and qualify/import it explicitly.
    # First argument appears to select the initial-guess frequency content
    # (0 zeros, 1 low-frequency, 10 high-frequency) -- TODO confirm.
    generate_animation(0, 'jacobi_zeros.mp4')
    print('1')
    generate_animation(1, 'jacobi_lowfreq.mp4')
    print('2')
    generate_animation(10, 'jacobi_highfreq.mp4')
    print('Done!')
    # Need to call `ffmpeg -i <input filename> -filter:v "setpts=0.1*PTS" <output filename>`
    # to make it actually go fast. (This works by dropping frames.)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
9979,
1187,
355,
629,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
11748,
5874,
62,
34453,
6... | 2.61244 | 209 |
# Generated by Django 2.1.1 on 2018-10-11 09:50
import common.utils
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
16,
319,
2864,
12,
940,
12,
1157,
7769,
25,
1120,
198,
198,
11748,
2219,
13,
26791,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
... | 3.123596 | 89 |
import cv2
import base64
def get_base64_image(image):
    """
    Encode an OpenCV image as a base64 JPEG data URI.

    Args:
        image: Image as a numpy array (OpenCV BGR convention -- TODO confirm
            callers always pass BGR).

    Returns:
        str or None: A ``data:image/jpeg;base64, ...`` data URI string, or
        ``None`` if the image cannot be encoded.
    """
    try:
        # imencode reports failure through its boolean flag rather than
        # raising, so the flag must be checked explicitly.
        success, image_buffer = cv2.imencode(".jpg", image)
        if not success:
            return None
        image_str = base64.b64encode(image_buffer).decode("utf-8")
        return "data:image/jpeg;base64, {0}".format(image_str)
    except Exception:
        # Exception (not BaseException) so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed.
        return None
| [
11748,
269,
85,
17,
198,
11748,
2779,
2414,
628,
198,
4299,
651,
62,
8692,
2414,
62,
9060,
7,
9060,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
38240,
2939,
284,
2779,
5598,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
2... | 2.243421 | 152 |
# Aula 07 - Desafio 008
# Reads a length in meters and prints it converted to centimeters/millimeters.
SEPARATOR = '==' * 31
print(SEPARATOR)
print('==' * 10, 'CONVERSOR DE MEDIDAS', '==' * 10)
print(SEPARATOR)
num = float(input('Digite o valor da medida em metros: '))
cm = num * 100
mm = num * 1000
print(SEPARATOR)
print(f'A medida em METROS digitada foi: {num:.3f}m')
print(f'Equivalente a {cm:<6.3f}cm e {mm:<6.4f}mm')
print(SEPARATOR)
print('Fim')
2,
32,
4712,
8753,
532,
2935,
1878,
952,
3571,
23,
198,
198,
4798,
10786,
855,
6,
9,
3132,
8,
198,
4798,
10786,
855,
6,
9,
940,
11,
705,
10943,
28884,
1581,
5550,
26112,
2389,
1921,
3256,
705,
855,
6,
9,
940,
8,
198,
4798,
10786... | 2.074074 | 162 |
####################################################################################
#
# Released under MIT License
#
# Copyright (c) 2019 CoderDojo Futurix <coderdojo@futurix.pt>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
'''
################################################################################
AsPi
################################################################################
(fork me at: https://github.com/coderdojo-futurix/aspi)
AsPi is a small library that aims to provide the simplest possible data collection
interface to the sensors available in the AstroPi computer, taking into account
the most strict requirements of AstroPi based experiments running in the ISS.
Allowing the scientists to concentrate on the science experiment aspects and in
the respective data analysis
The main objective is to allow scientists of all ages with little or no coding
background to harness all the data the AstroPi can provide in a very simple
and through a "just works" approach.
It works by periodically taking measurements from all the AstroPi sensors and
storing the values in a CSV file. It can also, optionally, take photographs
using the AstroPi camera and store them in files (functionality DISABLED
by default).
The following data is collected from AstroPi sensors:
* Temperature
* Humidity
* Pressure
* Orientation
* Gyroscope
* Accelerometer
* Compass
* ISS Position (calculated via pyephem)
* Motion Sensor (using the AstroPi camera)
The AsPi library is designed to allow the program using it to run completely
automatically and unattended for a specified amount of time, requiring
absolutely no interaction from the operator other than to start the program.
The AsPi library provides a flat and uniform view across all the multiple
sensors and devices available in the AstroPi.
Usage:
datalogger = AsPiLogger()
datalogger.start()
'''
from gpiozero import CPUTemperature
from datetime import datetime
from logging import Formatter
from logzero import logger
from pisense import SenseHAT
from ephem import readtle, degrees
from threading import Timer, Thread
from queue import Queue
from collections import OrderedDict
import logzero
import os
import time
import locale
import math
import sys
import signal
import picamera
import picamera.array
import numpy as np
# Global AstroPi device objects
# NOTE(review): these are created at import time; picamera.PiCamera() grabs
# the camera device exclusively, so importing this module twice (or alongside
# another camera user) will fail -- confirm this is acceptable.
sense_hat = SenseHAT()
cpu = CPUTemperature()
camera = picamera.PiCamera()
# Default values
MIN_LOG_PERIOD_IN_SECS = 2
MIN_IMG_PERIOD_IN_SECS = 5
SHUTDOWN_TIMEOUT_IN_SECS = 3 * 60
# Default run length: 3 hours minus the shutdown grace period.
DEFAULT_DURATION_IN_SECS = 3 * 60 * 60 - SHUTDOWN_TIMEOUT_IN_SECS
DEFAULT_SIZE_PER_LOGFILE_IN_BYTES = 30*1024*1024
DEFAULT_LOG_PERIOD_IN_SECS = 5
DEFAULT_IMG_PERIOD_IN_SECS = 10
DEFAULT_LOGFILE_PREFIX = "sense_hat_logger"
# Full sensor resolution of the V1 camera module (sensor mode 2).
PICAMERA_SENSOR_MODE_2_RESOLUTION = ( 2592, 1944 )
ASTROPI_ORIENTATION = 270
ENABLE_DEBUG = False
# Sentinel used when a sensor reading is unavailable.
NO_READING=-1
LOG_FORMAT='%(asctime)-15s.%(msecs)03d,%(message)s'
DATE_FORMAT='%Y-%m-%d %H:%M:%S'
TIMESTAMP_FIELD = "timestamp"
TIMESTAMP_FORMAT='{:%Y-%m-%d_%Hh%Mm%Ss}'
DEGREES_PER_RADIAN = 180.0 / math.pi
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
LOGFILE_EXT = 'csv'
NO_TIMESTAMP_FORMATTER = Formatter()
def get_timestamp():
    '''
    Return the current local time rendered with the module's
    TIMESTAMP_FORMAT pattern.
    '''
    now = datetime.now()
    return TIMESTAMP_FORMAT.format(now)
class AsPi:
    '''
    Namespace class holding the sensor-name and unit constants shared across
    the library, plus the class-level shutdown state machine for the program.
    The class is used as a singleton namespace and is never instantiated, so
    all methods are static and all state lives on the class itself.
    '''
    # Flag to indicate the program has ended
    _ended = False
    # Holds the termination timer after end() is called; None until then.
    shutdowntimer = None

    @staticmethod
    def isShuttingDown():
        '''
        Return True when end() has been called and the termination timer
        is armed.
        '''
        return AsPi.shutdowntimer is not None

    @staticmethod
    def terminate():
        '''
        Self-terminate by sending SIGTERM to the current process.
        '''
        os.kill(os.getpid(), signal.SIGTERM)

    @staticmethod
    def end():
        '''
        Gracefully end the program and arm a termination timer: if cleanup
        does not complete within SHUTDOWN_TIMEOUT_IN_SECS seconds, the
        process force-terminates itself via SIGTERM. Idempotent: repeated
        calls after the first are no-ops.
        '''
        if not AsPi.hasEnded():
            AsPi._ended = True
            print("Forcing termination in " + str(SHUTDOWN_TIMEOUT_IN_SECS) + " secs")
            AsPi.shutdowntimer = Timer(SHUTDOWN_TIMEOUT_IN_SECS, AsPi.terminate)
            AsPi.shutdowntimer.start()

    @staticmethod
    def hasEnded():
        '''
        Return True once end() has been called.
        '''
        return AsPi._ended

    # Sensor names constants
    SENSOR_CPU_TEMP = "cpu_temp"
    SENSOR_TEMPERATURE = "temperature"
    SENSOR_PRESSURE = "pressure"
    SENSOR_HUMIDITY = "humidity"
    SENSOR_COMPASS_X = "compass_x"
    SENSOR_COMPASS_Y = "compass_y"
    SENSOR_COMPASS_Z = "compass_z"
    SENSOR_GYRO_X = "gyro_x"
    SENSOR_GYRO_Y = "gyro_y"
    SENSOR_GYRO_Z = "gyro_z"
    SENSOR_ACCEL_X = "accel_x"
    SENSOR_ACCEL_Y = "accel_y"
    SENSOR_ACCEL_Z = "accel_z"
    SENSOR_PITCH = "pitch"
    SENSOR_ROLL = "roll"
    SENSOR_YAW = "yaw"
    SENSOR_LAT = "lat"
    SENSOR_LON = "long"
    SENSOR_ELEVATION = "elevation"
    SENSOR_ECLIPSED = "eclipsed"
    SENSOR_MOTION = "motion"
    SENSOR_USERDATA = "userdata"

    # Units constants
    UNITS_DEGREES_CELSIUS = "°C"
    UNITS_RADIANS = "rad"
    UNITS_RADIANS_PER_SEC = UNITS_RADIANS + "/sec"
    UNITS_STANDARD_GRAVITIES = "g"
    UNITS_MICRO_TESLAS = "uT"
    UNITS_MILLIBARS = "mbar"
    UNITS_PERC_RELATIVE_HUMIDITY = "%RH"
    UNITS_DEGREES = "°"
    UNITS_METERS = "m"
    UNITS_BOOL = "bool"
    UNITS_COUNT = "n"
    UNITS_STR = "str"

    # Units of the values reported by each sensor; the ordering of this
    # mapping also fixes the column order of the CSV log.
    UNITS = OrderedDict( [
        ( SENSOR_CPU_TEMP     , UNITS_DEGREES_CELSIUS        ) ,
        ( SENSOR_TEMPERATURE  , UNITS_DEGREES_CELSIUS        ) ,
        ( SENSOR_PRESSURE     , UNITS_MILLIBARS              ) ,
        ( SENSOR_HUMIDITY     , UNITS_PERC_RELATIVE_HUMIDITY ) ,
        ( SENSOR_COMPASS_X    , UNITS_MICRO_TESLAS           ) ,
        ( SENSOR_COMPASS_Y    , UNITS_MICRO_TESLAS           ) ,
        ( SENSOR_COMPASS_Z    , UNITS_MICRO_TESLAS           ) ,
        ( SENSOR_GYRO_X       , UNITS_RADIANS_PER_SEC        ) ,
        ( SENSOR_GYRO_Y       , UNITS_RADIANS_PER_SEC        ) ,
        ( SENSOR_GYRO_Z       , UNITS_RADIANS_PER_SEC        ) ,
        ( SENSOR_ACCEL_X      , UNITS_STANDARD_GRAVITIES     ) ,
        ( SENSOR_ACCEL_Y      , UNITS_STANDARD_GRAVITIES     ) ,
        ( SENSOR_ACCEL_Z      , UNITS_STANDARD_GRAVITIES     ) ,
        ( SENSOR_PITCH        , UNITS_RADIANS                ) ,
        ( SENSOR_ROLL         , UNITS_RADIANS                ) ,
        ( SENSOR_YAW          , UNITS_RADIANS                ) ,
        ( SENSOR_LAT          , UNITS_DEGREES                ) ,
        ( SENSOR_LON          , UNITS_DEGREES                ) ,
        ( SENSOR_ELEVATION    , UNITS_METERS                 ) ,
        ( SENSOR_ECLIPSED     , UNITS_BOOL                   ) ,
        ( SENSOR_MOTION       , UNITS_COUNT                  ) ,
        ( SENSOR_USERDATA     , UNITS_STR                    )
    ])

    # list with all sensor names (a dict view, ordered as UNITS)
    ALL_SENSORS = UNITS.keys()
class AsPiResult:
    '''
    Holder that stores one and only one value, used to safely exchange
    values between threads.
    '''
    # NOTE(review): no implementation is visible in this fragment -- the
    # class body appears truncated; confirm against the full source.
class AsPiSensors:
    '''
    Class that takes measurements from all sensors.
    '''
    # NOTE(review): the reading methods are not visible in this fragment --
    # the class body appears truncated; confirm against the full source.
    # Cross-thread holders for the latest user data and sensor readings.
    userData = AsPiResult()
    lastAsPiSensorsReading = AsPiResult()
    cpu = CPUTemperature()
    # ISS two-line element set (TLE) used by pyephem to compute position.
    # NOTE(review): epoch 19027 (late Jan 2019) -- TLEs go stale quickly;
    # confirm this is refreshed before any real run.
    iss = readtle(
        'ISS (ZARYA)'            ,
        '1 25544U 98067A   19027.92703822  .00001504  00000-0  30922-4 0  9992',
        '2 25544  51.6413 338.8011 0004969 323.5710 139.9801 15.53200917153468'
    )
class AsPiTimer:
    '''
    Recurrent timer. It is the same as the Python threading Timer class but
    a recurring one: every time it fires, it calls the callback and sets
    another Timer, and keeps doing that until it is cancelled.
    '''
    # NOTE(review): no implementation is visible in this fragment -- the
    # class body appears truncated; confirm against the full source.
class AsPiMemImage:
    '''
    Class to store an image in memory.
    To be compatible with Tensorflow classification functions.
    '''
    # NOTE(review): no implementation is visible in this fragment -- the
    # class body appears truncated; confirm against the full source.
class MotionAnalyser( picamera.array.PiMotionAnalysis ):
    '''
    Analyses frames from recording video and checks if the
    frame vectors cross the thresholds indicating that movement
    was detected or not. If it detects movement the occurrences
    variable is incremented and it can be queried for movement
    events
    '''
    # Count of motion events detected so far.
    # NOTE(review): the analyse() override is not visible in this fragment --
    # the class body appears truncated; confirm against the full source.
    occurrences=0
class AsPiMotionDetector( Thread ):
    '''
    Starts and stops camera recording to /dev/null sending
    the frames to the MotionAnalyser class to detect movement.
    '''
    # NOTE(review): no implementation is visible in this fragment -- the
    # class body appears truncated; confirm against the full source.
class AsPiCamera( Thread ):
'''
If enabled (it's disabled by default), it starts a thread, periodically taking pictures
with the AstroPi camera and storing them in files, putting the current ISS position in
the image file EXIF tags. It also stores the image in the lastPictureTaken class variable.
'''
lastPictureTaken = AsPiResult()
isCameraEnabled = False
def __set_latlon_in_exif(self):
"""
A function to write lat/long to EXIF data for photographs
(source based in the get_latlon function available in the 2019 AstroPi Mission
SpaceLab Phase 2 guide in the "Recording images using the camera" section)
"""
AsPiSensors.iss.compute() # Get the lat/long values from ephem
long_value = [float(i) for i in str(AsPiSensors.iss.sublong).split(":")]
if long_value[0] < 0:
long_value[0] = abs(long_value[0])
longitude_ref = "W"
else:
longitude_ref = "E"
longitude = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)
lat_value = [float(i) for i in str( AsPiSensors.iss.sublat).split(":")]
if lat_value[0] < 0:
lat_value[0] = abs(lat_value[0])
latitude_ref = "S"
else:
latitude_ref = "N"
latitude = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)
camera.exif_tags['GPS.GPSLatitude'] = latitude
camera.exif_tags['GPS.GPSLongitude'] = longitude
camera.exif_tags['GPS.GPSLongitudeRef'] = longitude_ref
camera.exif_tags['GPS.GPSLatitudeRef'] = latitude_ref
camera.exif_tags['GPS.GPSAltitudeRef'] = "0"
camera.exif_tags['GPS.GPSAltitude'] = str( AsPiSensors.iss.elevation)
latitude_str ='%s%03dd%02dm%02d' % (latitude_ref, lat_value[0], lat_value[1], lat_value[2])
longitude_str='%s%03dd%02dm%02d' % (longitude_ref, long_value[0], long_value[1], long_value[2])
return latitude_str ,longitude_str
class AsPiUserLoop( Thread):
'''
Thread that continuously calls the provided callback. Passing as arguments, the results of the 'getdata' function.
The result of the provided callback is then passed as argument to a 'returndata' function call.
'''
class AsPiLogFile:
'''
Class that initializes and manages the data log file.
A csv data log file, with the specified naming format, is created at the beginning and everytime
the log file gets bigger than 'logfileMaxBytes' bytes. Each file has a header in the first line
with the sensors names and the respective units.
Each data row is written in the csv file as a line with the field values separated by commas with
the timestamp in the DATE_FORMAT format as the first field.
'''
filePrefix = DEFAULT_LOGFILE_PREFIX
class AsPiLogger:
'''
MAIN CLASS. User facing class that:
* configures all the options with the user specified values or with the predefined defaults.
* starts the log timer, to periodically log data from the sensors
* starts the end timer, to end the program after the specified duration
* starts the motion detector thread to monitor and register movements event count
* if the user callback is specified, it starts the user loop thread to continuously send the collected data
to the user provided callback and receive any result to store in the CSV file as a "pseudo" sensor (SENSOR_USERDATA) value
* if camera is enabled, starts the camera thread to periodically take pictures with the AstroPi camera
* Gracefully manages the program finalization phase and abnormal interruption handling (CTRL-C)
'''
| [
198,
29113,
29113,
14468,
4242,
198,
2,
198,
2,
28728,
739,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
327,
12342,
5211,
7639,
24002,
333,
844,
1279,
66,
12342,
4598,
7639,
31,
69,
315,
333,
844,
13,
457,
29,
198,
2,
... | 2.52666 | 5,345 |
from configparser import ConfigParser
from telethon.sync import TelegramClient
from telethon.tl.types import InputPeerUser
# Load Telegram API credentials and receiver details from config.ini,
# then run the connect / authenticate / disconnect client lifecycle.
configur = ConfigParser()
configur.read('config.ini')
# Enter the API token in 'token'.
# Enter the API ID and API Hash from
# the telegram app created.
api_id = configur.get('app_config', 'api_id')
api_hash = configur.get('app_config', 'api_hash')
token = configur.get('bot_api', 'token')
# your phone number
phone = configur.get('client_details', 'phone')
# client variable
client = None
# receiver user_id and access_hash
user_id = configur.get('receiver_details', 'user_id')
user_hash = configur.get('receiver_details', 'user_hash')
# NOTE(review): client_connect/client_authenticate/client_disconnect are not
# defined in this chunk — presumably they come from elsewhere in the project.
# `client` is still None here; verify those helpers create or replace the
# TelegramClient instance rather than expecting a live one.
client_connect(client)
client_authenticate(client, phone)
client_disconnect(client)
| [
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
5735,
400,
261,
13,
27261,
1330,
50203,
11792,
198,
6738,
5735,
400,
261,
13,
28781,
13,
19199,
1330,
23412,
6435,
263,
12982,
628,
198,
11250,
333,
796,
17056,
46677,
3419,
198,
11250,
... | 3.106557 | 244 |
from celery.schedules import crontab
import os
import environ
env = environ.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
root = environ.Path(__file__) - 3
BASE_DIR = root()
SECRET_KEY = "replaceme"
DEBUG = True
DATABASES = {"default": env.db(default="sqlite:///")}
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# external packages
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
"rest_auth",
"phonenumber_field",
"celery",
"django_filters",
"import_export",
"drf_spectacular",
"django_celery_results",
# monitoring apps
"django_prometheus",
"health_check",
"health_check.db",
"health_check.cache",
"health_check.contrib.redis",
"health_check.contrib.rabbitmq",
"health_check.contrib.celery_ping",
# local apps
"users",
"contacts",
"covid_cases",
"userprofile",
"tbconnect",
"selfswab",
"lifenet",
"vaccine",
"vaxchamps",
]
MIDDLEWARE = [
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
]
ROOT_URLCONF = "healthcheck.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "healthcheck.wsgi.application"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Africa/Johannesburg"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django.contrib.staticfiles.finders.FileSystemFinder",
)
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
TIMEFRAME = env.int("TIMEFRAME", 14)
# Other config variables
AUTH_USER_MODEL = "users.User"
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.TokenAuthentication",
),
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
"PAGE_SIZE": 1000,
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.CursorPagination",
}
SPECTACULAR_SETTINGS = {"TITLE": "HealthCheck"}
# this might be unneccessary if intented usage is international phone numbers
PHONENUMBER_DB_FORMAT = "NATIONAL"
PHONENUMBER_DEFAULT_REGION = "ZA"
# CELERY SETTINGS
CELERY_BROKER_URL = env.str("CELERY_BROKER_URL", "redis://localhost:6379/0")
# BROKER_URL and REDIS_URL are required to have rabbitmq and redis monitoring.
# Redis is used in dev env, RabbitMQ on production.
BROKER_URL = env.str("CELERY_BROKER_URL", "redis://localhost:6379/0")
REDIS_URL = env.str("REDIS_URL", "redis://localhost:6379/0")
CELERY_RESULT_BACKEND = "django-db"
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_TASK_SERIALIZER = env.str("CELERY_TASK_SERIALIZER", "json")
CELERY_RESULT_SERIALIZER = env.str("CELERY_RESULT_SERIALIZER", "json")
CELERY_BEAT_SCHEDULE = {
"scrape-nicd-gis": {
"task": "covid_cases.tasks.scrape_nicd_gis",
"schedule": crontab(minute="0"),
},
"scrape-sacoronavirus": {
"task": "covid_cases.tasks.scrape_sacoronavirus_homepage",
"schedule": crontab(minute="0"),
},
"scrape-sacoronavirus-images": {
"task": "covid_cases.tasks.scrape_sacoronavirus_case_images",
"schedule": crontab(minute="0"),
},
}
TURN_API_KEY = env.str("TURN_API_KEY", "default")
API_DOMAIN = env.str("API_DOMAIN", "https://whatsapp.turn.io/")
SENTRY_DSN = env.str("SENTRY_DSN", "")
RAPIDPRO_URL = env.str("RAPIDPRO_URL", "")
RAPIDPRO_TOKEN = env.str("RAPIDPRO_TOKEN", "")
RAPIDPRO_TBCONNECT_FLOW = env.str("RAPIDPRO_TBCONNECT_FLOW", "")
MEDITECH_URL = env.str("MEDITECH_URL", "")
MEDITECH_USER = env.str("MEDITECH_USER", "")
MEDITECH_PASSWORD = env.str("MEDITECH_PASSWORD", "")
SELFSWAB_RAPIDPRO_TOKEN = env.str("SELFSWAB_RAPIDPRO_TOKEN", "")
SELFSWAB_RAPIDPRO_FLOW = env.str("SELFSWAB_RAPIDPRO_FLOW", "")
VAXCHAMPS_RAPIDPRO_FLOW = env.str("VAXCHAMPS_RAPIDPRO_FLOW", "")
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_URL,
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
"KEY_PREFIX": env.str("REDIS_PREFIX", ""),
}
}
TBCONNECT_BQ_KEY_PATH = env.str("TBCONNECT_BQ_KEY_PATH", "bq_credentials.json")
TBCONNECT_BQ_DATASET = env.str("TBCONNECT_BQ_DATASET", "wassup-165700.tbconnect")
SELFSWAB_BQ_KEY_PATH = env.str("SELFSWAB_BQ_KEY_PATH", "bq_credentials.json")
SELFSWAB_BQ_DATASET = env.str("SELFSWAB_BQ_DATASET", "wassup-165700.selfswab")
SELFSWAB_RETRY_HOURS = env.int("SELFSWAB_RETRY_HOURS", 8)
SELFSWAB_TURN_URL = env.str("SELFSWAB_TURN_URL", "https://whatsapp.turn.io/")
SELFSWAB_TURN_TOKEN = env.str("SELFSWAB_TURN_TOKEN", "default")
LIFENET_BQ_KEY_PATH = env.str("LIFENET_BQ_KEY_PATH", "bq_credentials.json")
LIFENET_BQ_DATASET = env.str("LIFENET_BQ_DATASET", "wassup-165700.lifenet")
CONTACT_NOTIFICATION_ENABLED = env.bool("CONTACT_NOTIFICATION_ENABLED", False)
ENABLE_NICD_GIS_SCRAPING = env.bool("ENABLE_NICD_GIS_SCRAPING", False)
ENABLE_SACORONAVIRUS_SCRAPING = env.bool("ENABLE_SACORONAVIRUS_SCRAPING", False)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "django.core.files.storage.FileSystemStorage"
)
AWS_S3_ACCESS_KEY_ID = env.str("AWS_S3_ACCESS_KEY_ID", "")
AWS_S3_SECRET_ACCESS_KEY = env.str("AWS_S3_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_S3_OBJECT_PARAMETERS = env.dict("AWS_S3_OBJECT_PARAMETERS", default={})
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", None)
AWS_LOCATION = env.str("AWS_LOCATION", "")
AWS_S3_REGION_NAME = env.str("AWS_S3_REGION_NAME", None)
AWS_S3_ENDPOINT_URL = env.str("AWS_S3_ENDPOINT_URL", None)
| [
6738,
18725,
1924,
13,
1416,
704,
5028,
1330,
1067,
756,
397,
198,
11748,
28686,
198,
198,
11748,
551,
2268,
198,
198,
24330,
796,
551,
2268,
13,
4834,
85,
3419,
198,
198,
2,
10934,
13532,
2641,
262,
1628,
588,
428,
25,
28686,
13,
6... | 2.204179 | 3,350 |
# https://leetcode.com/problems/climbing-stairs/
# def climbStairs(self, n: int) -> int:
# array = [1, 2]
# for i in range(2, n+1):
# array.append(array[i-1] + array[i-2])
# return array[n-1]
# Smoke-test calls for Solution.climbStairs (the Solution class is defined
# earlier in the file, outside this view). Per the dynamic-programming
# recurrence sketched in the comments above, the expected outputs are
# presumably the Fibonacci-like values 1, 2, 3, 5, 8 — TODO confirm against
# the actual Solution implementation.
print(Solution().climbStairs(1))
print(Solution().climbStairs(2))
print(Solution().climbStairs(3))
print(Solution().climbStairs(4))
print(Solution().climbStairs(5))
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
565,
320,
4623,
12,
17617,
14,
628,
220,
220,
220,
1303,
825,
12080,
1273,
3468,
7,
944,
11,
299,
25,
493,
8,
4613,
493,
25,
198,
220,
220,
220,
1303,
220,
220,
220,
... | 2.209945 | 181 |
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Wallet, Movement
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
27530,
1330,
37249,
11,
15477,
628,
628
] | 3.764706 | 34 |
import os
import json
import requests
from datetime import date
from dotenv import load_dotenv
from PIL import Image
import PIL
# Read environment variables (e.g. the NASA API key) from a .env file.
load_dotenv()
# NOTE(review): Apod is not defined in this chunk — presumably a wrapper class
# around the NASA APOD API defined elsewhere in the project; input() likely
# supplies a date string. Verify against the Apod constructor.
instance = Apod(input())
img_data = instance.get_image()
# get_image() returns a mapping with at least a "url" key.
img_url = img_data["url"]
# Download the image bytes and save them locally.
# NOTE(review): assumes the ./img directory already exists — open() will
# raise FileNotFoundError otherwise.
page = requests.get(img_url)
with open('./img/apod.jpg', 'wb') as f:
    f.write(page.content)
11748,
28686,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
16605,
24330,
1330,
3440,
62,
26518,
24330,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
350,
4146,
198,
198,
2220,
62,
26518,
24330,
3419,
... | 2.840708 | 113 |
import torch
from dataset import Dataset
| [
11748,
28034,
198,
6738,
27039,
1330,
16092,
292,
316,
628,
628,
198
] | 3.75 | 12 |
"""Intents for the light integration."""
import voluptuous as vol
from openpeerpower.const import ATTR_ENTITY_ID
from openpeerpower.core import OpenPeerPower
from openpeerpower.helpers import intent
import openpeerpower.helpers.config_validation as cv
import openpeerpower.util.color as color_util
from . import (
ATTR_BRIGHTNESS_PCT,
ATTR_RGB_COLOR,
DOMAIN,
SERVICE_TURN_ON,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
)
INTENT_SET = "OppLightSet"
async def async_setup_intents(opp: OpenPeerPower) -> None:
    """Register the light-domain intent handlers with the intent helper."""
    handler = SetIntentHandler()
    opp.helpers.intent.async_register(handler)
class SetIntentHandler(intent.IntentHandler):
    """Handle set color intents.

    Matches a light entity by name, optionally changes its color and/or
    brightness via the light.turn_on service, and builds a spoken response
    describing what changed.
    """
    # Intent type this handler answers (OppLightSet).
    intent_type = INTENT_SET
    # Slot validation: "name" is required; "color" is converted from a color
    # name to an RGB tuple; "brightness" is coerced to an int in 0..100.
    slot_schema = {
        vol.Required("name"): cv.string,
        vol.Optional("color"): color_util.color_name_to_rgb,
        vol.Optional("brightness"): vol.All(vol.Coerce(int), vol.Range(0, 100)),
    }
    async def async_handle(self, intent_obj: intent.Intent) -> intent.IntentResponse:
        """Handle the opp intent.

        Raises (via the intent helpers) if no matching entity is found or the
        entity does not support the requested feature.
        """
        opp = intent_obj.opp
        slots = self.async_validate_slots(intent_obj.slots)
        # Resolve the spoken name against light-domain states only.
        state = opp.helpers.intent.async_match_state(
            slots["name"]["value"],
            [state for state in opp.states.async_all() if state.domain == DOMAIN],
        )
        service_data = {ATTR_ENTITY_ID: state.entity_id}
        # Human-readable fragments describing each attribute that changes.
        speech_parts = []
        if "color" in slots:
            # Aborts with an intent error if the light can't change color.
            intent.async_test_feature(state, SUPPORT_COLOR, "changing colors")
            service_data[ATTR_RGB_COLOR] = slots["color"]["value"]
            # Use original passed in value of the color because we don't have
            # human readable names for that internally.
            speech_parts.append(
                "the color {}".format(intent_obj.slots["color"]["value"])
            )
        if "brightness" in slots:
            intent.async_test_feature(state, SUPPORT_BRIGHTNESS, "changing brightness")
            service_data[ATTR_BRIGHTNESS_PCT] = slots["brightness"]["value"]
            speech_parts.append("{}% brightness".format(slots["brightness"]["value"]))
        # Always turn the light on, with whatever attributes were collected.
        await opp.services.async_call(
            DOMAIN, SERVICE_TURN_ON, service_data, context=intent_obj.context
        )
        response = intent_obj.create_response()
        if not speech_parts: # No attributes changed
            speech = f"Turned on {state.name}"
        else:
            # Join fragments as "X", ", Y", " and Z" to read naturally.
            parts = [f"Changed {state.name} to"]
            for index, part in enumerate(speech_parts):
                if index == 0:
                    parts.append(f" {part}")
                elif index != len(speech_parts) - 1:
                    parts.append(f", {part}")
                else:
                    parts.append(f" and {part}")
            speech = "".join(parts)
        response.async_set_speech(speech)
        return response
| [
37811,
5317,
658,
329,
262,
1657,
11812,
526,
15931,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1280,
33350,
6477,
13,
9979,
1330,
5161,
5446,
62,
3525,
9050,
62,
2389,
198,
6738,
1280,
33350,
6477,
13,
7295,
1330,
4946,
... | 2.272013 | 1,272 |
# coding=utf-8
# Tutorial script (Python 2 print syntax) demonstrating list operations.
# list: an ordered sequence
list1 = [1, 2, 3, 4, 5]
# get the 3rd element of the list
print list1[2]
list2 = [1, 2, 3, [4, 5]]
# get the last element of the list
print list2[-1]
# slicing
list3 = [1, 2, 3, 4, 5]
# forward direction, left to right
# print from the element at index 1 through the last element, with step 2; the default step is 1
print list3[1 : : 2]
# reverse direction, right to left
# print from the last element back to the 3rd-from-last element; the step must be -1
print list3[-1 : -4 : -1]
# list addition: using + creates a new list
list4 = [1, 2]
list5 = [3, 4]
print list4 + list5
# using extend: does not create a new list (extends list6 in place)
list6 = [1, 2, 3, 4, 5]
list7 = [6, 7]
list6.extend(list7)
print list6
# using append
list8 = [1, 2, 3, 4]
list8.append(5)
print list8
# using insert
list9 = [1, 2, 3, 4, 5]
# the 1st argument is the position, the 2nd argument is the value
# If the insert position is greater than the actual length, the value is appended at
# the end of the list. If the position is negative and beyond the actual length (e.g.
# -99 for list9), it is equivalent to inserting at index 0.
list9.insert(8, 88)
print list9
# lists are mutable, so elements can be modified
# note: the index must not be out of range, whether positive or negative
list10 = [1, 2, 3, 4]
list10[2] = 99
print list10
# membership tests use `in` or `not in`
list11 = [1, 2, 3, 4]
print 5 in list11
print 1 in list11
# sorting a list
# note: sort() returns None; it sorts the original list in place
list12 = [4, 3, 88, 1, 4, 9]
list12.sort()
print list12
# reversing a list
# note: reverse() returns None; it reverses the original list in place
list13 = [1, 2, 3, 4, 5]
list13.reverse()
print list13
# comprehensions
# build the list 1..5
# note: range(start, stop, step) — the stop endpoint is exclusive
print [a for a in range(1, 6)]
print [a for a in range(1, 6, 2)]
print [a for a in range(1, 6) if a & 1 == 0] # the if is a filter; here it keeps the even numbers
print ["hello %d" % a for a in xrange(10)] # build formatted strings
print [(a, b) for a in range(1, 5) for b in range(1, 5)] # build tuples
print ["%d haha" % a for a in range(1, 11)] # build formatted strings
print [(a, b) for a in range(0, 3, 2) for b in range(0, 3, 2)] # build tuples
# the built-in list() constructor
# note: the argument must be an iterable object, e.g. a string or a tuple
list14 = "abc"
print list(list14)
list14 = (1, 2, 3)
print list(list14)
# the operation below just deletes a name binding (the copy in list16 survives)
list15 = [8, 9, 10, 11]
list16 = list15[:]
del list15
print list16
# pop: pop can remove the value at any index
# note: the index given to pop must not be out of range
list17 = [1, 2, 3, 4, 5]
popValue1 = list17.pop(3)
popValue2 = list17.pop()
print popValue1
print popValue2
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
2,
1351,
171,
120,
234,
17312,
231,
41753,
237,
21410,
198,
198,
4868,
16,
796,
685,
16,
11,
362,
11,
513,
11,
604,
11,
642,
60,
198,
2,
5525,
236,
115,
20998,
244,
26344,
245,
26193,
101,
... | 1.194245 | 1,529 |
# platform specific customizations
# 2013-03-28, Davide Bacchet (davide.bacchet@gmail.com)
import sys
# associate .m files to C compiler
from waflib import TaskGen
@TaskGen.extension('.m')
def m_hook(self, node):
    """Alias .m files to be compiled the same as .c files, the compiler will do the right thing."""
    # Registered through waf's TaskGen.extension mechanism: waf calls this
    # once per source node ending in '.m' (Objective-C); reusing the 'c'
    # compilation task lets the compiler pick the language from the extension.
    return self.create_compiled_task('c', node)
def customize_environment(env):
    """Apply platform-specific tweaks to the build environment.

    :param env: waf build environment, customized in place
    """
    if sys.platform == 'darwin':
        # macOS: force the clang toolchain
        set_clang_compiler(env)
    elif sys.platform == 'win32':
        # Windows: embed the MSVC manifest into produced binaries
        embed_MSVC_manifest(env)
    elif sys.platform.startswith('linux'):
        # Python 2 reports 'linux2' while Python 3 reports 'linux';
        # startswith matches both (the old == 'linux2' test was dead on py3).
        # No Linux-specific customization is needed at the moment.
        pass
| [
2,
3859,
2176,
2183,
4582,
198,
2,
2211,
12,
3070,
12,
2078,
11,
2544,
485,
35583,
20043,
357,
67,
615,
485,
13,
65,
330,
20043,
31,
14816,
13,
785,
8,
628,
198,
11748,
25064,
198,
198,
2,
11602,
764,
76,
3696,
284,
327,
17050,
... | 2.675325 | 231 |
default_app_config = 'froide.accesstoken.apps.AccessTokenConfig'
| [
12286,
62,
1324,
62,
11250,
796,
705,
69,
305,
485,
13,
330,
728,
301,
4233,
13,
18211,
13,
15457,
30642,
16934,
6,
198
] | 2.826087 | 23 |
import weakref
import numpy as np
import qmhub.helpmelib as pme
from .dobject import cache_update
| [
11748,
4939,
5420,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
10662,
76,
40140,
13,
2978,
4426,
417,
571,
355,
279,
1326,
198,
6738,
764,
67,
15252,
1330,
12940,
62,
19119,
628
] | 3.030303 | 33 |
import os
import pandas as pd
import matplotlib.pyplot as plt | [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83
] | 3.05 | 20 |
import FWCore.ParameterSet.Config as cms
#
# default configuration valid for online DQM
#
# configuration for online DQM
# perform tests on endLumi
# perform tests on endRun
#
# configuration for offline DQM
# perform tests on endRun only
#
# for both online and offline
# get the quality tests from an XML file
# no tests in event loop
# do not prescale
# verboseQT true, but reportThreshold empty
# L1 systems quality tests
# ECAL quality tests
from DQM.L1TMonitorClient.L1EmulatorEcalQualityTests_cfi import *
seqL1EmulatorEcalQualityTests = cms.Sequence(l1EmulatorEcalQualityTests)
# HCAL quality tests
from DQM.L1TMonitorClient.L1EmulatorHcalQualityTests_cfi import *
seqL1EmulatorHcalQualityTests = cms.Sequence(l1EmulatorHcalQualityTests)
# RCT quality tests
from DQM.L1TMonitorClient.L1EmulatorRctQualityTests_cfi import *
seqL1EmulatorRctQualityTests = cms.Sequence(l1EmulatorRctQualityTests)
# GCT quality tests
from DQM.L1TMonitorClient.L1EmulatorGctQualityTests_cfi import *
seqL1EmulatorGctQualityTests = cms.Sequence(l1EmulatorGctQualityTests)
# DTTF quality tests
from DQM.L1TMonitorClient.L1EmulatorDttfQualityTests_cfi import *
seqL1EmulatorDttfQualityTests = cms.Sequence(l1EmulatorDttfQualityTests)
# DTTPG quality tests
#from DQM.L1TMonitorClient.L1EmulatorDttpgQualityTests_cfi import *
#seqL1EmulatorDttpgQualityTests = cms.Sequence(l1EmulatorDttpgQualityTests)
# CSCTF quality tests
from DQM.L1TMonitorClient.L1EmulatorCsctfQualityTests_cfi import *
seqL1EmulatorCsctfQualityTests = cms.Sequence(l1EmulatorCsctfQualityTests)
# CSCTPG quality tests
from DQM.L1TMonitorClient.L1EmulatorCsctpgQualityTests_cfi import *
seqL1EmulatorCsctpgQualityTests = cms.Sequence(l1EmulatorCsctpgQualityTests)
# RPC quality tests
from DQM.L1TMonitorClient.L1EmulatorRpcQualityTests_cfi import *
seqL1EmulatorRpcQualityTests = cms.Sequence(l1EmulatorRpcQualityTests)
# GMT quality tests
from DQM.L1TMonitorClient.L1EmulatorGmtQualityTests_cfi import *
seqL1EmulatorGmtQualityTests = cms.Sequence(l1EmulatorGmtQualityTests)
# GT quality tests
from DQM.L1TMonitorClient.L1EmulatorGtQualityTests_cfi import *
seqL1EmulatorGtQualityTests = cms.Sequence(l1EmulatorGtQualityTests)
# L1 objects quality tests
# GtExternal quality tests
from DQM.L1TMonitorClient.L1EmulatorObjGtExternalQualityTests_cfi import *
seqL1EmulatorObjGtExternalQualityTests = cms.Sequence(l1EmulatorObjGtExternalQualityTests)
# TechTrig quality tests
from DQM.L1TMonitorClient.L1EmulatorObjTechTrigQualityTests_cfi import *
seqL1EmulatorObjTechTrigQualityTests = cms.Sequence(l1EmulatorObjTechTrigQualityTests)
# HfRingEtSums quality tests
from DQM.L1TMonitorClient.L1EmulatorObjHfRingEtSumsQualityTests_cfi import *
seqL1EmulatorObjHfRingEtSumsQualityTests = cms.Sequence(l1EmulatorObjHfRingEtSumsQualityTests)
# HfBitCounts quality tests
from DQM.L1TMonitorClient.L1EmulatorObjHfBitCountsQualityTests_cfi import *
seqL1EmulatorObjHfBitCountsQualityTests = cms.Sequence(l1EmulatorObjHfBitCountsQualityTests)
# HTM quality tests
from DQM.L1TMonitorClient.L1EmulatorObjHTMQualityTests_cfi import *
seqL1EmulatorObjHTMQualityTests = cms.Sequence(l1EmulatorObjHTMQualityTests)
# HTT quality tests
from DQM.L1TMonitorClient.L1EmulatorObjHTTQualityTests_cfi import *
seqL1EmulatorObjHTTQualityTests = cms.Sequence(l1EmulatorObjHTTQualityTests)
# ETM quality tests
from DQM.L1TMonitorClient.L1EmulatorObjETMQualityTests_cfi import *
seqL1EmulatorObjETMQualityTests = cms.Sequence(l1EmulatorObjETMQualityTests)
# ETT quality tests
from DQM.L1TMonitorClient.L1EmulatorObjETTQualityTests_cfi import *
seqL1EmulatorObjETTQualityTests = cms.Sequence(l1EmulatorObjETTQualityTests)
# TauJet quality tests
from DQM.L1TMonitorClient.L1EmulatorObjTauJetQualityTests_cfi import *
seqL1EmulatorObjTauJetQualityTests = cms.Sequence(l1EmulatorObjTauJetQualityTests)
# IsoTauJet quality tests
from DQM.L1TMonitorClient.L1EmulatorObjIsoTauJetQualityTests_cfi import *
seqL1EmulatorObjIsoTauJetQualityTests = cms.Sequence(l1EmulatorObjIsoTauJetQualityTests)
# ForJet quality tests
from DQM.L1TMonitorClient.L1EmulatorObjForJetQualityTests_cfi import *
seqL1EmulatorObjForJetQualityTests = cms.Sequence(l1EmulatorObjForJetQualityTests)
# CenJet quality tests
from DQM.L1TMonitorClient.L1EmulatorObjCenJetQualityTests_cfi import *
seqL1EmulatorObjCenJetQualityTests = cms.Sequence(l1EmulatorObjCenJetQualityTests)
# IsoEG quality tests
from DQM.L1TMonitorClient.L1EmulatorObjIsoEGQualityTests_cfi import *
seqL1EmulatorObjIsoEGQualityTests = cms.Sequence(l1EmulatorObjIsoEGQualityTests)
# NoIsoEG quality tests
from DQM.L1TMonitorClient.L1EmulatorObjNoIsoEGQualityTests_cfi import *
seqL1EmulatorObjNoIsoEGQualityTests = cms.Sequence(l1EmulatorObjNoIsoEGQualityTests)
# Mu quality tests
from DQM.L1TMonitorClient.L1EmulatorObjMuQualityTests_cfi import *
seqL1EmulatorObjMuQualityTests = cms.Sequence(l1EmulatorObjMuQualityTests)
# sequence for L1 systems
l1EmulatorSystemQualityTests = cms.Sequence(
seqL1EmulatorEcalQualityTests +
seqL1EmulatorHcalQualityTests +
seqL1EmulatorRctQualityTests +
seqL1EmulatorGctQualityTests +
seqL1EmulatorDttfQualityTests +
#seqL1EmulatorDttpgQualityTests +
seqL1EmulatorCsctfQualityTests +
seqL1EmulatorCsctpgQualityTests +
seqL1EmulatorRpcQualityTests +
seqL1EmulatorGmtQualityTests +
seqL1EmulatorGtQualityTests
)
# sequence for L1 objects
l1EmulatorObjectQualityTests = cms.Sequence(
seqL1EmulatorObjTechTrigQualityTests +
seqL1EmulatorObjGtExternalQualityTests +
seqL1EmulatorObjHfRingEtSumsQualityTests +
seqL1EmulatorObjHfBitCountsQualityTests +
seqL1EmulatorObjHTMQualityTests +
seqL1EmulatorObjHTTQualityTests +
seqL1EmulatorObjETMQualityTests +
seqL1EmulatorObjETTQualityTests +
seqL1EmulatorObjTauJetQualityTests +
seqL1EmulatorObjIsoTauJetQualityTests +
seqL1EmulatorObjForJetQualityTests +
seqL1EmulatorObjCenJetQualityTests +
seqL1EmulatorObjIsoEGQualityTests +
seqL1EmulatorObjNoIsoEGQualityTests +
seqL1EmulatorObjMuQualityTests
)
# general sequence
l1EmulatorQualityTests = cms.Sequence(
l1EmulatorSystemQualityTests +
l1EmulatorObjectQualityTests
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
2,
198,
2,
4277,
8398,
4938,
329,
2691,
360,
48,
44,
198,
2,
198,
2,
8398,
329,
2691,
360,
48,
44,
198,
2,
220,
220,
220,
1620,
5254,
319,
886,
43,
12994,... | 2.132605 | 3,386 |
#!/usr/bin/env python3
'''
The MIT License (MIT)
Copyright (c) <2018> <DresdenConceptGenomeCenter>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Use Python Naming Conventions
https://www.python.org/dev/peps/pep-0008/#naming-conventions
contact: mathias.lesche(at)tu-dresden.de
'''
''' python modules '''
import logging
from argparse import ArgumentParser as ArgumentParser
from argparse import RawDescriptionHelpFormatter
from datetime import datetime
from os import listdir
from os.path import isfile
from os.path import join as pathjoin
from re import search
from subprocess import Popen
from subprocess import PIPE
from subprocess import CalledProcessError
from subprocess import run
from sys import argv as argv
''' own modules '''
from helper.io_module import check_file
from helper.io_module import create_directory
from helper.helper_logger import MainLogger
from snake_information import SnakeInformation
class Parser(object):
# def show_workflow(self):
# if self.
'''
function retrieves the available workflows from a give directory
@param directory: string
@return: list
'''
'''
function checks the CMCB specific options. It can show the available snake workflows, check if the selected snake
workflow is valid, set the job scheduler and set the cluster config file. However, the configfile has to be provided
by the '-c'/'--configfile parameter
'''
'''
function checks the ZIH specific options. It can show the available snake workflows, check if the selected snake
workflow is valid, set the job scheduler and set the cluster config file. However, the configfile has to be provided
by the '-c'/'--configfile parameter
'''
'''
function checks if the cluster config was provided. if it is, a simple file check is done.
one can provide a subystem too and then it's set to the default value
@param subsytem: string
'''
'''
function checks if a configfile was provided
'''
'''
functinos checks if a snakefile was provided
'''
'''
function checks if the job argument is set correctly. otherwise it defaults to 1
'''
if __name__ == '__main__':
    # Set up module logging, parse the command line, prepare/launch the
    # snakemake run, then shut logging down cleanly.
    mainlog = MainLogger('snakemake')
    parser = Parser()
    parser.main()
    parser.prepare_run()
    mainlog.close()
    logging.shutdown()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
1279,
7908,
29,
1279,
35,
411,
6559,
3103,
984,
13746,
462,
23656,
29,
198,
198,
5990,
3411,
318,
2... | 3.310651 | 1,014 |
from cartopy import crs
import matplotlib.pyplot as plt
from . import vector
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.patches as mpatches
def epsg_to_proj(epsg):
    """Build a cartopy coordinate reference system from an EPSG code.

    Attributes:
        epsg <int>: An EPSG code.
    """
    return crs.epsg(epsg)
class CentreToPage:
"""Return coordinates of page dimensions from a central point. A4 size and landscape is default. Need to expand by allowing orientation to be set"""
class Page:
"""Useful methods for page formatting from a geometry"""
class AxesOld:
    """Legacy helper that creates a cartopy axes and labels its ticks.

    NOTE(review): presumably superseded by the Map class below — confirm
    before extending.
    """
    def __init__(self, proj):
        """Attributes:
            proj <cartopy.crs>: crs projection from cartopy
        """
        self.proj = proj
    def axes(self, ticks=1000):
        """Create, store and return a matplotlib axes using self.proj.

        ``ticks`` is accepted for interface compatibility but unused here;
        call :meth:`axis_ticks` to draw tick labels.
        If this class is not initialised then self.proj should derive from an
        inheriting class.
        """
        proj = self.proj
        ax = plt.axes(projection=proj)
        # Store the axes so axis_ticks() can reach it; previously it was only
        # a local variable, and axis_ticks() raised AttributeError on self.ax.
        self.ax = ax
        return(ax)
    def axis_ticks(self, ticks):
        """Place x/y tick labels every ``ticks`` map units on the stored axes."""
        ax = self.ax
        w, e, s, n = ax.get_extent()
        xticks = list(range(int(round(w)), int(round(e)), ticks))
        yticks = list(range(int(round(s)), int(round(n)), ticks))
        ax.tick_params(axis = 'both', which = 'major', labelsize = 4)
        ax.set_xticklabels(xticks, ha = 'left')
        ax.set_yticklabels(yticks, rotation = 90, va = 'bottom')
        ax.set_xticks(xticks, crs = self.proj)
        ax.set_yticks(yticks, crs = self.proj)
#class MapOld:
# """This was written to include a cartopy projection but that's actually unnescessary unless working with geographical projections (UTM is already a Cartesian projection and can readily be plotted in matplotlib)"""
# def __init__(self, proj, ticks=1000):
# """
# Attributes:
# proj <cartopy.crs>: A cartopy projection.
# """
# self.proj = proj
# self.fig, ax = plt.subplots()
# self.ax = plt.axes(projection=proj)
# self.color = 'black'
#
# def _color(self, c):
# color = c if c else self.color
# return(color)
#
# def decorations(self, color=None):
# self.north(color=color)
# self.scale(color=color)
#
# def north(self, x=5, y=90, color=None):
# color = self._color(color)
# decorations(self.ax).northarrow(x, y, color)
#
# def scale(self, x=5, y=5, width=10000, color=None):
# color = self._color(color)
# decorations(self.ax).scalebar(x, y, width, color)
#
# def legend(self, handles):
# plt.legend(handles=handles, loc='lower right', fontsize=4, title='Legend', title_fontsize=6)
#
# def colorbar(self, label='Raster'):
# """Plot a colorbar to show raster values"""
# divider = make_axes_locatable(self.ax)
# cax = divider.append_axes("right", size="5%", pad=0.1)
# #cbar = plt.colorbar(cax=cax)
# #cbar.set_label(label)
#
# def save(self, outfile, dpi=300):
# """"""
# plt.savefig(outfile + '.jpg', format='jpg', dpi=dpi, bbox_inches='tight')
class decorationsbak:
""""""
class Decorations:
    """Mixin with geometry helpers for map decorations; expects ``self.ax``."""
    def _get_wesn(self):
        """Return the (west, east, south, north) bounds of the axes."""
        west, east = self.ax.get_xlim()
        south, north = self.ax.get_ylim()
        return (west, east, south, north)
    def _frame_percent(self):
        """Return lengths equal to 1% of the frame's x and y extents."""
        west, east, south, north = self._get_wesn()
        one_pct_x = (east - west) / 100
        one_pct_y = (north - south) / 100
        return (one_pct_x, one_pct_y)
class Map(Decorations):
    """Plot a map using matplotlib. Note that this class assumes that the data exists in a Cartesian coordinate system (e.g. UTM); do not use with cartopy projection objects."""
    def __init__(self, proj=None, xlim=(None,None), ylim=(None,None), ticks=1000):
        """
        Attributes:
            proj <cartopy.crs>: A cartopy projection (optional).
            xlim, ylim: optional (min, max) plot-window bounds; currently the
                call applying them is disabled, matching the original code.
            ticks: retained for API compatibility; unused here.
        """
        self.fig, self.ax = plt.subplots()
        if proj:
            self.ax = plt.axes(projection=proj)
        #self._plot_extent(xlim, ylim)
        self.color = 'black'
    def _plot_extent(self, xlim, ylim):
        """Use the matplotlib axes methods 'set_xlim' and 'set_ylim' to define the plot window."""
        # Fix: skip the (None, None) placeholder defaults instead of passing
        # them through, so autoscaling is preserved unless a bound is given.
        if xlim and any(v is not None for v in xlim):
            self.ax.set_xlim(xlim)
        if ylim and any(v is not None for v in ylim):
            self.ax.set_ylim(ylim)
    def colorbar(self, label='Raster'):
        """Plot a colorbar to show raster values.

        NOTE(review): the actual colorbar call is still commented out, so this
        currently only reserves an empty axis on the right-hand side. The new
        axis is returned so callers can draw into it.
        """
        divider = make_axes_locatable(self.ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        #cbar = plt.colorbar(cax=cax)
        #cbar.set_label(label)
        return cax
    def save(self, outfile, dpi=300):
        """Write this map's figure to '<outfile>.jpg'."""
        # Bug fix: save *this* object's figure rather than whatever figure is
        # currently active in pyplot (plt.savefig targets the current figure,
        # which may be a different one if the caller created more plots).
        self.fig.savefig(outfile+'.jpg', format='jpg', dpi=dpi, bbox_inches='tight', pad_inches=0)
def gdf_to_patches(gdf):
    """Convert a GeoDataFrame of classes and rgb colors to legend patches.

    Attributes:
        gdf <geopandas.GeoDataFrame>: A 2-column geodataframe with classes in
            first column and colors in second.

    Returns:
        list of matplotlib ``Patch`` handles suitable for ``plt.legend``.
    """
    # Bug fix: the original body referenced the undefined name ``mpatches``;
    # this module imports ``Patch`` directly (from matplotlib.patches), so a
    # call would have raised NameError.
    handles = [Patch(color=row[1], label=row[0]) for _, row in gdf.iterrows()]
    return handles
class MplPalettes:
    """Return the full lists of colour palettes provided by the palettable package by name and returned as matplotlib compatable"""
    import palettable as p

    def pastel1(self):
        """Colorbrewer2 Pastel1 palette"""
        return self.p.colorbrewer.qualitative.Pastel1_9.mpl_colors
#####################################################
def extent_from_vectorbak(x, y, ew = 5000, aspect = 210 / 297):
    """Return extents of a map from the centre point.

    Coordinates are returned in the format of matplotlib 'get_extent':
    (w, e, s, n).

    Args:
        x, y: centre coordinates of the frame, in map units.
        ew: total east-west extent (frame width) in map units.
        aspect: height/width ratio of the frame. Generalized from the
            previously hard-coded value; defaults to an A4 page in
            landscape orientation (210 mm x 297 mm), preserving the
            original behaviour.
    """
    half_width = ew / 2
    half_height = ew / 2 * aspect
    return (x - half_width, x + half_width, y - half_height, y + half_height)
def extent_from_vector(geometry, ew = 5000):
    """Return extents of a map centred on a vector dataset's mean centroid.

    Coordinates are returned in the format of matplotlib 'get_extent':
    (w, e, s, n), with an A4-landscape (210:297) height-to-width ratio.

    Args:
        geometry: a geopandas geometry column (GeoSeries); its per-feature
            centroids are averaged to find the frame centre.
        ew: total east-west extent (frame width) in map units.

    NOTE(review): the original body was abandoned debugging scaffolding -- it
    printed intermediate values, called several undefined helpers
    (point_to_page, GeometryToPage, geodataframe_to_centroid) and invoked
    exit() unconditionally. Replaced here with the apparent intent, matching
    extent_from_vectorbak's frame arithmetic; confirm against the author.
    """
    centroid = geometry.centroid
    x = centroid.x.mean()
    y = centroid.y.mean()
    half_width = ew / 2
    half_height = ew / 2 * 210 / 297
    return (x - half_width, x + half_width, y - half_height, y + half_height)
from os.path import dirname, join
import numpy as np
import geopandas as gpd
from .vector import spatial_overlays
from matplotlib.patches import Patch
import tempfile
from matplotlib import cm
from shapely.geometry import Point, LineString, Polygon, MultiLineString
rdir = dirname(__file__)
def colorwee(length, pallete = 'Pastel1'):
    """Return a list of colours from a qualitative colorbrewer palette.

    Args:
        length: number of colours requested; clamped to the 3..9 range
            supported by the colorbrewer qualitative palettes.
        pallete: palette family name (e.g. 'Pastel1'). The parameter
            spelling is kept as-is for backward compatibility.

    Returns:
        list of [r, g, b] lists with channels scaled from 0..255 to 0..1,
        as expected by matplotlib.
    """
    from palettable.colorbrewer import qualitative as cb
    # Idiomatic clamp replacing the two conditional-expression reassignments.
    length = max(3, min(9, length))
    method = getattr(cb, pallete + '_' + str(length))
    # Nested comprehension replaces the manual accumulator loops.
    return [[channel / 255 for channel in rgb] for rgb in method.colors]
class cartography:
    """Cartopy-based plotting helpers for maps in an EPSG-coded projection."""
    def axes(self, n, s, w, e, epsg, ticks=1000):
        """Set standard axes parameters"""
        # Store the EPSG code; proj() below reads it.
        self.epsg = epsg
        ax = plt.axes(projection = self.proj())
        # NOTE(review): cartopy's set_extent expects [x0, x1, y0, y1], i.e.
        # [w, e, s, n]; this passes east before west -- confirm intentional.
        ax.set_extent([e, w, s, n], self.proj())
        # set ticks
        xticks = list(range(round(w), round(e), ticks))
        yticks = list(range(round(s), round(n), ticks))
        ax.tick_params(axis = 'both', which = 'major', labelsize = 6)
        ax.set_xticklabels(xticks, ha = 'left')
        ax.set_yticklabels(yticks, rotation = 90, va = 'bottom')
        ax.set_xticks(xticks, crs = self.proj())
        ax.set_yticks(yticks, crs = self.proj())
        # NOTE(review): the axes object is returned but never stored on self,
        # yet line()/plot() below read self.ax -- callers apparently must
        # assign the return value to .ax themselves; verify.
        return(ax)
    def proj(self):
        """Set projection parameters"""
        # Requires self.epsg, which is only set by axes(); calling proj()
        # first raises AttributeError.
        proj = crs.epsg(self.epsg)
        return(proj)
    def point(self, e, n, label = None, color = 'k', markersize=5, linewidth=0):
        """Plot a point"""
        # Draw a single marker; linewidth=0 suppresses any connecting line.
        p = plt.plot(e, n, color = color, marker = 'o', markeredgecolor = 'black', markeredgewidth = 0.2, markersize = markersize, label = label, linewidth=linewidth)
        # Annotate next to the marker only when a label is provided.
        if label:
            plt.annotate(label, xy = (e, n), xytext = (e, n), ha = 'right', va = 'top', fontsize = 8, weight = 'bold')
        return(p)
    def line(self, linestring, edgecolor = 'black', linewidth = 2):
        """Plot a shapely LineString by splitting it into two-point segments
        and adding them to self.ax as a MultiLineString."""
        ls = linestring
        line_good = []
        start_pt = list(ls.coords)[0]
        # Walk consecutive coordinate pairs, building one segment per pair.
        for i in range(1,len(ls.coords)):
            end_pt = list(ls.coords)[i]
            simple_line = (start_pt, end_pt)
            line_good.append(simple_line)
            start_pt = end_pt
        lines = MultiLineString(line_good)
        self.ax.add_geometries(lines, crs = self.proj(), edgecolor = edgecolor, linewidth = linewidth)
    def plot(self, geometry, facecolor = 'none', edgecolor = 'black', linewidth = 0.5):
        """Plot dataset using cartopy"""
        self.ax.add_geometries(geometry, crs = self.proj(), facecolor = facecolor, edgecolor = edgecolor, linewidth = linewidth)
def cards2bbox(n, s, w, e):
    """Cardinal boundaries to bounding box"""
    # Corners in the original order: lower-left, upper-left, upper-right,
    # lower-right.
    corners = [Point(w, s), Point(w, n), Point(e, n), Point(e, s)]
    return Polygon([[pt.x, pt.y] for pt in corners])
def clipdf(gdf, bbox):
    """Clip dataframe to a bounding box"""
    # Wrap the bbox geometry in a one-row GeoDataFrame, then intersect.
    clip = gpd.GeoDataFrame(gpd.GeoSeries(bbox), columns = ['geometry'])
    # spatial_overlays is used here in place of
    # gpd.overlay(gdf, clip, how='intersection').
    return spatial_overlays(gdf, clip)
| [
201,
198,
6738,
6383,
11081,
1330,
1067,
82,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
764,
1330,
15879,
201,
198,
6738,
285,
489,
62,
25981,
74,
896,
13,
897,
274,
62,
25928,
16,
1330,
787,
... | 2.100709 | 5,074 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
198,
6738,
42625,
14208,
13,
10414,
1330,
... | 3.111111 | 45 |
import time
#
# Enable metrics by setting builtins.metrics_enabled = True before importing metrics the first time.
#
# import builtins
# builtins.metrics_enabled = True
# import metrics
#
# You can add @timer to all methods you want to time; when you start up with metrics disabled, the
# function you decorate is unaltered. Only when metrics is enabled do you pay any runtime cost.
#
# Call metrics.log_metrics() periodically.
# This will cost no more than a single unconditional `pass` when metrics is disabled/
#
try:
global metrics_enabled
if metrics_enabled:
print('Enabling metrics instrumentation')
except NameError:
# Set False by default to remove runtime overhead of decorators and make everything else early-out
metrics_enabled = False
def timer(name):
    """
    @Decorator
    Record milliseconds per invocation when started with metrics_enabled.
    @:param name A string name for the function to be decorated.
    """
    # NOTE(review): ``arg_wrapper`` is not defined anywhere in this excerpt;
    # calling timer("...") as shown would raise NameError. The inner wrapper
    # definition appears to be missing (truncated source?) -- confirm against
    # the full module. When ``name`` is not a str, the function implicitly
    # returns None.
    if type(name) is str:
        return arg_wrapper
def atimer(name):
    """
    @Decorator
    Record milliseconds per invocation when started with metrics_enabled.
    @:param name A string name for the function to be decorated.
    """
    # NOTE(review): exact duplicate of timer() above (presumably intended as
    # the async variant), with the same undefined ``arg_wrapper`` reference --
    # confirm against the full module.
    if type(name) is str:
        return arg_wrapper
if metrics_enabled:
def measure(name, observation):
"""
Record some numeric observation.
Gets aggregated now, and later is printed when you call log_metrics()
"""
if name not in measurements:
measurements[name] = _StatisticSet()
measurements[name].observe(observation)
class Timer:
"""
Prefer @timer(name) to this as there is 0 runtime cost on that strategy when
metrics is disabled.
Tracks time for scope open to scope close (use in a `with Timer('name'):` block).
Emits the time into the stack tracker.
Get your results by calling log_metrics() periodically.
"""
def log_metrics(target_seconds=10) -> None:
"""
Call on a timer, like every 10 seconds or every 60 seconds. Whatever tickles your fancy.
"""
global last_timer_reset
start = time.monotonic_ns()
elapsed_nanos = start - last_timer_reset
if (start - last_timer_reset) < (target_seconds * 1000000000):
return
last_timer_reset = start
print('\n\n-------------- Metrics -------------------------------------------------------------------------------------------------')
# Print out the recursive formatted timer stack
timer_stack[0].print(elapsed_nanos / 1000000)
timer_stack[0].reset()
# Print out the literal measurements table
if len(measurements) > 0:
maxname = max(len(name) for name in measurements)
stackname_format = '{{:{}s}}'.format(maxname)
# Print measurement header
print('\n------------- Measurements ', end='')
for _ in range(maxname + 11*4):
print('-', end='')
print()
print('{{:>{}s}}'.format(maxname).format('Measurement'), end='')
print(' | {avg:>8s} | {min:>8s} | {max:>8s} | {count:>8s}'.format(
avg='avg', min='min', max='max', count='count'
))
for name, stats in measurements.items():
print('{stackname} | {avg:8.3f} | {min:8.3f} | {max:8.3f} | {count:8d}'.format(
stackname=stackname_format.format(name),
avg=stats.sum / max(1, stats.count),
min=stats.min,
max=stats.max,
count=stats.count,
))
stats.reset()
print('\nMetrics report completed in {}ms'.format((time.monotonic_ns() - start) / 1000000))
print('----------------------------------------------------------------------------------------------------------------------------\n\n')
########################################################################################
#
# Framework tools below. You probably only care about what's above here.
#
########################################################################################
measurements = {}
timer_stack = [_TimerNode('root')]
last_timer_reset = time.monotonic_ns()
else:
# Stubs so you don't have to change your code, just the global metrics_enabled boolean when you're doing perf work.
| [
11748,
640,
198,
198,
2,
198,
2,
27882,
20731,
416,
4634,
3170,
1040,
13,
4164,
10466,
62,
25616,
796,
6407,
878,
33332,
20731,
262,
717,
640,
13,
198,
2,
198,
2,
1330,
3170,
1040,
198,
2,
3170,
1040,
13,
4164,
10466,
62,
25616,
7... | 2.687875 | 1,666 |
from django.conf.urls import url
from . import views
# URL namespace for this app; routes reverse as 'auth:<name>'.
app_name = 'auth'

# Regex-based routes for voter/admin login, validation and logout views.
# NOTE(review): django.conf.urls.url is deprecated since Django 2.0 and
# removed in 4.0; on modern Django these would need re_path (or path) --
# confirm the project's Django version before upgrading.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^loginVoter/$', views.loginVoter, name='loginVoter'),
    url(r'^validateVoter/$', views.validateVoter, name='validateVoter'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^addAdmin/$', views.addAdmin, name='addAdmin'),
    url(r'^loginAdmin/$', views.loginAdmin, name='loginAdmin'),
    url(r'^validateAdmin/$', views.validateAdmin, name='validateAdmin'),
    #url(r'^profile/$', views.profileView, name='profileView'),
]
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
18439,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
5009,
13,
... | 2.575221 | 226 |