content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
"""A two-layer LSTM for character-level language modelling on Tolstoi's War and Peace."""
import tensorflow as tf
from ..datasets.tolstoi import tolstoi
from .testproblem import TestProblem
class tolstoi_char_rnn(TestProblem):
"""DeepOBS test problem class for a two-layer LSTM for character-level language
modelling (Char RNN) on Tolstoi's War and Peace.
Some network characteristics:
- ``128`` hidden units per LSTM cell
- sequence length ``50``
- cell state is automatically stored in variables between subsequent steps
- when the phase placeholder swithches its value from one step to the next,
the cell state is set to its zero value (meaning that we set to zero state
after each round of evaluation, it is therefore important to set the
evaluation interval such that we evaluate after a full epoch.)
Working training parameters are:
- batch size ``50``
- ``200`` epochs
- SGD with a learning rate of :math:`\\approx 0.1` works
Args:
batch_size (int): Batch size to use.
weight_decay (float): No weight decay (L2-regularization) is used in this
test problem. Defaults to ``None`` and any input here is ignored.
Attributes:
dataset: The DeepOBS data set class for Tolstoi.
train_init_op: A tensorflow operation initializing the test problem for the
training phase.
train_eval_init_op: A tensorflow operation initializing the test problem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the test problem for
evaluating on test data.
losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss
values.
regularizer: A scalar tf.Tensor containing a regularization term.
accuracy: A scalar tf.Tensor containing the mini-batch mean accuracy.
"""
def __init__(self, batch_size, weight_decay=None):
"""Create a new Char RNN test problem instance on Tolstoi.
Args:
batch_size (int): Batch size to use.
weight_decay (float): No weight decay (L2-regularization) is used in this
test problem. Defaults to ``None`` and any input here is ignored.
"""
super(tolstoi_char_rnn, self).__init__(batch_size, weight_decay)
if weight_decay is not None:
print(
"WARNING: Weight decay is non-zero but no weight decay is used",
"for this model.",
)
def set_up(self):
"""Set up the Char RNN test problem instance on Tolstoi."""
self.dataset = tolstoi(self._batch_size)
seq_length = 50
vocab_size = 83 # For War and Peace
x, y = self.dataset.batch
num_layers = 2
rnn_size = 128
input_keep_prob = tf.cond(
tf.equal(self.dataset.phase, tf.constant("train")),
lambda: tf.constant(0.8),
lambda: tf.constant(1.0),
)
output_keep_prob = tf.cond(
tf.equal(self.dataset.phase, tf.constant("train")),
lambda: tf.constant(0.8),
lambda: tf.constant(1.0),
)
# Create an embedding matrix, look up embedding of input
embedding = tf.get_variable("embedding", [vocab_size, rnn_size])
inputs = tf.nn.embedding_lookup(embedding, x)
# Split batch of input sequences along time, such that inputs[i] is a
# batch_size x embedding_size representation of the batch of characters
# at position i of this batch of sequences
inputs = tf.split(inputs, seq_length, axis=1)
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
# Make Multi LSTM cell
cells = []
for _ in range(num_layers):
cell = tf.contrib.rnn.LSTMCell(rnn_size)
cell = tf.contrib.rnn.DropoutWrapper(
cell,
input_keep_prob=input_keep_prob,
output_keep_prob=output_keep_prob,
)
cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
# Create RNN using the cell defined above, (including operations that store)
# the state in variables
self.state_variables, self.zero_states = self._get_state_variables(
self._batch_size, cell
)
outputs, new_states = tf.nn.static_rnn(
cell, inputs, initial_state=self.state_variables
)
with tf.control_dependencies(outputs):
state_update_op = self._get_state_update_op(
self.state_variables, new_states
)
# Reshape RNN output for multiplication with softmax layer
# print "Shape of outputs", [output.get_shape() for output in outputs]
with tf.control_dependencies(state_update_op):
output = tf.reshape(tf.concat(outputs, 1), [-1, rnn_size])
# print "Shape of output", output.get_shape()
# Apply softmax layer
with tf.variable_scope("rnnlm"):
softmax_w = tf.get_variable("softmax_w", [rnn_size, vocab_size])
softmax_b = tf.get_variable("softmax_b", [vocab_size])
logits = tf.matmul(output, softmax_w) + softmax_b
# print logits.get_shape()
# Reshape logits to batch_size x seq_length x vocab size
reshaped_logits = tf.reshape(
logits, [self._batch_size, seq_length, vocab_size]
)
# print "Shape of reshaped logits", reshaped_logits.get_shape()
# Create vector of losses
self.losses = tf.contrib.seq2seq.sequence_loss(
reshaped_logits,
y,
weights=tf.ones([self._batch_size, seq_length], dtype=tf.float32),
average_across_timesteps=True,
average_across_batch=False,
)
predictions = tf.argmax(reshaped_logits, 2)
correct_prediction = tf.equal(predictions, y)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.regularizer = tf.losses.get_regularization_loss()
self.train_init_op = tf.group(
[
self.dataset.train_init_op,
self._get_state_update_op(
self.state_variables, self.zero_states
),
]
)
self.train_eval_init_op = tf.group(
[
self.dataset.train_eval_init_op,
self._get_state_update_op(
self.state_variables, self.zero_states
),
]
)
self.train_eval_init_op = tf.group(
[
self.dataset.valid_init_op,
self._get_state_update_op(
self.state_variables, self.zero_states
),
]
)
self.test_init_op = tf.group(
[
self.dataset.test_init_op,
self._get_state_update_op(
self.state_variables, self.zero_states
),
]
)
def _get_state_variables(self, batch_size, cell):
"""For each layer, get the initial state and make a variable out of it
to enable updating its value.
Args:
batch_size (int): Batch size.
cell (tf.BasicLSTMCell): LSTM cell to get the initial state for.
Returns:
tupel: Tupel of the state variables and there zero states.
"""
# For each layer, get the initial state and make a variable out of it
# to enable updating its value.
zero_state = cell.zero_state(batch_size, tf.float32)
state_variables = []
for state_c, state_h in zero_state:
state_variables.append(
tf.contrib.rnn.LSTMStateTuple(
tf.Variable(state_c, trainable=False),
tf.Variable(state_h, trainable=False),
)
)
# Return as a tuple, so that it can be fed to dynamic_rnn as an initial state
return tuple(state_variables), zero_state
def _get_state_update_op(self, state_variables, new_states):
"""Add an operation to update the train states with the last state tensors
Args:
state_variables (tf.Variable): State variables to be updated
new_states (tf.Variable): New state of the state variable.
Returns:
tf.Operation: Return a tuple in order to combine all update_ops into a
single operation. The tuple's actual value should not be used.
"""
# Add an operation to update the train states with the last state tensors
update_ops = []
for state_variable, new_state in zip(state_variables, new_states):
# Assign the new state to the state variables on this layer
update_ops.extend(
[
state_variable[0].assign(new_state[0]),
state_variable[1].assign(new_state[1]),
]
)
# Return a tuple in order to combine all update_ops into a single operation.
# The tuple's actual value should not be used.
return tf.tuple(update_ops)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
32,
734,
12,
29289,
406,
2257,
44,
329,
2095,
12,
5715,
3303,
38591,
319,
20054,
301,
23013,
338,
1810,
290,
12689,
526,
15931,
198,
198,
11748,
11192,
273,
11125... | 2.2374 | 4,107 |
from io import BytesIO
from time import sleep
from PyQt5.QtGui import QPixmap
from picamera import PiCamera
# Create an in-memory stream
my_stream = BytesIO()
camera = PiCamera()
camera.start_preview()
# Camera warm-up time
sleep(100)
camera.capture(my_stream, 'jpeg')
qp = QPixmap()
qp.loadFromData(my_stream) | [
6738,
33245,
1330,
2750,
4879,
9399,
201,
198,
6738,
640,
1330,
3993,
201,
198,
201,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
8205,
72,
1330,
1195,
47,
844,
8899,
201,
198,
6738,
8301,
18144,
1330,
13993,
35632,
201,
198,
201,
198,
... | 2.523077 | 130 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Guillaume Fournier <fournierg@gmail.com>
#
# This file is forked from `smart_open` project.
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
import os
SCHEME = 'file'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
1962,
5049,
2454,
376,
1798,
959,
1279,
69,
1798,
959,
70,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
770,
2393,
318,
329,
9091,
... | 2.776596 | 94 |
#!/usr/bin/env python
__author__ = "Jamie Farnes"
__email__ = "jamie.farnes@oerc.ox.ac.uk"
import os
import time as t
import subprocess
import argparse
import numpy as np
from processing_components.image.operations import import_image_from_fits, export_image_to_fits, \
qa_image
from processing_components.visibility.operations import append_visibility
from ska_sip.uvoperations.filter import uv_cut, uv_advice
from ska_sip.telescopetools.initinst import init_inst
from ska_sip.loaddata.ms import load
from ska_sip.pipelines.dprepb import dprepb_imaging, arl_data_future
import dask
import dask.array as da
from dask.distributed import Client
from distributed.diagnostics import progress
from distributed import wait
from confluent_kafka import Producer
import pickle
def main(ARGS):
"""
Initialising launch sequence.
"""
# ------------------------------------------------------
# Print some stuff to show that the code is running:
print("")
os.system("printf 'A demonstration of a \033[5mDPrepB/DPrepC\033[m SDP pipeline\n'")
print("")
# Set the directory for the moment images:
MOMENTS_DIR = ARGS.outputs + '/MOMENTS'
# Check that the output directories exist, if not then create:
os.makedirs(ARGS.outputs, exist_ok=True)
os.makedirs(MOMENTS_DIR, exist_ok=True)
# Set the polarisation definition of the instrument:
POLDEF = init_inst(ARGS.inst)
# Setup Variables for SIP services
# ------------------------------------------------------
# Define the Queue Producer settings:
if ARGS.queues:
queue_settings = {'bootstrap.servers': 'scheduler:9092', 'message.max.bytes': 100000000}
# Setup the Confluent Kafka Queue
# ------------------------------------------------------
if ARGS.queues:
# Create an SDP queue:
sip_queue = Producer(queue_settings)
# Define a Data Array Format
# ------------------------------------------------------
# Setup the Dask Cluster
# ------------------------------------------------------
starttime = t.time()
dask.config.set(get=dask.distributed.Client.get)
client = Client(ARGS.daskaddress) # scheduler for Docker container, localhost for P3.
print("Dask Client details:")
print(client)
print("")
# Define channel range for 1 subband, each containing 40 channels:
channel_range = np.array(range(int(ARGS.channels)))
# Load the data into memory:
print("Loading data:")
print("")
vis1 = [load('%s/%s' % (ARGS.inputs, ARGS.ms1), range(channel, channel+1), POLDEF) \
for channel in range(0, int(ARGS.channels))]
vis2 = [load('%s/%s' % (ARGS.inputs, ARGS.ms2), range(channel, channel+1), POLDEF) \
for channel in range(0, int(ARGS.channels))]
# Prepare Measurement Set
# ------------------------------------------------------
# Combine MSSS snapshots:
vis_advice = append_visibility(vis1[0], vis2[0])
# Apply a uv-distance cut to the data:
vis_advice = uv_cut(vis_advice, float(ARGS.uvcut))
npixel_advice, cell_advice = uv_advice(vis_advice, float(ARGS.uvcut), float(ARGS.pixels))
# Begin imaging via the Dask cluster
# ------------------------------------------------------
# Submit data for each channel to the client, and return an image:
# Scatter all the data in advance to all the workers:
print("Scatter data to workers:")
print("")
big_job = [client.scatter(gen_data(channel)) for channel in channel_range]
# Submit jobs to the cluster and create a list of futures:
futures = [client.submit(dprepb_imaging, big_job[channel], pure=False, retries=3) \
for channel in channel_range]
print("Imaging on workers:")
# Watch progress:
progress(futures)
# Wait until all futures are complete:
wait(futures)
# Check that no futures have errors, if so resubmit:
for future in futures:
if future.status == 'error':
print("ERROR: Future", future, "has 'error' status, as:")
print(client.recreate_error_locally(future))
print("Rerunning...")
print("")
index = futures.index(future)
futures[index].cancel()
futures[index] = client.submit(dprepb_imaging, big_job[index], pure=False, retries=3)
# Wait until all futures are complete:
wait(futures)
# Gather results from the futures:
results = client.gather(futures, errors='raise')
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
print("Adding QA to queue:")
for result in results:
sip_queue.produce('qa', pickle.dumps(qa_image(result), protocol=2))
sip_queue.flush()
# Return the data element of each ARL object, as a Dask future:
futures = [client.submit(arl_data_future, result, pure=False, retries=3) for result in results]
progress(futures)
wait(futures)
# Calculate the Moment images
# ------------------------------------------------------
# Now use 'distributed Dask arrays' in order to parallelise the Moment image calculation:
# Construct a small Dask array for every future:
print("")
print("Calculating Moment images:")
print("")
arrays = [da.from_delayed(future, dtype=np.dtype('float64'), shape=(1, 4, 512, 512)) \
for future in futures]
# Stack all small Dask arrays into one:
stack = da.stack(arrays, axis=0)
# Combine chunks to reduce overhead - is initially (40, 1, 4, 512, 512):
stack = stack.rechunk((1, 1, 4, 64, 64))
# Spread the data around on the cluster:
stack = client.persist(stack)
# Data is now coordinated by the single logical Dask array, 'stack'.
# Save the Moment images:
print("Compute Moment images and save to disk:")
print("")
# First generate a template:
image_template = import_image_from_fits('%s/imaging_dirty_WStack-%s.fits' % (ARGS.outputs, 0))
# Output mean images:
# I:
image_template.data = stack[:, :, 0, :, :].mean(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Mean-%s.fits' % (MOMENTS_DIR, 'I'))
# Q:
image_template.data = stack[:, :, 1, :, :].mean(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Mean-%s.fits' % (MOMENTS_DIR, 'Q'))
# U:
image_template.data = stack[:, :, 2, :, :].mean(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Mean-%s.fits' % (MOMENTS_DIR, 'U'))
# P:
image_template.data = da.sqrt((da.square(stack[:, :, 1, :, :]) + \
da.square(stack[:, :, 2, :, :]))).mean(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Mean-%s.fits' % (MOMENTS_DIR, 'P'))
# Output standard deviation images:
# I:
image_template.data = stack[:, :, 0, :, :].std(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'I'))
# Q:
image_template.data = stack[:, :, 1, :, :].std(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'Q'))
# U:
image_template.data = stack[:, :, 2, :, :].std(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'U'))
# P:
image_template.data = da.sqrt((da.square(stack[:, :, 1, :, :]) + \
da.square(stack[:, :, 2, :, :]))).std(axis=0).compute()
# Run QA on ARL objects and produce to queue:
if ARGS.queues:
sip_queue.produce('qa', pickle.dumps(qa_image(image_template), protocol=2))
# Export the data to disk:
export_image_to_fits(image_template, '%s/Std-%s.fits' % (MOMENTS_DIR, 'P'))
# Flush queue:
if ARGS.queues:
sip_queue.flush()
# Make a tarball of moment images:
subprocess.call(['tar', '-cvf', '%s/moment.tar' % (MOMENTS_DIR), '%s/' % (MOMENTS_DIR)])
subprocess.call(['gzip', '-9f', '%s/moment.tar' % (MOMENTS_DIR)])
endtime = t.time()
print(endtime-starttime)
# Define the arguments for the pipeline:
AP = argparse.ArgumentParser()
AP.add_argument('-d', '--daskaddress', help='Address of the Dask scheduler [default scheduler:8786]', default='scheduler:8786')
AP.add_argument('-c', '--channels', help='Number of channels to process [default 40]', default=40)
AP.add_argument('-inp', '--inputs', help='Input data directory [default /data/inputs]', default='/data/inputs')
AP.add_argument('-out', '--outputs', help='Output data directory [default /data/outputs]', default='/data/outputs')
AP.add_argument('-ms1', '--ms1', help='Measurement Set 1 [default sim-1.ms]', default='sim-1.ms')
AP.add_argument('-ms2', '--ms2', help='Measurement Set 2 [default sim-2.ms]', default='sim-2.ms')
AP.add_argument('-q', '--queues', help='Enable Queues? [default False]', default=True)
AP.add_argument('-p', '--plots', help='Output diagnostic plots? [default False]', default=False)
AP.add_argument('-2d', '--twod', help='2D imaging [True] or wstack imaging [False]? [default False]', default=False)
AP.add_argument('-uv', '--uvcut', help='Cut-off for the uv-data [default 450]', default=450.0)
AP.add_argument('-a', '--angres', help='Force the angular resolution to be consistent across the band, in arcmin FWHM [default 8.0]', default=8.0)
AP.add_argument('-pix', '--pixels', help='The number of pixels/sampling across the observing beam [default 5.0]', default=5.0)
AP.add_argument('-ins', '--inst', help='Instrument name (for future use) [default LOFAR]', default='LOFAR')
ARGS = AP.parse_args()
main(ARGS)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
796,
366,
48337,
49230,
274,
1,
198,
834,
12888,
834,
796,
366,
39159,
494,
13,
69,
1501,
274,
31,
78,
2798,
13,
1140,
13,
330,
13,
2724,
1,
198,
198,
11748,
... | 2.5866 | 4,209 |
import random
import numpy as np
import pytest
from maplayerpy.voronoi_layer import (VoronoiLayer, get_random_points_spaced,
get_random_voronoi_layer,
get_voronoi_layer)
@pytest.mark.voronoi_img
| [
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
17266,
7829,
9078,
13,
20867,
261,
23013,
62,
29289,
1330,
357,
53,
273,
261,
23013,
49925,
11,
651,
62,
25120,
62,
13033,
62,
2777,
2286,
11,
1... | 1.849673 | 153 |
import calendar
from datetime import date
days = dict(zip(list(calendar.day_name),range(7)))
week_dict = {
"1st":0,
"2nd":1,
"3rd":2,
"4th":3,
"5th":4,
"teenth":[13,14,15,16,17,18,19]
}
| [
11748,
11845,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
12545,
796,
8633,
7,
13344,
7,
4868,
7,
9948,
9239,
13,
820,
62,
3672,
828,
9521,
7,
22,
22305,
198,
198,
10464,
62,
11600,
796,
1391,
198,
197,
1,
16,
301,
1298,
15,
11,
... | 1.960396 | 101 |
"""A (very) simple banning & rate limiting extension for Flask.
"""
from inspect import signature
import time
from collections import deque
from functools import wraps
from flask import request
| [
37811,
32,
357,
548,
8,
2829,
19479,
1222,
2494,
15637,
7552,
329,
46947,
13,
198,
37811,
198,
6738,
10104,
1330,
9877,
198,
11748,
640,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42903,... | 4.377778 | 45 |
from django.conf.urls import *
urlpatterns = patterns('messaging.views',
url(r'^contact/$', 'contact', name='contact'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
37348,
3039,
13,
33571,
3256,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
32057,
32624,
3256,
705,
32057,
3256,
1438,
11639,
32057,
338... | 2.822222 | 45 |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 14:02:03 2021
@author: Hatlab_3
for getting many files out of a particular directory with Wolfie's directory structure'
"""
import easygui
import os
from plottr.apps.autoplot import autoplotDDH5, script, main
from plottr.data.datadict_storage import all_datadicts_from_hdf5
cwd = r'E:\Data\Cooldown_20210104\pitchfork_freq_sweeps\2_kappa'
res = find_all_ddh5(cwd)
for filename in res:
main(filename, 'data') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
3158,
220,
352,
1478,
25,
2999,
25,
3070,
33448,
198,
198,
31,
9800,
25,
10983,
23912,
62,
18,
198,
198,
1640,
1972,
867,
3696,
503,
286,... | 2.634831 | 178 |
# -*- coding: utf-8 -*-
from fabric.api import env
from fabric.utils import puts
from fabric.colors import *
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
9664,
13,
15042,
1330,
17365,
198,
6738,
9664,
13,
26791,
1330,
7584,
198,
6738,
9664,
13,
4033,
669,
1330,
1635,
198
] | 3.027778 | 36 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-13 16:40
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
16,
319,
1584,
12,
2999,
12,
1485,
1467,
25,
1821,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.719298 | 57 |
"""Gremlin Server exceptions."""
__all__ = ("RequestError", "GremlinServerError")
| [
37811,
38,
17244,
9652,
13269,
526,
15931,
198,
198,
834,
439,
834,
796,
5855,
18453,
12331,
1600,
366,
38,
17244,
10697,
12331,
4943,
628,
628
] | 3.44 | 25 |
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import stochastic_pooling
fX = theano.config.floatX
| [
11748,
9686,
13,
31391,
355,
299,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
262,
5733,
198,
11748,
262,
5733,
13,
83,
22854,
355,
309,
198,
198,
11748,
5509,
5733,
198,
11748,
5509,
5733,
13,
77,
4147,
355,
256,
77,
198,
6... | 2.902778 | 72 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 12:32:00 2022
Author: Gianluca Bianco
"""
#################################################
# Libraries
#################################################
import sys, os
import functions as ft
from termcolor import colored
import numpy as np
from numpy import Infinity, pi
from arsenalgear.mathematics import Hermite, Chebyshev, Legendre, Laguerre
#################################################
# Main program
#################################################
if __name__ == "__main__":
main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2365,
2310,
1105,
25,
2624,
25,
405,
33160,
198,
13838,
25,
30851,
75,
43120,
41227,... | 3.835526 | 152 |
import pandas as pd
import json
from io import StringIO
df = pd.read_csv('data/2012_05_06.csv')
nan_index = df[(df['type'] == 'MemberEvent') & df.isnull().any(axis=1)].index
df = df.drop(nan_index)
df = df.apply(filter_data, axis=1)
print("\n Writing to CSV file...")
df.to_csv('filter/2012_05_06.csv', sep='\t')
print("\n Done!")
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
33918,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
7568,
796,
279,
67,
13,
961,
62,
40664,
10786,
7890,
14,
6999,
62,
2713,
62,
3312,
13,
40664,
11537,
198,
12647,
62,
9630,
796,
47764,
... | 2.470588 | 136 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
##
## Compatible with Python >= 2.6, < 3.*
##
##
## Created by Andrew Podkovyrin, 2018
## Copyright © 2018 Axe Foundation. All rights reserved.
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
## THE SOFTWARE.
##
import os
import urllib2
import json
import plistlib
import socket
import struct
import sqlite3
# configuration
PLIST_PATH = '../AxeWallet/FixedPeers.plist'
API_HTTP_URL = 'https://www.axeninja.pl/data/masternodeslistfull-0.json'
LOCAL_MASTERNODES_FILE = 'masternodeslistfull-0.json'
SOCKET_CONNECTION_TIMEOUT = 3
MASTERNODE_DEFAULT_PORT = 9937
MASTERNODE_MIN_PROTOCOL = 70208
FIXED_PEERS_COUNT = 100
# global in-memory database
CONNECTION = sqlite3.connect(":memory:")
CONNECTION.executescript("""
CREATE TABLE masternodes (ip TEXT,
port INTEGER,
portcheck BOOLEAN,
countrycode TEXT,
activeseconds INTEGER,
protocol INTEGER
);
CREATE INDEX index_ip on masternodes (ip);
CREATE INDEX index_port on masternodes (port);
""")
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
2235,
198,
2235,
220,
3082,
16873,
351,
11361,
18189,
362,
13,
21,
11,
1279,
513,
15885,
198,
2235,
198,
2235,
198,
2235,
... | 2.780151 | 796 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# SKR03
# =====
# Dieses Modul bietet Ihnen einen deutschen Kontenplan basierend auf dem SKR03.
# Gemäss der aktuellen Einstellungen ist ein neues Unternehmen in Odoo
# Umsatzsteuerpflichtig. Zahlreiche Erlös- und Aufwandskonten enthalten
# bereits eine zugeordnete Steuer. Hierdurch wird für diese Konten bereits
# die richtige Vorsteuer (Eingangsrechnungen) bzw. Umsatzsteuer
# (Ausgangsrechnungen) automatisch ausgewählt.
#
# Die Zuordnung von Steuerkonten zu Produkten und / oder Sachkonten kann
# für den jeweiligen betrieblichen Anwendungszweck überarbeitet oder
# auch erweitert werden.
# Die mit diesem Kontenrahmen installierten Steuerschlüssel (z.B. 19%, 7%,
# steuerfrei) können hierzu bei den Produktstammdaten hinterlegt werden
# (in Abhängigkeit der Steuervorschriften). Die Zuordnung erfolgt auf
# dem Aktenreiter Finanzbuchhaltung (Kategorie: Umsatzsteuer / Vorsteuer).
# Die Zuordnung der Steuern für Ein- und Ausfuhren aus EU Ländern, sowie auch
# für den Ein- und Verkauf aus und in Drittländer sollten beim Partner
# (Lieferant / Kunde) hinterlegt werden (in Anhängigkeit vom Herkunftsland
# des Lieferanten/Kunden). Diese Zuordnung ist 'höherwertig' als
# die Zuordnung bei Produkten und überschreibt diese im Einzelfall.
#
# Zur Vereinfachung der Steuerausweise und Buchung bei Auslandsgeschäften
# erlaubt Odoo ein generelles Mapping von Steuerausweis und Steuerkonten
# (z.B. Zuordnung 'Umsatzsteuer 19%' zu 'steuerfreie Einfuhren aus der EU')
# zwecks Zuordnung dieses Mappings zum ausländischen Partner (Kunde/Lieferant).
{
'name': 'Germany SKR03 - Accounting',
'version': '3.0',
'author': 'openbig.org',
'website': 'http://www.openbig.org',
'category': 'Localization',
'description': """
Dieses Modul beinhaltet einen deutschen Kontenrahmen basierend auf dem SKR03.
==============================================================================
German accounting chart and localization.
""",
'depends': ['l10n_de'],
'data': [
'data/l10n_de_skr03_chart_data.xml',
'data/account.account.template.csv',
'data/l10n_de_skr03_chart_post_data.xml',
'data/account_data.xml',
'data/account_tax_fiscal_position_data.xml',
'data/account_reconcile_model_template.xml',
'data/account_chart_template_data.xml',
],
'auto_install': True
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
10529,
2238,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
2,
14277,
49,
3070,
198,
2,
29335,
198,
198,
2,
360,
444,
2... | 2.389105 | 1,028 |
from statistics import mean
n, a, b = [int(_) for _ in input().split()]
values = [int(v) for v in input().split()]
values.sort(reverse=True)
maximum_mean = mean(values[:a])
print(maximum_mean)
min_value = values[:a][-1]
accepted = values[:a].count(min_value)
full = values.count(min_value)
print(cmb(full, accepted))
| [
6738,
7869,
1330,
1612,
201,
198,
201,
198,
77,
11,
257,
11,
275,
796,
685,
600,
28264,
8,
329,
4808,
287,
5128,
22446,
35312,
3419,
60,
201,
198,
27160,
796,
685,
600,
7,
85,
8,
329,
410,
287,
5128,
22446,
35312,
3419,
60,
201,
... | 2.459854 | 137 |
from decimal import Decimal
from exchangelib.ewsdatetime import EWSDateTime, EWSTimeZone, UTC_NOW
from exchangelib.folders import Tasks
from exchangelib.items import Task
from .test_basics import CommonItemTest
| [
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
9933,
8368,
571,
13,
15515,
19608,
8079,
1330,
43255,
10305,
378,
7575,
11,
43255,
2257,
524,
26961,
11,
18119,
62,
45669,
198,
6738,
9933,
8368,
571,
13,
11379,
364,
1330,
309,
6791,
198,
... | 3.451613 | 62 |
# My Exception
print('hello')
raise Other('a') | [
2,
2011,
35528,
198,
4798,
10786,
31373,
11537,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
40225,
3819,
10786,
64,
11537
] | 2.391304 | 23 |
from statsmodels.tsa.stattools import adfuller,kpss
import warnings
def determine_interval(intervals,df,labels):
"""
@Author: Kenneth Brezinski, Department of Electrical and Computer Engineering
args:
df: pandas dataframe containing your time series
intervals: how many subintervals will be returned
labels: a (2,1) list containing the strings of your two dataframe columns, e.g. ['Time','Bits']
sensitivity: depending on the sensitivity of your distribution, may need to adjust to find indices
return:
appLists: a list of lists containing your dataframe separated into intervals
"""
appLists = []
init = 0
stopCond = max(df[labels[0]])/intervals
sensitivity = 1E-9
dfb = []
while len(dfb) != intervals-1:
print(len(dfb))
dfb = df[df[labels[0]] % stopCond < sensitivity].index
sensitivity *= 10
for item in dfb[1:]:
appLists.append(df[labels[1]][init:item])
init = item
return appLists
| [
198,
6738,
9756,
27530,
13,
912,
64,
13,
301,
1078,
10141,
1330,
512,
12853,
263,
11,
74,
79,
824,
198,
11748,
14601,
198,
198,
4299,
5004,
62,
3849,
2100,
7,
3849,
12786,
11,
7568,
11,
23912,
1424,
2599,
198,
220,
220,
220,
37227,
... | 2.712401 | 379 |
from annotation_utils.coco.structs import COCO_Dataset
dataset = COCO_Dataset.load_from_path('measure_coco/measure/output.json')
dataset.save_video(
save_path='merged_mask_measure_viz.mp4',
show_details=True,
fps=3
) | [
6738,
23025,
62,
26791,
13,
66,
25634,
13,
7249,
82,
1330,
327,
4503,
46,
62,
27354,
292,
316,
198,
198,
19608,
292,
316,
796,
327,
4503,
46,
62,
27354,
292,
316,
13,
2220,
62,
6738,
62,
6978,
10786,
1326,
5015,
62,
66,
25634,
14,... | 2.267327 | 101 |
import gzip
import pickle
import struct
from warnings import warn
import math
import os
from Chamaeleo.utils.monitor import Monitor
| [
11748,
308,
13344,
198,
11748,
2298,
293,
198,
11748,
2878,
198,
6738,
14601,
1330,
9828,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
198,
6738,
609,
1689,
11129,
78,
13,
26791,
13,
41143,
1330,
18289,
628,
628,
628,
628,
628,
198
] | 3.512195 | 41 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
from hgvs.exceptions import HGVSValidationError
import hgvs.dataproviders.uta
import hgvs.variantmapper
import hgvs.parser
import hgvs.validator
db_dir = ['tests/data/sample_data']
hdp = hgvs.dataproviders.uta.connect()
class Test_HGVSValidator(unittest.TestCase):
"""Validator wrapper class tests (most testing is handled by the component classes)"""
def test_wrapper(self):
"""Test that validator wrapper is working"""
self.assertTrue(self.vr.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.6C>A')))
@attr(tags=["quick","validation"])
class Test_HGVSIntrinsicValidator(unittest.TestCase):
"""Tests for internal validation"""
def test_start_lte_end(self):
"""Test if start position is less <= end position"""
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561662C>T')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561662_36561663insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('NM_01234.5:c.76_77insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54_123+55insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54A>T')))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.BASE_RANGE_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561664_36561663A>T'))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.BASE_RANGE_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('NM_000277.1:c.*1_2delAG'))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.OFFSET_RANGE_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+56_123+55A>T'))
def test_ins_length_is_one(self):
"""Test if insertion length = 1"""
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561662_36561663insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.76_77insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54_123+55insT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123-54_123-53insT')))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.INS_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.76_78insTT'))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.INS_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54_123+56insT'))
def test_del_length(self):
"""Test if del length agrees with position range"""
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.76_78delACT')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54_123+55delTA'))) # <-- haha "delta"
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.DEL_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.76_78del'))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.DEL_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.76_78delACTACAT'))
def test_sub(self):
"""Test substitution ref != alt"""
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561662C>T')))
self.assertTrue(self.validate_int.validate(self.hp.parse_hgvs_variant('AC_01234.5:c.123+54A>T')))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.SUB_ERROR_MSG):
self.validate_int.validate(self.hp.parse_hgvs_variant('NC_000007.13:g.36561662_36561663T>T'))
@attr(tags=["validation"])
class Test_HGVSExtrinsicValidator(unittest.TestCase):
"""Tests for external validation"""
def test_valid_ref(self):
"""Test if reference seqeuence is valid. Uses sample_data in tests directory"""
self.assertTrue(self.validate_ext.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.6C>A')))
self.assertTrue(self.validate_ext.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.-38T>A')))
self.assertTrue(self.validate_ext.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.*3C>G')))
self.assertTrue(self.validate_ext.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.435_440delCTGCTG')))
#self.assertTrue(self.validate_ext.validate(self.hp.parse_hgvs_variant('NP_001005405.1:p.Gly2Ser')))
with self.assertRaisesRegexp(HGVSValidationError, hgvs.validator.SEQ_ERROR_MSG):
self.validate_ext.validate(self.hp.parse_hgvs_variant('NM_001005405.2:c.435_440delCTGCT'))
if __name__ == '__main__':
unittest.main()
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
9686,
13... | 2.291977 | 2,555 |
from qgis.core import *
from PyQt5.QtCore import *
# This is necessary for the processing to work ### Otherwise get "algorithm not found" error
from qgis.analysis import QgsNativeAlgorithms
import sys
import os
QgsApplication.setPrefixPath("/usr",True)
# Starts the application, with False to not launch the GUI
app = QgsApplication([], False)
app.initQgis()
sys.path.append('/usr/share/qgis/python/plugins')
# Import and initialize Processing framework
from processing.core.Processing import Processing
import processing
Processing.initialize()
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
# Read config file
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
atlas_dataset = config['DEFAULT']['Atlas-Dataset'] # exact file path
shapefile = config['DEFAULT']['Shapefile'] # exact file path of shape file
output_file = config['DEFAULT']['Output-File']
shape_file_year = config['DEFAULT']['Shape-File-Year']
# This is the actual QGIS part of the script and it works as a python script in the QGIS GUI
uri = 'file://{}?d?delimiter=,type=csv&detectTypes=yes&xField=longtitude&yField=latitude&crs=EPSG:4326&spatialIndex=no&subsetIndex=no&watchFile=no'.format(atlas_dataset)
layer_csv = QgsVectorLayer(uri,'somename','delimitedtext')
if not layer_csv.isValid():
print('atlas dataset failed to load')
uri2 = shapefile
overlay_er = QgsVectorLayer(uri2,'somename2','ogr')
if not overlay_er.isValid():
print('sa2 shapefile layer failed to load')
if shape_file_year == "2016":
params = {
'INPUT_FIELDS' : [],\
'OUTPUT' : output_file,\
'OVERLAY' : overlay_er,\
'OVERLAY_FIELDS' : ['SA2_MAIN16','SA2_5DIG16','SA2_NAME16'],\
'INPUT' : layer_csv}
elif shape_file_year == "2011":
params = {
'INPUT_FIELDS' : [],\
'OUTPUT' : output_file,\
'OVERLAY' : overlay_er,\
'OVERLAY_FIELDS' : ['SA2_MAIN11','SA2_5DIG11','SA2_NAME11'],\
'INPUT' : layer_csv}
else:
print("shape file year is wrong")
processing.run("native:intersection",params)
app.exitQgis()
| [
6738,
10662,
70,
271,
13,
7295,
1330,
1635,
201,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
1635,
201,
198,
201,
198,
2,
770,
318,
3306,
329,
262,
7587,
284,
670,
44386,
15323,
651,
366,
282,
42289,
407,
1043,
1,
4049,
... | 2.576271 | 826 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
| [
2,
1398,
7560,
416,
1024,
11008,
36,
3712,
17953,
5005,
11008,
3620,
375,
2261,
4863,
36392,
42,
10267,
198,
6738,
8265,
62,
74,
896,
13,
85,
30488,
62,
15813,
13,
19816,
1040,
1330,
17427,
36392,
42,
9487,
26796,
14881,
198,
11748,
4... | 3.090909 | 44 |
#!/usr/bin/env
# author: stephanie hyland
# make a list of PIDS with non-empty, non-all-unknown endpoint data
import pandas as pd
import glob
import ipdb
import paths
def check_list():
"""
slow but w/e
"""
pids_with_endpoints = open(paths.derived_dir + 'patients_with_endpoints_v6b.txt', 'r')
pids_without_endpoints = open(paths.derived_dir + 'patients_without_endpoints_v6b.txt', 'r')
for line in pids_with_endpoints:
pid, f = line.strip('\n').split(',')
pdf = pd.read_hdf(f, where='PatientID == ' + str(pid), columns=['PatientID', 'endpoint_status'])
assert pdf.shape[0] > 0
if len(pdf['endpoint_status'].unique()) < 2:
assert not 'unknown' in pdf['endpoint_status'].unique()
for line in pids_with_endpoints:
pid, f = line.strip('\n').split(',')
pdf = pd.read_hdf(f, where='PatientID == ' + str(pid), columns=['PatientID', 'endpoint_status'])
if not pdf.shape[0] == 0:
assert len(pdf['endpoint_status'].unique()) == 1
assert 'unknown' in pdf['endpoint_status'].unique()
print('all good')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
198,
2,
1772,
25,
2239,
7637,
494,
2537,
1044,
198,
2,
787,
257,
1351,
286,
350,
14255,
351,
1729,
12,
28920,
11,
1729,
12,
439,
12,
34680,
36123,
1366,
198,
198,
11748,
19798,
292,
355,
279,
... | 2.331942 | 479 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the cloudlib.project module."""
import subprocess
from unittest import mock
from absl.testing import absltest
from googleapiclient import errors
from django_cloud_deploy.cloudlib import project
from django_cloud_deploy.tests.unit.cloudlib.lib import http_fake
class OrganizationsFake(object):
"""A fake object returned by ...organizations()."""
class ProjectsFake(object):
"""A fake object returned by ...projects()."""
class ServiceFake:
"""A fake Resource returned by discovery.build('cloudresourcemanager', .."""
class ProjectClientTestCase(absltest.TestCase):
"""Test case for project.ProjectClient."""
@mock.patch('subprocess.check_call')
@mock.patch('subprocess.check_call')
@mock.patch('subprocess.check_call')
| [
2,
15069,
2864,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330... | 3.546174 | 379 |
from couchbase_core._ixmgmt import N1QL_PRIMARY_INDEX, IxmgmtRequest, N1qlIndex
from couchbase_core.bucketmanager import BucketManager
from couchbase.options import OptionBlock, OptionBlockTimeOut, forward_args, timedelta
from typing import *
from couchbase.management.generic import GenericManager
import attr
from attr.validators import instance_of as io, deep_mapping as dm
from couchbase_core._pyport import Protocol
from couchbase.exceptions import HTTPException, ErrorMapper, AnyPattern, QueryIndexAlreadyExistsException, \
QueryIndexNotFoundException, DocumentNotFoundException, DocumentExistsException
@QueryErrorMapper.wrap
@attr.s
class QueryIndex(Protocol):
"""The QueryIndex protocol provides a means of mapping a query index into an object."""
name = attr.ib(validator=io(str)) # type: str
is_primary = attr.ib(validator=io(bool)) # type: bool
type = attr.ib(validator=io(IndexType), type=IndexType) # type: IndexType
state = attr.ib(validator=io(str)) # type: str
keyspace = attr.ib(validator=io(str)) # type: str
index_key = attr.ib(validator=io(Iterable)) # type: Iterable[str]
condition = attr.ib(validator=io(str)) # type: str
@classmethod
| [
6738,
18507,
8692,
62,
7295,
13557,
844,
11296,
16762,
1330,
399,
16,
9711,
62,
4805,
3955,
13153,
62,
12115,
6369,
11,
314,
87,
11296,
16762,
18453,
11,
399,
16,
13976,
15732,
198,
6738,
18507,
8692,
62,
7295,
13,
27041,
316,
37153,
... | 3.062814 | 398 |
__bootstrap__()
| [
834,
18769,
26418,
834,
3419,
198
] | 2.666667 | 6 |
"""construct the sampler"""
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from collections import defaultdict
import numpy as np
import mindspore.dataset as ds
class RandomIdentitySampler(ds.Sampler):
"""
Randomly sample N identities, then for each identity,
randomly sample K instances, therefore batch size is N*K
Args:
data_source (Dataset): dataset to sample from.
num_instances (int): number of instances per identity.
"""
| [
37811,
41571,
262,
6072,
20053,
37811,
198,
2,
15069,
33448,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.785235 | 298 |
import keras
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPool2D
from keras import backend as K
# the datam split between train and test
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape, y_train.shape)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
# convert class vectors to binary class metrics
num_classes = 10
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train sample')
print(X_test.shape[0], 'test sample')
batch_size = 128
epochs = 10
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
hist = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,verbose=1,validation_data=(X_test, y_test))
print('The model has successfully trained')
model.save('mnist.h5')
print('Saving the model as mnist.h5')
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss :', score[0])
print('Test accuracy :', score[1]) | [
11748,
41927,
292,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
285,
77,
396,
198,
6738,
41927,
292,
13,
26791,
13,
37659,
62,
26791,
1330,
284,
62,
66,
2397,
12409,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738... | 2.570588 | 680 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='xrftomo',
author='Fabricio Marin, Chris Roehrig, Arthur Glowacki, Francesco De Carlo, Si Chen',
packages=find_packages(),
version=open('VERSION').read().strip(),
description = 'Pre-processing tools for x-ray fluorescence.',
license='BSD-3',
platforms='Any',
scripts=['xrftomo/__main__.py'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: BSD-3',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.7'
'Programming Language :: Python :: 3.9'
],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
8... | 2.578313 | 332 |
"""Classes that replace tkinter gui objects used by an object being tested.
A gui object jest anything przy a master albo parent parameter, which jest
typically required w spite of what the doc strings say.
"""
klasa Event:
'''Minimal mock przy attributes dla testing event handlers.
This jest nie a gui object, but jest used jako an argument dla callbacks
that access attributes of the event dalejed. If a callback ignores
the event, other than the fact that jest happened, dalej 'event'.
Keyboard, mouse, window, oraz other sources generate Event instances.
Event instances have the following attributes: serial (number of
event), time (of event), type (of event jako number), widget (in which
event occurred), oraz x,y (position of mouse). There are other
attributes dla specific events, such jako keycode dla key events.
tkinter.Event.__doc__ has more but jest still nie complete.
'''
def __init__(self, **kwds):
"Create event przy attributes needed dla test"
self.__dict__.update(kwds)
klasa Var:
"Use dla String/Int/BooleanVar: incomplete"
klasa Mbox_func:
"""Generic mock dla messagebox functions, which all have the same signature.
Instead of displaying a message box, the mock's call method saves the
arguments jako instance attributes, which test functions can then examime.
The test can set the result returned to ask function
"""
klasa Mbox:
"""Mock dla tkinter.messagebox przy an Mbox_func dla each function.
This module was 'tkMessageBox' w 2.x; hence the 'zaimportuj as' w 3.x.
Example usage w test_module.py dla testing functions w module.py:
---
z idlelib.idle_test.mock_tk zaimportuj Mbox
zaimportuj module
orig_mbox = module.tkMessageBox
showerror = Mbox.showerror # example, dla attribute access w test methods
klasa Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
module.tkMessageBox = orig_mbox
---
For 'ask' functions, set func.result zwróć value before calling the method
that uses the message function. When tkMessageBox functions are the
only gui alls w a method, this replacement makes the method gui-free,
"""
askokcancel = Mbox_func() # Prawda albo Nieprawda
askquestion = Mbox_func() # 'yes' albo 'no'
askretrycancel = Mbox_func() # Prawda albo Nieprawda
askyesno = Mbox_func() # Prawda albo Nieprawda
askyesnocancel = Mbox_func() # Prawda, Nieprawda, albo Nic
showerror = Mbox_func() # Nic
showinfo = Mbox_func() # Nic
showwarning = Mbox_func() # Nic
z _tkinter zaimportuj TclError
klasa Text:
"""A semi-functional non-gui replacement dla tkinter.Text text editors.
The mock's data mousuń jest that a text jest a list of \n-terminated lines.
The mock adds an empty string at the beginning of the list so that the
index of actual lines start at 1, jako przy Tk. The methods never see this.
Tk initializes files przy a terminal \n that cannot be deleted. It jest
invisible w the sense that one cannot move the cursor beyond it.
This klasa jest only tested (and valid) przy strings of ascii chars.
For testing, we are nie concerned przy Tk Text's treatment of,
dla instance, 0-width characters albo character + accent.
"""
def __init__(self, master=Nic, cnf={}, **kw):
'''Initialize mock, non-gui, text-only Text widget.
At present, all args are ignored. Almost all affect visual behavior.
There are just a few Text-only options that affect text behavior.
'''
self.data = ['', '\n']
def index(self, index):
"Return string version of index decoded according to current text."
zwróć "%s.%s" % self._decode(index, endflag=1)
spróbuj:
index=index.lower()
wyjąwszy AttributeError:
podnieś TclError('bad text index "%s"' % index) z Nic
lastline = len(self.data) - 1 # same jako number of text lines
jeżeli index == 'insert':
zwróć lastline, len(self.data[lastline]) - 1
albo_inaczej index == 'end':
zwróć self._endex(endflag)
line, char = index.split('.')
line = int(line)
# Out of bounds line becomes first albo last ('end') index
jeżeli line < 1:
zwróć 1, 0
albo_inaczej line > lastline:
zwróć self._endex(endflag)
linelength = len(self.data[line]) -1 # position before/at \n
jeżeli char.endswith(' lineend') albo char == 'end':
zwróć line, linelength
# Tk requires that ignored chars before ' lineend' be valid int
# Out of bounds char becomes first albo last index of line
char = int(char)
jeżeli char < 0:
char = 0
albo_inaczej char > linelength:
char = linelength
zwróć line, char
def _endex(self, endflag):
'''Return position dla 'end' albo line overflow corresponding to endflag.
-1: position before terminal \n; dla .insert(), .delete
0: position after terminal \n; dla .get, .delete index 1
1: same viewed jako beginning of non-existent next line (dla .index)
'''
n = len(self.data)
jeżeli endflag == 1:
zwróć n, 0
inaczej:
n -= 1
zwróć n, len(self.data[n]) + endflag
def insert(self, index, chars):
"Insert chars before the character at index."
jeżeli nie chars: # ''.splitlines() jest [], nie ['']
zwróć
chars = chars.splitlines(Prawda)
jeżeli chars[-1][-1] == '\n':
chars.append('')
line, char = self._decode(index, -1)
before = self.data[line][:char]
after = self.data[line][char:]
self.data[line] = before + chars[0]
self.data[line+1:line+1] = chars[1:]
self.data[line+len(chars)-1] += after
def get(self, index1, index2=Nic):
"Return slice z index1 to index2 (default jest 'index1+1')."
startline, startchar = self._decode(index1)
jeżeli index2 jest Nic:
endline, endchar = startline, startchar+1
inaczej:
endline, endchar = self._decode(index2)
jeżeli startline == endline:
zwróć self.data[startline][startchar:endchar]
inaczej:
lines = [self.data[startline][startchar:]]
dla i w range(startline+1, endline):
lines.append(self.data[i])
lines.append(self.data[endline][:endchar])
zwróć ''.join(lines)
def delete(self, index1, index2=Nic):
'''Delete slice z index1 to index2 (default jest 'index1+1').
Adjust default index2 ('index+1) dla line ends.
Do nie delete the terminal \n at the very end of self.data ([-1][-1]).
'''
startline, startchar = self._decode(index1, -1)
jeżeli index2 jest Nic:
jeżeli startchar < len(self.data[startline])-1:
# nie deleting \n
endline, endchar = startline, startchar+1
albo_inaczej startline < len(self.data) - 1:
# deleting non-terminal \n, convert 'index1+1 to start of next line
endline, endchar = startline+1, 0
inaczej:
# do nie delete terminal \n jeżeli index1 == 'insert'
zwróć
inaczej:
endline, endchar = self._decode(index2, -1)
# restricting end position to insert position excludes terminal \n
jeżeli startline == endline oraz startchar < endchar:
self.data[startline] = self.data[startline][:startchar] + \
self.data[startline][endchar:]
albo_inaczej startline < endline:
self.data[startline] = self.data[startline][:startchar] + \
self.data[endline][endchar:]
startline += 1
dla i w range(startline, endline+1):
usuń self.data[startline]
def compare(self, index1, op, index2):
line1, char1 = self._decode(index1)
line2, char2 = self._decode(index2)
jeżeli op == '<':
zwróć line1 < line2 albo line1 == line2 oraz char1 < char2
albo_inaczej op == '<=':
zwróć line1 < line2 albo line1 == line2 oraz char1 <= char2
albo_inaczej op == '>':
zwróć line1 > line2 albo line1 == line2 oraz char1 > char2
albo_inaczej op == '>=':
zwróć line1 > line2 albo line1 == line2 oraz char1 >= char2
albo_inaczej op == '==':
zwróć line1 == line2 oraz char1 == char2
albo_inaczej op == '!=':
zwróć line1 != line2 albo char1 != char2
inaczej:
podnieś TclError('''bad comparison operator "%s":'''
'''must be <, <=, ==, >=, >, albo !=''' % op)
# The following Text methods normally do something oraz zwróć Nic.
# Whether doing nothing jest sufficient dla a test will depend on the test.
def mark_set(self, name, index):
"Set mark *name* before the character at index."
dalej
def mark_unset(self, *markNames):
"Delete all marks w markNames."
def tag_remove(self, tagName, index1, index2=Nic):
"Remove tag tagName z all characters between index1 oraz index2."
dalej
# The following Text methods affect the graphics screen oraz zwróć Nic.
# Doing nothing should always be sufficient dla tests.
def scan_dragto(self, x, y):
"Adjust the view of the text according to scan_mark"
def scan_mark(self, x, y):
"Remember the current X, Y coordinates."
def see(self, index):
"Scroll screen to make the character at INDEX jest visible."
dalej
# The following jest a Misc method inherited by Text.
# It should properly go w a Misc mock, but jest included here dla now.
def bind(sequence=Nic, func=Nic, add=Nic):
"Bind to this widget at event sequence a call to function func."
dalej
| [
37811,
9487,
274,
326,
6330,
256,
74,
3849,
11774,
5563,
973,
416,
281,
2134,
852,
6789,
13,
198,
198,
32,
11774,
2134,
474,
395,
1997,
778,
7357,
257,
4958,
435,
2127,
2560,
11507,
11,
543,
474,
395,
198,
48126,
2672,
266,
15275,
2... | 2.333105 | 4,374 |
#__all__ = ['nt_domain_controller',
# 'simplemysqlabstractionlayer',
# ]
| [
2,
834,
439,
834,
796,
37250,
429,
62,
27830,
62,
36500,
3256,
220,
201,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
36439,
28744,
13976,
397,
301,
7861,
29289,
3256,
201,
198,
2,
220,
220,
220,
220,
220,
220,
... | 1.901961 | 51 |
import math
from mesa import Agent
class Citizen(Agent):
"""
A member of the general population, may or may not be in active rebellion.
Summary of rule: If grievance - risk > threshold, rebel.
Attributes:
unique_id: unique int
x, y: Grid coordinates
hardship: Agent's 'perceived hardship (i.e., physical or economic
privation).' Exogenous, drawn from U(0,1).
regime_legitimacy: Agent's perception of regime legitimacy, equal
across agents. Exogenous.
risk_aversion: Exogenous, drawn from U(0,1).
threshold: if (grievance - (risk_aversion * arrest_probability)) >
threshold, go/remain Active
vision: number of cells in each direction (N, S, E and W) that agent
can inspect
condition: Can be "Quiescent" or "Active;" deterministic function of
greivance, perceived risk, and
grievance: deterministic function of hardship and regime_legitimacy;
how aggrieved is agent at the regime?
arrest_probability: agent's assessment of arrest probability, given
rebellion
"""
def __init__(
self,
unique_id,
model,
pos,
hardship,
regime_legitimacy,
risk_aversion,
threshold,
vision,
):
"""
Create a new Citizen.
Args:
unique_id: unique int
x, y: Grid coordinates
hardship: Agent's 'perceived hardship (i.e., physical or economic
privation).' Exogenous, drawn from U(0,1).
regime_legitimacy: Agent's perception of regime legitimacy, equal
across agents. Exogenous.
risk_aversion: Exogenous, drawn from U(0,1).
threshold: if (grievance - (risk_aversion * arrest_probability)) >
threshold, go/remain Active
vision: number of cells in each direction (N, S, E and W) that
agent can inspect. Exogenous.
model: model instance
"""
super().__init__(unique_id, model)
self.breed = "citizen"
self.pos = pos
self.hardship = hardship
self.regime_legitimacy = regime_legitimacy
self.risk_aversion = risk_aversion
self.threshold = threshold
self.condition = "Quiescent"
self.vision = vision
self.jail_sentence = 0
self.grievance = self.hardship * (1 - self.regime_legitimacy)
self.arrest_probability = None
def step(self):
"""
Decide whether to activate, then move if applicable.
"""
if self.jail_sentence:
self.jail_sentence -= 1
return # no other changes or movements if agent is in jail.
self.update_neighbors()
self.update_estimated_arrest_probability()
net_risk = self.risk_aversion * self.arrest_probability
if (
self.condition == "Quiescent"
and (self.grievance - net_risk) > self.threshold
):
self.condition = "Active"
elif (
self.condition == "Active" and (self.grievance - net_risk) <= self.threshold
):
self.condition = "Quiescent"
if self.model.movement and self.empty_neighbors:
new_pos = self.random.choice(self.empty_neighbors)
self.model.grid.move_agent(self, new_pos)
def update_neighbors(self):
"""
Look around and see who my neighbors are
"""
self.neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=1
)
self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
self.empty_neighbors = [
c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
]
def update_estimated_arrest_probability(self):
"""
Based on the ratio of cops to actives in my neighborhood, estimate the
p(Arrest | I go active).
"""
cops_in_vision = len([c for c in self.neighbors if c.breed == "cop"])
actives_in_vision = 1.0 # citizen counts herself
for c in self.neighbors:
if (
c.breed == "citizen"
and c.condition == "Active"
and c.jail_sentence == 0
):
actives_in_vision += 1
self.arrest_probability = 1 - math.exp(
-1 * self.model.arrest_prob_constant * (cops_in_vision / actives_in_vision)
)
class Cop(Agent):
"""
A cop for life. No defection.
Summary of rule: Inspect local vision and arrest a random active agent.
Attributes:
unique_id: unique int
x, y: Grid coordinates
vision: number of cells in each direction (N, S, E and W) that cop is
able to inspect
"""
def __init__(self, unique_id, model, pos, vision):
"""
Create a new Cop.
Args:
unique_id: unique int
x, y: Grid coordinates
vision: number of cells in each direction (N, S, E and W) that
agent can inspect. Exogenous.
model: model instance
"""
super().__init__(unique_id, model)
self.breed = "cop"
self.pos = pos
self.vision = vision
def step(self):
"""
Inspect local vision and arrest a random active agent. Move if
applicable.
"""
self.update_neighbors()
active_neighbors = []
for agent in self.neighbors:
if (
agent.breed == "citizen"
and agent.condition == "Active"
and agent.jail_sentence == 0
):
active_neighbors.append(agent)
if active_neighbors:
arrestee = self.random.choice(active_neighbors)
sentence = self.random.randint(0, self.model.max_jail_term)
arrestee.jail_sentence = sentence
if self.model.movement and self.empty_neighbors:
new_pos = self.random.choice(self.empty_neighbors)
self.model.grid.move_agent(self, new_pos)
def update_neighbors(self):
"""
Look around and see who my neighbors are.
"""
self.neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=1
)
self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
self.empty_neighbors = [
c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
]
| [
11748,
10688,
198,
198,
6738,
18842,
64,
1330,
15906,
628,
198,
4871,
22307,
7,
36772,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
2888,
286,
262,
2276,
3265,
11,
743,
393,
743,
407,
307,
287,
4075,
21540,
13,
198,
220... | 2.15199 | 3,066 |
from modAL.uncertainty import (entropy_sampling,
uncertainty_sampling,
margin_sampling)
from asr.query_strategies.random_sampling import random_sampling
| [
198,
6738,
953,
1847,
13,
19524,
1425,
774,
1330,
357,
298,
28338,
62,
37687,
11347,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.972727 | 110 |
from typing import List, Tuple
from ..builtin_utils import BuiltinModule, Fail, make_simple
from ..types import Value, Stack, Str, Atom, Vec
# Registry object for the built-in "streams" module; the decorated
# functions below attach themselves to it.
module = BuiltinModule("streams")
# Short aliases to keep the builtin signatures below compact.
T, V, S = Tuple, Value, Stack
@module.register_simple('str->stream')
@module.register_simple('list->stream') | [
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
6738,
11485,
18780,
259,
62,
26791,
1330,
28477,
259,
26796,
11,
18448,
11,
787,
62,
36439,
198,
6738,
11485,
19199,
1330,
11052,
11,
23881,
11,
4285,
11,
33102,
11,
38692,
198,
198,
... | 3.233333 | 90 |
from openvino.inference_engine import IECore
import cv2
import numpy as np | [
6738,
1280,
85,
2879,
13,
259,
4288,
62,
18392,
1330,
314,
2943,
382,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941
] | 3.083333 | 24 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ddv_global import client, reply_keyboard, keyboard, pk_keyboard
from ddv_waitforconfirmation import wait_for_confirmation
from algosdk.future.transaction import AssetTransferTxn
from telegram.inline.inlinekeyboardbutton import InlineKeyboardButton
from telegram.inline.inlinekeyboardmarkup import InlineKeyboardMarkup
from algosdk import account, mnemonic, transaction
import logging
import time
import os
from dotenv import load_dotenv
load_dotenv()  # pull faucet account credentials from the local .env file
user_d = {}  # per-user scratch dict; appears unused in this module
dispensed = tuple()  # addresses that already received test tokens (see dispense())
test_dispenser = os.getenv('DEFAULT2_ACCOUNT')  # TestNet faucet account address
mn = os.getenv('DEFAULT2_MNEMONIC')  # faucet mnemonic; NOTE: rebound by get_test_account()
def query_balance(update, context):
    """
    Check the balance of an account's public key.

    Reads the address from ``context.user_data['public_key']``, replies with
    the Algo balance and the balance of every held asset, then clears the
    per-user state. On a malformed address nothing is changed.

    :param update: Telegram update for the incoming message
    :param context: Telegram callback context carrying ``user_data``
    :return: -1 (conversation end marker), always
    """
    if 'public_key' in context.user_data:
        pk = context.user_data['public_key']
        update.message.reply_text("Getting the balance on this address_sending ==> {}.".format(pk))
        # Algorand addresses are always exactly 58 characters long.
        if len(pk) == 58:
            account_bal = client.account_info(pk)
            bal = account_bal['amount']/1000000  # microAlgos -> Algos
            update.message.reply_text(
                "Balance on your account: {} Algo."
                "".format(bal), reply_markup=reply_keyboard(update, context, keyboard))
            for m in account_bal['assets']:
                update.message.reply_text("Asset balance: {} 'DMT2'. \nClick /Menu to go the main menu.".format(m['amount']))
            context.user_data.clear()
        else:
            update.message.reply_text("Wrong address_sending supplied.\nNo changes has been made.")
    else:
        update.message.reply_text("Something went wrong")
    return -1
def get_test_account(update, context):
    """
    Create a new public/private key pair.

    Replies to the user with the generated Algorand address, private key and
    mnemonic seed phrase, stores the address under
    ``context.user_data['default_pk']`` and points the user at the TestNet
    dispenser. Also rebinds the module-level ``mn`` to the new mnemonic.

    :param update: Telegram update for the incoming message
    :param context: Telegram callback context carrying ``user_data``
    :return: None on success; the caught exception object on failure
    """
    global mn
    update.message.reply_text("Swift!\nYour keys are ready: \n")
    try:
        # algosdk returns (private key, address), so ``pk`` is the address.
        sk, pk = account.generate_account()
        # NOTE(review): this overwrites the faucet mnemonic that dispense()
        # signs with — confirm that is intended.
        mn = mnemonic.from_private_key(sk)
        address = account.address_from_private_key(sk)
        update.message.reply_text("Account address/Public key: {}\n\n"
                                  "Private Key: {}\n\n"
                                  "Mnemonic:\n {}\n\n"
                                  "I do not hold or manage your keys."
                                  "".format(address, sk, mn)
                                  )
        context.user_data['default_pk'] = pk
        update.message.reply_text('To test if your address works fine, copy your address, and visit:\n ')
        key_board = [[InlineKeyboardButton(
            "DISPENSER", 'https://bank.testnet.algorand.network/', callback_data='1'
        )]]
        dispenser = InlineKeyboardMarkup(key_board)
        update.message.reply_text('the dispenser to get some Algos\nSession ended.'
                                  'Click /start to begin.', reply_markup=dispenser)
        context.user_data.clear()
    except Exception as e:
        update.message.reply_text('Account creation error.')
        return e
def getPK(update, context):
    """
    Convert a 25-word mnemonic into a private key.

    Reads the mnemonic from ``context.user_data['Mnemonic']``, replies with
    the derived private key, deletes the mnemonic from the per-user state and
    finally clears the remaining state.

    :param context: Telegram callback context carrying ``user_data``
    :param update: Telegram update for the incoming message
    :return: None
    """
    if 'Mnemonic' in context.user_data:
        mn = context.user_data['Mnemonic']  # local; does not touch the global ``mn``
        phrase = mnemonic.to_private_key(str(mn))
        update.message.reply_text(
            "Your Private Key:\n {}\n\nKeep your key from prying eyes.\n"
            "\n\nI do not hold or manage your keys.".format(phrase), reply_markup=reply_keyboard(
                update, context, pk_keyboard
            )
        )
        update.message.reply_text('\nSession ended.')
        del context.user_data['Mnemonic']
    else:
        update.message.reply_text("Cannot find Mnemonic.")
    context.user_data.clear()
# First-time opt-in of an account to the DMT2 ASA asset.
def optin(update, context):
    """
    Opt the user's account in to the DMT2 ASA if it has not done so yet.

    Reads the subscriber's address from ``context.user_data['address']`` and
    the signing key from ``context.user_data['Signing_key']``. When the
    account does not yet hold the asset, sends a zero-amount asset transfer
    to self (the standard Algorand opt-in transaction) and waits for
    confirmation.

    :param update: Telegram update for the incoming message
    :param context: Telegram callback context carrying ``user_data``
    :return: the reply-text result on successful opt-in, otherwise None
    """
    sk = context.user_data['Signing_key']
    recipient = context.user_data['address']
    asset_id = 13251912  # DMT2 asset ID on TestNet
    params = client.suggested_params()
    # Check if recipient holding DMT2 asset prior to opt-in
    account_info_pk = client.account_info(recipient)
    holding = None
    for assetinfo in account_info_pk['assets']:
        scrutinized_asset = assetinfo['asset-id']
        if asset_id == scrutinized_asset:
            holding = True
            msg = "This address has opted in for DMT2, ID {}".format(asset_id)
            logging.info("Message: {}".format(msg))
            logging.captureWarnings(True)
            break
    if not holding:
        # Use the AssetTransferTxn class to transfer assets and opt-in:
        # amount 0, sender == receiver is the canonical opt-in form.
        txn = AssetTransferTxn(sender=recipient,
                               sp=params,
                               receiver=recipient,
                               amt=0,
                               index=asset_id)
        # Sign the transaction
        # Firstly, convert mnemonics to private key.
        # For tutorial purpose, we will focus on using private key
        # sk = mnemonic.to_private_key(seed)
        sendTrxn = txn.sign(sk)
        # Submit transaction to the network
        txid = client.send_transaction(sendTrxn)
        message = "Transaction was signed with: {}.".format(txid)
        wait = wait_for_confirmation(update, context, client, txid)
        time.sleep(2)
        # wait_for_confirmation returns a truthy record once confirmed.
        hasOptedIn = bool(wait is not None)
        if hasOptedIn:
            return update.message.reply_text("Opt in success\n{}".format(message))
def dispense(update, context):
    """
    Transfer a custom asset from default account A to account B (Any).

    Sends 200 units of the DMT2 ASA (ID 13251912) from the faucet account
    (module-level ``test_dispenser``, signed with the module-level mnemonic
    ``mn``) to the address stored in ``context.user_data['address']``, waits
    for confirmation, records the recipient in the module-level ``dispensed``
    tuple and clears the per-user state.

    :param update: Default telegram argument
    :param context: Same as update
    :return: None
    """
    time.sleep(5)
    global dispensed
    global mn
    global test_dispenser
    update.message.reply_text('Sending you some test token....')
    to = context.user_data['address']
    params = client.suggested_params()
    params.flat_fee = True
    note = "Thank you for helping in testing this program".encode('utf-8')
    # optin(update, context)
    time.sleep(4)
    # try:
    trxn = transaction.AssetTransferTxn(
        test_dispenser,
        params.fee,
        params.first,
        params.last,
        params.gh,
        to,
        amt=200,
        index=13251912,
        close_assets_to=None,
        note=note,
        gen=params.gen,
        flat_fee=params.flat_fee,
        lease=None,
        rekey_to=None
    )
    # Sign the transaction.
    # NOTE(review): ``mn`` is the faucet mnemonic from .env, but
    # get_test_account() rebinds it to the newest generated account —
    # confirm signing with it here is intended.
    k = mnemonic.to_private_key(mn)
    signed_txn = trxn.sign(k)
    # Submit transaction to the network
    tx_id = client.send_transaction(signed_txn)
    wait_for_confirmation(update, context, client, tx_id)
    update.message.reply_text("Yummy! I just sent you 200 DMT2...\nCheck the explorer for txn info.\n"
                              "" 'Hash: ' f'{tx_id}' 'Explorer: ''https://algoexplorer.io')
    dispensed = dispensed + (to,)
    logging.info(
        "...##Asset Transfer... \nReceiving account: {}.\nOperation: {}.\nTxn Hash: {}"
        .format(to, dispense.__name__, tx_id))
    update.message.reply_text("Successful! \nTransaction hash: {}".format(tx_id))
    context.user_data.clear()
    # except Exception as err:
    #     return err
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
49427,
85,
62,
20541,
1330,
5456,
11,
10971,
62,
2539,
3526,
11,
10586,
11,
279,
74,
62,
2539,
3526,
198,
... | 2.308646 | 3,308 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from database_actions import get_known_encodings
import sklearn.decomposition as deco


## Helper function for plotting a 2D Gaussian
def plot_normal(mu, Sigma):
    """Draw contour lines of the 2D Gaussian N(mu, Sigma) on the current axes.

    Previously this helper was referenced but missing, so the final plot
    raised a NameError.

    Args:
        mu: mean vector, shape (2,).
        Sigma: covariance matrix, shape (2, 2).
    """
    # Evaluate the density on a grid spanning +/- 3 standard deviations.
    std = np.sqrt(np.diag(Sigma))
    xs = np.linspace(mu[0] - 3 * std[0], mu[0] + 3 * std[0], 100)
    ys = np.linspace(mu[1] - 3 * std[1], mu[1] + 3 * std[1], 100)
    grid_x, grid_y = np.meshgrid(xs, ys)
    density = multivariate_normal.pdf(
        np.dstack((grid_x, grid_y)), mean=mu, cov=Sigma)
    plt.contour(grid_x, grid_y, density)


## Load data
# Standardize the raw encodings, then project onto the first two principal
# components so the mixture can be fit and visualized in 2D.
data = get_known_encodings()[0].transpose()
x = (data - np.mean(data, 0)) / np.std(data, 0)
pca = deco.PCA(2)
data = pca.fit(x).transform(x)
N, D = data.shape

## Initialize parameters
K = 5  # number of mixture components (clusters)
mu = []
Sigma = []
pi_k = np.ones(K) / K  # uniform mixing weights
for _ in range(K):
    # Let mu_k be a random data point:
    mu.append(data[np.random.choice(N)])
    # Let Sigma_k be the identity matrix:
    Sigma.append(np.eye(D))

## Loop until you're happy
max_iter = 100  # number of EM iterations
log_likelihood = np.zeros(max_iter)
respons = np.zeros((K, N))  # responsibilities, KxN
for iteration in range(max_iter):
    ## E-step: compute responsibilities
    for k in range(K):
        respons[k] = pi_k[k] * multivariate_normal.pdf(data, mean=mu[k], cov=Sigma[k])
    respons /= np.sum(respons, axis=0)

    ## M-step: update parameters
    for k in range(K):
        respons_k = respons[k]  # N
        Nk = np.sum(respons_k)  # scalar
        mu[k] = respons_k.dot(data) / Nk  # D
        delta = data - mu[k]  # NxD
        Sigma[k] = (respons_k * delta.T).dot(delta) / Nk  # DxD
        pi_k[k] = Nk / N

    ## Compute log-likelihood of data
    L = 0
    for k in range(K):
        L += pi_k[k] * multivariate_normal.pdf(data, mean=mu[k], cov=Sigma[k])
    log_likelihood[iteration] = np.sum(np.log(L))

## Plot log-likelihood -- did we converge?
plt.figure(1)
plt.plot(log_likelihood)
plt.xlabel('Iterations')
plt.ylabel('Log-likelihood')
plt.savefig("figures/em_loglikelihood")

## Plot data
plt.figure(2)
plt.plot(data[:, 0], data[:, 1], '.')
for k in range(K):
    plot_normal(mu[k], Sigma[k])
plt.savefig("figures/em_algorithm_clusters")
plt.show()
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
13,
34242,
1330,
1963,
42524,
62,
11265,
198,
6738,
6831,
62,
4658,
1330,
651,
62,
4002,
62,
12685,
375,
654,
198,
... | 2.4 | 745 |
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Unit tests for probabilityfunctions.py."""
import jax
import jax.numpy as jnp
import numpy as np
import scipy.stats
import pytest
from distla_core.root_solution_unfold import operators
from distla_core.root_solution_unfold import test_utils
from distla_core.root_solution_unfold import probabilityfunctions
from distla_core.linalg.utils import testutils as la_testutils
from distla_core.utils import config
from distla_core.utils import pops
# The parameters used by most tests.
DTYPES = (jnp.float32,)  # dtypes exercised by the parametrized tests
PRECISIONS = (jax.lax.Precision.HIGHEST,)  # matmul precision used throughout
SEEDS = (0,)  # RNG seeds
NUMS_GLOBAL_DISCRETEDS = (0, 1, 2, 3)  # counts of globally-distributed discretes
SYSTEM_SIZES = tuple(range(12, 18))  # total discrete counts, 12..17
BOOLS = (True, False)  # generic boolean parameter grid
def _complex_dtype(dtype):
"""Get the complex version of a real dtype, e.g. float32 -> complex64.
"""
if dtype == jnp.float32:
complex_dtype = jnp.complex64
elif dtype == jnp.float64:
complex_dtype = jnp.complex128
else:
msg = f"Don't know what the complex version of {dtype} is."
raise ValueError(msg)
return complex_dtype
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_all_ones_state(n_discretes, n_global_discretes, dtype):
  """Creates an all-ones state, compares to numpy."""
  expected = np.ones((2**n_discretes,), dtype=dtype)
  state = probabilityfunctions.all_ones_state(n_discretes, n_global_discretes, dtype)
  # Flatten the (possibly device-distributed) state before comparing.
  state_np = np.array(state).reshape((2**n_discretes,))
  np.testing.assert_allclose(state_np, expected)
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_random_rademacher_state(n_discretes, n_global_discretes, seed, dtype):
  """Creates a random Rademacher-distributed state, checks that it really is all
  +/-1 and of the right size.
  """
  state = probabilityfunctions.random_rademacher_state(
      n_discretes,
      n_global_discretes,
      seed,
      dtype,
  )
  state_np = np.array(state)
  # Only the total element count is fixed; the distributed layout may add
  # leading device axes.
  assert np.prod(state_np.shape) == 2**n_discretes
  # Rademacher entries are +/-1, so every magnitude must be exactly 1.
  np.testing.assert_allclose(np.abs(state_np), 1.0)
# Module-level pmapped wrappers, built once and shared by the tests below.
# NOTE(review): pops.pmap appears to mirror jax.pmap — out_axes=None then
# means the scalar result is replicated, and the precision argument is
# marked static because it is not a traced array. Confirm against pops.
norm_pmapped = pops.pmap(
    probabilityfunctions.norm,
    out_axes=None,
    static_broadcasted_argnums=(1,),
)
inner_pmapped = pops.pmap(
    probabilityfunctions.inner,
    out_axes=None,
    static_broadcasted_argnums=(2,),
)
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_norm_and_inner(n_discretes, n_global_discretes, seed, dtype, precision):
  """Creates a random Rademacher state, checks that its norm is correct, that
  `inner` with itself is the norm squared, and that `normalize` normalizes it.
  """
  state = probabilityfunctions.random_rademacher_state(
      n_discretes,
      n_global_discretes,
      seed,
      dtype,
  )
  state_norm = norm_pmapped(state, precision)
  # A +/-1 vector with 2**n entries has Euclidean norm sqrt(2**n) = 2**(n/2).
  np.testing.assert_allclose(state_norm, 2**(n_discretes / 2))
  np.testing.assert_allclose(
      state_norm,
      np.sqrt(inner_pmapped(state, state, precision)),
  )
  state_normalized = probabilityfunctions.normalize(state, precision)
  np.testing.assert_allclose(norm_pmapped(state_normalized, precision), 1.0)
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_cycle_left_on_ones(n_discretes, n_global_discretes, dtype, precision):
  """Creates an all-ones state and checks that it's invariant under translation.
  """
  state = probabilityfunctions.all_ones_state(n_discretes, n_global_discretes, dtype)
  state_translated = probabilityfunctions.cycle_left(state)
  np.testing.assert_allclose(state, state_translated)
  # The translation-invariance measure must vanish for an invariant state.
  transinvar = probabilityfunctions.translation_invariance(state, precision)
  np.testing.assert_allclose(transinvar, 0.0)
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_cycle_left_full_circle(
    n_discretes,
    n_global_discretes,
    seed,
    dtype,
    precision,
):
  """Creates a linear combination of all `n_discretes` translations of the same
  random state, and checks that it's translation invariant.
  """
  state = probabilityfunctions.random_rademacher_state(
      n_discretes,
      n_global_discretes,
      seed,
      dtype,
  )
  # Summing a state with all of its cyclic translations yields a state that
  # is translation invariant by construction.
  state_symmetrised = state
  for _ in range(n_discretes - 1):
    state = probabilityfunctions.cycle_left(state)
    state_symmetrised = state_symmetrised + state
  transinvar = probabilityfunctions.translation_invariance(
      state_symmetrised,
      precision,
  )
  np.testing.assert_allclose(transinvar, 0.0)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_measure_energy(n_global_discretes, seed, dtype, precision):
  """Checks the energy of the MaxCut ObjectiveFn on an all-ones state."""
  np.random.seed(seed)
  n_discretes = operators.min_n_discretes_objective_fn(n_global_discretes, 2)
  neigh_coupling = np.random.rand(n_discretes)
  onsite_coupling = np.random.rand(n_discretes)
  local_terms, shift = test_utils.global_maxcut_objective_fn(
      neigh_coupling,
      onsite_coupling,
      n_discretes,
  )
  # With no shift, the measured energy can be compared directly below.
  assert shift == 0
  obj_fn = operators.gather_local_terms(local_terms, n_global_discretes)
  state = probabilityfunctions.normalize(
      probabilityfunctions.all_ones_state(n_discretes, n_global_discretes, dtype),
      precision,
  )
  energy = probabilityfunctions.measure_energy(state, obj_fn, precision)
  energy += shift * n_discretes
  # On this state the expected energy reduces to the sum of the neighbor
  # couplings (assumes test_utils' MaxCut construction — confirm there).
  energy_expected = np.sum(neigh_coupling)
  rtol = 100 * la_testutils.eps(precision, dtype)
  np.testing.assert_allclose(energy, energy_expected, rtol=rtol)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("term_width", (2, 3))
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_find_root_solution(n_global_discretes, term_width, seed, dtype, precision):
  """Finds the ground state of the ObjectiveFn sum_i -Z_i, checks that the
  energy and the state are correct.
  """
  n_discretes = operators.min_n_discretes_objective_fn(n_global_discretes, term_width)
  neigh_coupling = 0.0
  onsite_coupling = -1.0
  local_terms, shift = test_utils.global_maxcut_objective_fn(
      neigh_coupling,
      onsite_coupling,
      n_discretes,
      boundary="open",
      apply_shift=True,
  )
  # Pad the terms with identity to make them of the desired width.
  while local_terms[0].shape[0] < 2**term_width:
    eye = np.eye(2, dtype=local_terms[0].dtype)
    local_terms = [np.kron(term, eye) for term in local_terms]
  obj_fn = operators.gather_local_terms(local_terms, n_global_discretes)
  expected_energy = onsite_coupling * n_discretes
  # Ground state of sum_i -Z_i is |00...0>, i.e. the first basis vector.
  expected_state = np.zeros((2**n_discretes,), dtype=dtype)
  expected_state[0] = 1
  # Since in the test we can only afford a few iterations, we start from a state
  # that is already close to the right one.
  initial_state = probabilityfunctions.random_rademacher_state(
      n_discretes,
      n_global_discretes,
      seed,
      dtype,
  )
  initial_state = pops.pmap(lambda x, y: x + 1e-2 * y)(
      expected_state.reshape(initial_state.shape),
      initial_state,
  )
  # REDACTED The reason for only doing dynamic_dtype=False is that device
  # spoofing doesn't support bf16 AllReduce. If that ever gets fixed, start
  # testing both True and False.
  energy, state = probabilityfunctions.find_root_solution(
      obj_fn,
      n_discretes,
      precision,
      dynamic_dtype=False,
      n_krylov=10,
      initial_state=initial_state,
  )
  energy += shift * n_discretes
  energy_measured = probabilityfunctions.measure_energy(state, obj_fn, precision)
  energy_measured += shift * n_discretes
  rtol = 20 * la_testutils.eps(precision, dtype)
  np.testing.assert_allclose(energy, energy_measured, rtol=rtol)
  state = probabilityfunctions.normalize(state, precision)
  np.testing.assert_allclose(energy, expected_energy, rtol=rtol)
  # abs() makes the fidelity check insensitive to a global phase/sign.
  fidelity = abs(np.vdot(expected_state, np.array(state)))
  np.testing.assert_allclose(fidelity, 1.0, rtol=rtol)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("term_width", (2, 3))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_objective_fn_unfold(
    n_global_discretes,
    term_width,
    seed,
    dtype,
    precision,
):
  """Unfolds a random state by a random ObjectiveFn for a short time. Checks
  that the norm remains 1 and that the state hasn't changed much.
  """
  # REDACTED Would be useful to have some time unfolding where we can
  # unfold by very little time, and know the exact solution to compare to. Here
  # we just check that the code runs and the state isn't changed in any crazy
  # way, but not whether the unfolding is actually correct.
  n_discretes = operators.min_n_discretes_objective_fn(n_global_discretes, term_width)
  np.random.seed(seed)
  time_step = 1e-4
  n_steps = 2
  local_terms = []
  dim = 2**term_width
  for _ in range(n_discretes):
    term = np.random.randn(dim, dim) + 1j * np.random.randn(dim, dim)
    # Symmetrize so each random term is Hermitian.
    term = term + term.T.conj()
    local_terms.append(term)
  obj_fn = operators.gather_local_terms(local_terms, n_global_discretes)
  obj_fn = [term.to_jax(dtype=dtype) for term in obj_fn]
  state = probabilityfunctions.normalize(
      probabilityfunctions.random_rademacher_state(
          n_discretes,
          n_global_discretes,
          seed,
          _complex_dtype(dtype),
      ),
      precision,
  )
  state_unfoldd = probabilityfunctions.objective_fn_unfold(
      state,
      obj_fn,
      n_steps * time_step,
      time_step,
      precision,
  )
  rtol = 10 * la_testutils.eps(precision, dtype)
  norm_unfoldd = norm_pmapped(state_unfoldd, precision)
  np.testing.assert_allclose(norm_unfoldd, 1.0, rtol=rtol)
  fidelity = inner_pmapped(state, state_unfoldd, precision)
  # For a total time this short the state should barely move.
  rtol = 10 * time_step * n_steps
  np.testing.assert_allclose(fidelity, 1.0, rtol=rtol)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_acyclic_graph_unfold(n_global_discretes, dtype, precision):
  """Unfolds the all-ones state, that is the state |+>^n_discretes, by a acyclic_graph
  that applies the Hadamard building_block to each site once. Checks that the outcome is
  the state zerovector^n_discretes.
  """
  n_discretes = operators.min_n_discretes_acyclic_graph(n_global_discretes)
  # Brickwork acyclic_graphs only work with even system size
  n_discretes += (n_discretes % 2)
  hadamard = np.array([[1.0, 1.0], [1.0, -1.0]], dtype=np.float64) / np.sqrt(2)
  eye = np.eye(2)
  # Each two-site building block is Hadamard on the first site, identity on
  # the second.
  local_building_blocks = [np.kron(hadamard, eye)] * n_discretes
  acyclic_graph = operators.gather_local_building_blocks(local_building_blocks, n_global_discretes)
  acyclic_graph = [building_block.to_jax(dtype) for building_block in acyclic_graph]
  state = probabilityfunctions.normalize(
      probabilityfunctions.all_ones_state(
          n_discretes,
          n_global_discretes,
          _complex_dtype(dtype),
      ),
      precision,
  )
  state_unfoldd = probabilityfunctions.acyclic_graph_unfold(
      state,
      acyclic_graph,
      1,
      precision,
  )
  rtol = 10 * la_testutils.eps(precision, dtype)
  # Hadamard maps |+> to |0>, so the result should be the first basis vector.
  expected_state = np.zeros((2**n_discretes,), dtype=dtype)
  expected_state[0] = 1
  fidelity = abs(np.vdot(expected_state, np.array(state_unfoldd)))
  np.testing.assert_allclose(fidelity, 1.0, rtol=rtol)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_acyclic_graph_unfold_translation(n_global_discretes, seed, dtype, precision):
  """Tests that acyclic_graph unfold respects 2-translation invariance when the
  initial state is translation invariant and all of the building_blocks in the acyclic_graph
  are identical.
  """
  n_discretes = operators.min_n_discretes_acyclic_graph(n_global_discretes)
  # Brickwork acyclic_graphs only work with even system size
  n_discretes += (n_discretes % 2)
  # Initial state is translation invariant
  state = probabilityfunctions.normalize(
      probabilityfunctions.all_ones_state(n_discretes, n_global_discretes, dtype),
      precision,
  )
  # Random real two-discrete building_block (special orthogonal, hence unitary)
  np.random.seed(seed)
  U = scipy.stats.special_ortho_group.rvs(4)
  # The acyclic_graph is 2-translation invariant
  local_building_blocks = [U] * n_discretes
  building_blocks = operators.gather_local_building_blocks(local_building_blocks, n_global_discretes)
  building_blocks = [building_block.to_jax(dtype) for building_block in building_blocks]
  # Apply the acyclic_graph
  state = probabilityfunctions.acyclic_graph_unfold(state, building_blocks, 1, precision)
  # Get the 2-translated state
  trans_state = probabilityfunctions.cycle_left(probabilityfunctions.cycle_left(state))
  # Overlap should be 1
  ovlp = pops.pmap(
      probabilityfunctions.inner,
      out_axes=None,
      static_broadcasted_argnums=(2,),
  )(state, trans_state, precision)
  # This high tolerance works around an observed degradation of accuracy when
  # spoofing multiple devices on CPU
  rtol = 1e-2
  np.testing.assert_allclose(ovlp, 1., rtol=rtol)
@pytest.mark.parametrize("n_global_discretes", NUMS_GLOBAL_DISCRETEDS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("precision", PRECISIONS)
def test_acyclic_graph_unfold_basis_order(n_global_discretes, seed, dtype, precision):
  """Tests that the basis order is what we expect it to be after applying a
  acyclic_graph (global discretes come first, before local discretes).
  """
  n_discretes = operators.min_n_discretes_acyclic_graph(n_global_discretes)
  # Brickwork acyclic_graphs only work with even system size
  n_discretes += (n_discretes % 2)
  # Product X = +1 state initially
  state = probabilityfunctions.normalize(
      probabilityfunctions.all_ones_state(n_discretes, n_global_discretes, dtype),
      precision,
  )
  # Generate random real one-discrete building_blocks
  np.random.seed(seed)
  building_blocks_1q = [scipy.stats.special_ortho_group.rvs(2) for i in range(n_discretes)]
  # Construct the brickwork acyclic_graph: each one-site block is padded with
  # identity on the second site of its pair.
  eye = np.eye(2)
  building_blocks_2q = [np.kron(u, eye) for u in building_blocks_1q]
  building_blocks = operators.gather_local_building_blocks(building_blocks_2q, n_global_discretes)
  building_blocks = [building_block.to_jax(dtype) for building_block in building_blocks]
  # Apply the acyclic_graph
  state = probabilityfunctions.acyclic_graph_unfold(state, building_blocks, 1, precision)
  # Covert to numpy array for later comparison
  state = np.array(state).reshape(2**n_discretes)
  # Compute the state in a different way for comparison: apply each one-site
  # block to |+> and take the Kronecker product of the results.
  # NOTE(review): relies on the private helper operators._kron_fold.
  state_1q = np.ones(2) / np.sqrt(2)
  states_1q = [np.einsum("i,ij->j", state_1q, u) for u in building_blocks_1q]
  kron_state = operators._kron_fold(states_1q)
  # Compare the states obtained in different ways
  ovlp = np.vdot(state, kron_state)
  rtol = 10 * la_testutils.eps(precision, dtype)
  np.testing.assert_allclose(ovlp, 1, rtol=rtol)
@pytest.mark.parametrize("n_discretes", SYSTEM_SIZES)
@pytest.mark.parametrize("less_than_half_traced", (0, 1, 3))
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("dtype", (jnp.float32, jnp.complex64))
def test_reduced_density_matrix(n_discretes, less_than_half_traced, seed, dtype):
  """Tests that reduced_density_matrix agrees with the Numpy result."""
  p_sz = 256  # NOTE(review): presumably the distributed panel size — confirm.
  precision = jax.lax.Precision.HIGHEST
  np.random.seed(seed)
  n_traced = n_discretes // 2 - less_than_half_traced
  n_untraced = n_discretes - n_traced
  # The conversion to DistlaCore matrix doesn't work if all devices aren't
  # utilised.
  n_global_discretes = int(np.round(np.log2(config.NPROCS)))
  n_local_discretes = n_discretes - n_global_discretes
  state_shape = 2**n_global_discretes, 2**n_local_discretes
  state_np = np.random.randn(*state_shape)
  if dtype in (jnp.complex64, jnp.complex128):
    state_np = state_np + 1j * np.random.randn(*state_shape)
  state_np /= np.linalg.norm(state_np)
  matrix = probabilityfunctions.reduced_density_matrix(
      jnp.array(state_np, dtype=dtype),
      n_traced,
      p_sz,
      precision,
  )
  # Choose the discretes to trace over to match the choice in
  # reduced_density_matrix.
  traced_global_discretes = int(np.ceil(n_global_discretes / 2))
  traced_local_discretes = n_traced - traced_global_discretes
  untraced_discretes = n_discretes - traced_local_discretes - traced_global_discretes
  state_np = state_np.reshape((2**traced_global_discretes, 2**untraced_discretes,
                               2**traced_local_discretes))
  matrix_np = np.tensordot(state_np, state_np.conj(), axes=((0, 2), (0, 2)))
  matrix = pops.undistribute(matrix, collect_to_host=True)
  assert matrix.shape[0] == matrix.shape[1] == 2**n_untraced
  tol = 10 * la_testutils.eps(precision, dtype)
  # The reduced density matrix must be Hermitian and match the numpy reference.
  np.testing.assert_allclose(matrix, matrix.T.conj(), rtol=tol, atol=tol)
  np.testing.assert_allclose(matrix, matrix_np, rtol=tol, atol=tol)
@pytest.mark.parametrize("dim", (16, 128))
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("alpha", (2,))
@pytest.mark.parametrize("dtype", (jnp.float32, jnp.complex64))
def test_renyi_entropy(dim, seed, alpha, dtype):
  """Tests that renyi_entropy agrees with the Numpy result."""
  precision = jax.lax.Precision.HIGHEST
  np.random.seed(seed)
  if alpha != 2:
    raise NotImplementedError("alpha != 2 not implemented.")
  matrix_np = np.random.randn(dim, dim)
  if dtype in (jnp.complex64, jnp.complex128):
    matrix_np = matrix_np + 1j * np.random.randn(dim, dim)
  # Build a PSD, trace-1 density matrix rho = M M^dagger / tr(M M^dagger).
  # (Fixed: `.conjubuilding_block()` was a mangled `.conjugate()`, which
  # raised AttributeError at runtime.)
  matrix_np = np.dot(matrix_np, matrix_np.conjugate().transpose())
  matrix_np /= np.trace(matrix_np)
  # For alpha = 2 the Renyi entropy is log2(tr(rho^2)) / (1 - alpha).
  expected = np.log2(np.trace(np.dot(matrix_np, matrix_np))) / (1 - alpha)
  matrix = pops.distribute(matrix_np)
  result = probabilityfunctions.renyi_entropy(matrix, alpha, precision)
  tol = 10 * la_testutils.eps(precision, dtype)
  np.testing.assert_allclose(result, expected, rtol=tol, atol=tol)
| [
2,
15069,
33448,
383,
4307,
5031,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.582557 | 7,625 |
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to prepare learning curves for plotting.
Some utilities include:
- Sanitizing the number of environment steps to ensure that they are always
increasing.
- Applying linear smoothing, similar to that in Tensorboard.
- Producing an averaged curve with error bars from multiple replicates of an
experiment. These should be stored as: `path/to/experiment/SeedX` where X is
a number. Each folder should have tensorboard summaries that can be read.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import data_io
from scipy import interpolate
# Default weight for the Tensorboard-style exponential moving average.
DEFAULT_SMOOTHING_WEIGHT = 0.9
def sanitize_env_steps(env_steps_sequence):
  """This function ensures that the env_steps_sequence is always increasing.

  Sometimes env_steps can reset to zero (when badly checkpointed). This function
  will ensure that the computations are correct by adding the accumulated
  offset of the values reached before each reset. For example, consider the
  badly checkpointed sequence:

  ```
  bad_sequence = [5, 10, 15, 5, 10, 15]
  ```

  At index 3 the job preempted and restarted. The answer should be:

  ```
  expected_answer = [5, 10, 15, 20, 25, 30]
  ```

  This function will return a result such that:

  ```
  assert all(a == b for a,b in zip(sanitize_env_steps(bad_sequence),
                                   expected_answer))
  ```

  Args:
    env_steps_sequence: A list of floats representing environment steps taken.

  Returns:
    A numpy array of floats representing the environment steps taken sanitized
      for issues in checkpointing.
  """
  logging.log_first_n(logging.INFO,
                      'Sanitizing data. This will show only once.', 1)
  if len(env_steps_sequence) < 2:
    # 0 or 1 entries cannot contain a reset; nothing to sanitize.
    return np.array(env_steps_sequence)
  sanitized_env_steps = []
  last_max = 0
  for i in range(len(env_steps_sequence) - 1):
    xt, xtp = env_steps_sequence[i], env_steps_sequence[i + 1]
    sanitized_env_steps.append(xt + last_max)
    if xtp < xt:
      # Reset occurred between t and t+1. Fold the step count reached so far
      # into the running offset. Accumulating (rather than overwriting with
      # the raw value) keeps the output monotone across multiple preemptions.
      last_max += xt
  # Append the final element once, after the loop, so the output has the same
  # length as the input.
  sanitized_env_steps.append(xtp + last_max)
  return np.array(sanitized_env_steps)
def apply_linear_smoothing(data, smoothing_weight=DEFAULT_SMOOTHING_WEIGHT):
  """Smooth curves using an exponential moving average.

  This smoothing algorithm is the same as the one used in tensorboard.

  Args:
    data: The non-empty, indexable iterable containing the data to smooth.
    smoothing_weight: A float representing the weight to place on the moving
      average; larger values smooth more. Non-positive values disable
      smoothing.

  Returns:
    A list containing the smoothed data (or ``data`` unchanged when
    ``smoothing_weight <= 0``).
  """
  assert len(data), 'No curve to smooth.'  # pylint: disable=g-explicit-length-test,line-too-long
  if smoothing_weight <= 0:
    return data
  smoothed = []
  running = data[0]
  for value in data:
    # Restart the average whenever the running value is non-finite so a
    # single NaN/inf does not poison the remainder of the curve.
    if np.isfinite(running):
      value = running * smoothing_weight + (1 - smoothing_weight) * value
    smoothed.append(value)
    running = smoothed[-1]
  return smoothed
def extract_average_curve(file_path,
                          n_points=500,
                          y_tag='mean_deterministic_trajectory_reward',
                          x_tag=None,
                          min_trajectory_len=0,
                          smoothing_weight=DEFAULT_SMOOTHING_WEIGHT,
                          skip_seed_path_glob=False):
  """Extract a curve averaged over all experimental replicates.

  Args:
    file_path: The path to where the experimental replicates are saved.
    n_points: The number of points to plot.
    y_tag: A string representing the data that will be plotted on the y-axis.
    x_tag: A string representing the data that will be plotted on the x-axis.
      This should be None or `env_steps_at_deterministic_eval` to be
      something meaningful.
    min_trajectory_len: The minimum number of elements in the optimization
      trajectory or the minimum number of environment steps after which to
      consider curves to plot. If this is 0 it will truncate all replicates
      to the shortest replicate.
    smoothing_weight: A float representing how much smoothing to do to the
      data. None disables smoothing.
    skip_seed_path_glob: A boolean indicating if glob should be skipped. If
      it is skipped then `file_path` should directly lead to the directory
      with all the data.

  Returns:
    A tuple of three numpy arrays (x, y_mean, y_std): the shared x values,
    the mean curve and the standard deviation across replicates.
  """
  if skip_seed_path_glob:
    replicate_paths = [file_path]
  else:
    replicate_paths = data_io.get_replicates(file_path)

  # Track each surviving replicate's x range so the final interpolation grid
  # only spans the region covered by all of them.
  minimum_x = []
  maximum_x = []
  interpolators = []
  for replicate_path in replicate_paths:
    events = data_io.load_events(replicate_path)
    x, y = data_io.extract_np_from_scalar_events(events.Scalars(y_tag))
    if x_tag is not None:
      _, x = data_io.extract_np_from_scalar_events(events.Scalars(x_tag))
    min_x, max_x = np.min(x), np.max(x)
    if min_trajectory_len > max_x:
      # NOTE(review): logs min_x although the filter tests max_x - confirm
      # whether the logged value is intentional.
      logging.info('Skipping: %f', min_x)
      continue
    interpolators.append(interpolate.interp1d(x, y))
    minimum_x.append(min_x)
    maximum_x.append(max_x)

  logging.info('%s, \n minimum_x: %s, maximum_x: %s',
               file_path, minimum_x, maximum_x)

  # Common support: start at the latest start, end at the earliest end.
  x = np.linspace(np.max(minimum_x), np.min(maximum_x), n_points)

  ys = []
  for interpolator in interpolators:
    y = interpolator(x)
    if smoothing_weight is not None:
      y = apply_linear_smoothing(y, smoothing_weight=smoothing_weight)
    ys.append(y)

  y_stacked = np.stack(ys)
  return x, np.mean(y_stacked, 0), np.std(y_stacked, 0)
# Library module: executing it directly is deliberately a no-op.
if __name__ == '__main__':
  pass
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
2864,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.765332 | 2,348 |
#!/usr/bin/env python3
import re
from bunkai.base.annotation import Annotations, SpanAnnotation
from bunkai.base.annotator import Annotator
# Matches a run of whitespace that contains at least one newline (used to
# normalize line-break sequences). NOTE: \s already includes \n, so the
# [\n\s] character classes are redundant but harmless.
RE_LBS = re.compile(r'[\n\s]*\n[\n\s]*')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
302,
198,
198,
6738,
36355,
1872,
13,
8692,
13,
1236,
14221,
1330,
47939,
11,
49101,
2025,
38983,
198,
6738,
36355,
1872,
13,
8692,
13,
34574,
1352,
1330,
1052,
1662,
1352,
... | 2.520548 | 73 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# This script was developed with financial support from the Foundation for
# Science and Technology of Portugal, under the grant SFRH/BD/66452/2009.
# Add-on metadata shown in Blender's preferences / add-on browser.
bl_info = {
    "name": "Carnegie Mellon University Mocap Library Browser",
    "author": "Daniel Monteiro Basso <daniel@basso.inf.br>",
    "version": (2015, 3, 20),
    "blender": (2, 66, 6),
    "location": "View3D > Tools",
    "description": "Assistant for using CMU Motion Capture data",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/3D_interaction/CMU_Mocap_Library_Browser",
    "tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
    "category": "Animation"}
# Support Blender's "Reload Scripts" (F8): when the submodules are already in
# locals() the add-on is being reloaded, so refresh them in place instead of
# importing fresh copies.
if 'bpy' in locals():
    import importlib
    library = importlib.reload(library)
    download = importlib.reload(download)
    makehuman = importlib.reload(makehuman)
    data = importlib.reload(data)
else:
    from . import library
    from . import download
    from . import makehuman
    from . import data
import os
import bpy
# NOTE(review): register() is not defined in this part of the file - it is
# presumably provided further down alongside unregister(); verify.
if __name__ == "__main__":
    register()
| [
2,
46424,
347,
43312,
38644,
38559,
24290,
9878,
11290,
46424,
198,
2,
198,
2,
220,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
198,
2,
220,
13096,
340,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
1378... | 3.076547 | 614 |
import maya.cmds as mc
import glTools.utils.blendShape
import glTools.utils.stringUtils
def createFromSelection(origin='local',deformOrder=None,prefix=None):
	'''
	Create a basic blendShape deformer from the current selection.
	The last selected object is used as the base geometry, all other
	selected objects become blendShape targets.
	@param origin: Create a local or world space belndShape deformer. Accepted values - "local" or "world".
	@type origin: str
	@param deformOrder: Deformer order. Accepted values - "after", "before", "parallel", "split" or "foc".
	@type deformOrder: str or None
	@param prefix: Naming prefix
	@type prefix: str or None
	'''
	# Current selection: targets first, base geometry last
	selection = mc.ls(sl=1)
	if not selection:
		print('Empty or invalid selections!')
		return None

	base = selection[-1]
	targets = selection[:-1]

	# Default the prefix to the base geometry name
	if not prefix:
		prefix = base

	# Build the deformer
	deformer = glTools.utils.blendShape.create(base,targets,origin,deformOrder,prefix)

	# With a single target, switch its weight on by default
	if len(targets) == 1:
		mc.setAttr(deformer+'.w[0]',1.0)

	return deformer
def endOfChainBlendShape(geo):
	'''
	Create an End Of Chain override blendShape deformer for the specified geometry.
	The override blendShape will be used to apply custom shot animation (cloth, charFX or shotSculpting) that will override the standard rig deformations.
	@param geo: The geometry to add an override blendShape deformer to.
	@type geo: str
	'''
	# Validate the input geometry and make sure it has renderable shapes
	if not mc.objExists(geo):
		raise Exception('Geometry object "'+geo+'" does not exist!!')
	if not mc.listRelatives(geo,s=True,ni=True):
		raise Exception('Object "'+geo+'" has no valid shape children!')

	# Deformer name is derived from the namespace-stripped geometry name
	deformer = geo.split(':')[-1]+'_override_blendShape'
	if mc.objExists(deformer):
		# Idempotent: reuse the existing deformer
		print('Override blendShape "'+deformer+'" already exists! Skipping...')
	else:
		deformer = mc.blendShape(geo,n=deformer)[0]

	return deformer
def addOverrideTarget(geo,targetGeo,targetWeight=0):
	'''
	Add override blendShape target to the specified geometry.
	@param geo: The geometry to add an override blendShape target to.
	@type geo: str
	@param targetGeo: The override target geometry to add to the blendShape deformer.
	@type targetGeo: str
	@param targetWeight: The override target blend weight to apply.
	@type targetWeight: float
	'''
	# Validate both geometries
	if not mc.objExists(geo):
		raise Exception('Base geometry "'+geo+'" does not exist!!')
	if not mc.objExists(targetGeo):
		raise Exception('Target geometry "'+targetGeo+'" does not exist!!')

	# Locate the override blendShape: prefer the namespace-stripped name,
	# fall back to the full geometry name.
	deformer = geo.split(':')[-1]+'_override_blendShape'
	if not mc.objExists(deformer):
		deformer = geo+'_override_blendShape'
	if not mc.objExists(deformer):
		raise Exception('Override blendShape "'+deformer+'" does not exist!!')

	# Add the target (topology check disabled for speed/flexibility)
	return glTools.utils.blendShape.addTarget(	blendShape=deformer,
												target=targetGeo,
												base=geo,
												targetWeight=targetWeight,
												topologyCheck=False )
def duplicateAndBlend(obj,parent='',search='',replace='',worldSpace=False):
	'''
	Duplicate a specified deformable object, then blendShape the duplicate to the original.
	@param obj: Object to duplicate
	@type obj: str
	@param parent: Parent transform to place the duplicate object under
	@type parent: str
	@param search: Names search string used to generate the duplicate object name
	@type search: str
	@param replace: Names replace string used to generate the duplicate object name
	@type replace: str
	@param worldSpace: Create the blendShape in local or world space
	@type worldSpace: bool
	'''
	# Validate source object
	if not mc.objExists(obj):
		raise Exception('Object "'+obj+'" does not exist!')

	# Duplicate with name substitution
	duplicate = mc.duplicate(obj,rr=True,n=obj.replace(search,replace))[0]

	# Blend the duplicate back to the original
	space = 'world' if worldSpace else 'local'
	deformer = mc.blendShape(obj,duplicate,o=space)[0]

	# Switch on the (single) target weight
	weightAlias = mc.listAttr(deformer+'.w',m=True)[0]
	mc.setAttr(deformer+'.'+weightAlias,1.0)

	# Re-parent the duplicate (world if no valid parent was given)
	if parent and mc.objExists(parent):
		mc.parent(duplicate,parent)
	else:
		mc.parent(duplicate,w=True)

	return deformer
def regenerateTarget(blendShape,target,base='',connect=False):
	'''
	Regenerate target geometry for the specified blendShape target.
	@param blendShape: BlendShape to regenerate target geometry for
	@type blendShape: str
	@param target: BlendShape target to regenerate target geometry for
	@type target: str
	@param base: BlendShape base geometry to regenerate target geometry from
	@type base: str
	@param connect: Reconnect regenerated target geometry to target input
	@type connect: bool
	'''
	# ==========
	# - Checks -
	# ==========

	if not glTools.utils.blendShape.isBlendShape(blendShape):
		raise Exception('Object "'+blendShape+'" is not a valid blendShape!')
	if not glTools.utils.blendShape.hasTarget(blendShape,target):
		raise Exception('BlendShape "'+blendShape+'" has no target "'+target+'"!')
	if base and not glTools.utils.blendShape.hasBase(blendShape,base):
		raise Exception('BlendShape "'+blendShape+'" has no base geometry "'+base+'"!')

	# Check Existing Live Target Geometry
	if glTools.utils.blendShape.hasTargetGeo(blendShape,target,base=base):
		targetGeo = glTools.utils.blendShape.getTargetGeo(blendShape,target,baseGeo=base)
		# BUGFIX: the message previously printed empty placeholders ("") -
		# interpolate the actual target and blendShape names.
		print('Target "'+target+'" for blendShape "'+blendShape+'" already has live target geometry! Returning existing target geometry...')
		return targetGeo

	# Get Base Geometry - Default to base index [0]
	if not base: base = glTools.utils.blendShape.getBaseGeo(blendShape)[0]
	baseIndex = glTools.utils.blendShape.getBaseIndex(blendShape,base)

	# Get Target Index
	targetIndex = glTools.utils.blendShape.getTargetIndex(blendShape,target)

	# ==============================
	# - Regenerate Target Geometry -
	# ==============================

	# Initialize Target Geometry from the base shape
	targetGeo = mc.duplicate(base,n=target)[0]

	# Delete Unused (intermediate) Shapes
	for targetShape in mc.listRelatives(targetGeo,s=True,pa=True):
		if mc.getAttr(targetShape+'.intermediateObject'):
			mc.delete(targetShape)

	# Get Target Deltas and Components.
	# inputTargetItem index 6000 corresponds to a target weight of 1.0
	# (index = weight * 1000 + 5000, per the blendShape node attribute docs).
	wtIndex = 6000
	# Hoist the long target item attribute path - it was previously rebuilt
	# three times inline.
	targetItemAttr = (blendShape+'.inputTarget['+str(baseIndex)+'].inputTargetGroup['+str(targetIndex)+'].inputTargetItem['+str(wtIndex)+']')
	targetDelta = mc.getAttr(targetItemAttr+'.inputPointsTarget')
	targetComp = mc.getAttr(targetItemAttr+'.inputComponentsTarget')
	for i in xrange(len(targetComp)):
		# Apply the stored per-component offset to the duplicated geometry
		d = targetDelta[i]
		mc.move(d[0],d[1],d[2],targetGeo+'.'+targetComp[i],r=True,os=True)

	# Freeze Vertex Transforms
	mc.polyMoveVertex(targetGeo)
	mc.delete(targetGeo,ch=True)

	# ===========================
	# - Connect Target Geometry -
	# ===========================

	if connect:
		mc.connectAttr(targetGeo+'.outMesh',targetItemAttr+'.inputGeomTarget',f=True)

	# =================
	# - Return Result -
	# =================

	return targetGeo
def regenerateTargetSplits(target,base,targetSplits=[],replace=False):
	'''
	Regenerate target splits from a specified master target and base geometry.
	Each split is regenerated as a blend from the master shape, weighted (per component) based on the existing split offset.
	@param target: Target shape to regenerate target splits from
	@type target: str
	@param base: Base geometry to measure against to generate split maps.
	@type base: str
	@param targetSplits: List of target splits to regenerate.
	@type targetSplits: list
	@param replace: Replace existing splits. Otherwise, create new split geometry.
	@type replace: bool
	'''
	# NOTE(review): this function is an unimplemented stub - the body only
	# contains the checks placeholder. When implementing, also change the
	# mutable default argument targetSplits=[] to None (harmless while the
	# body is a stub, a shared-state bug once the list is mutated).
	# ==========
	# - Checks -
	# ==========
	pass
def updateTargets(oldBase,newBase,targetList):
	'''
	Rebuild blendShape targets given an old and a new base geometry.
	NOTE: targetList is modified in place (newBase is prepended) and returned.
	@param oldBase: Old base geometry
	@type oldBase: str
	@param newBase: new base geometry
	@type newBase: str
	@param targetList: List of target shapes to rebuild
	@type targetList: list
	'''
	# ----------
	# - Checks -
	# ----------

	if not mc.objExists(oldBase):
		raise Exception('Old base geometry "'+oldBase+'" does not exist!')
	if not mc.objExists(newBase):
		raise Exception('New base geometry "'+newBase+'" does not exist!')
	if not targetList: raise Exception('Empty target list!')
	for target in targetList:
		if not mc.objExists(target):
			raise Exception('Target geometry "'+target+'" does not exist!')

	# ------------------
	# - Update Targets -
	# ------------------

	# Blend the new base and every target into the old base
	targetList.insert(0,newBase)
	updateBlendShape = mc.blendShape(targetList,oldBase,n='updateTargets_blendShape')[0]
	updateBlendAlias = mc.listAttr(updateBlendShape+'.w',m=True)

	# Keep the new base target fully on while each target is regenerated
	mc.setAttr(updateBlendShape+'.'+updateBlendAlias[0],1)

	for i in range(1,len(updateBlendAlias)):
		# Switch on the current target
		mc.setAttr(updateBlendShape+'.'+updateBlendAlias[i],1)

		# Snapshot the blended old base as the regenerated target shape
		newTarget = mc.duplicate(oldBase,n=updateBlendAlias[0]+'NEW')[0]
		for shape in mc.listRelatives(newTarget,s=True,pa=True):
			if mc.getAttr(shape+'.intermediateObject'):
				mc.delete(shape)

		# Push the snapshot onto the existing target geometry
		targetBlendShape = mc.blendShape(newTarget,targetList[i])[0]
		targetAlias = mc.listAttr(targetBlendShape+'.w',m=True)[0]
		mc.setAttr(targetBlendShape+'.'+targetAlias,1)
		mc.delete(targetList[i],ch=True)
		mc.delete(newTarget)

		# Switch the current target back off
		mc.setAttr(updateBlendShape+'.'+updateBlendAlias[i],0)

	# -----------
	# - Cleanup -
	# -----------

	# Reset New Base Target Weight (0.0)
	mc.setAttr(updateBlendShape+'.'+updateBlendAlias[0],0)
	# Delete History (Old Base)
	#mc.delete(oldBase,ch=True)
	mc.delete(updateBlendShape)

	return targetList
| [
11748,
743,
64,
13,
28758,
82,
355,
36650,
198,
198,
11748,
1278,
33637,
13,
26791,
13,
2436,
437,
33383,
198,
11748,
1278,
33637,
13,
26791,
13,
8841,
18274,
4487,
198,
198,
4299,
2251,
4863,
4653,
1564,
7,
47103,
11639,
12001,
3256,
... | 2.889048 | 3,488 |
# Legacy Django (<3.2) hook pointing at this app's AppConfig subclass.
default_app_config = 'dynamic_formsets.apps.DynamicFormsetsConfig'  # NOQA
| [
12286,
62,
1324,
62,
11250,
796,
705,
67,
28995,
62,
23914,
1039,
13,
18211,
13,
44090,
8479,
28709,
16934,
6,
220,
1303,
8005,
48,
32,
198
] | 2.884615 | 26 |
import datetime
import logging
import logging.config
import logging.handlers
import os
import re
import sys
from functools import wraps
from typing import Any, Callable, Dict, FrozenSet, List, Optional, Pattern, Tuple
import gevent
import structlog
# Regex -> replacement pairs applied to every log line so secrets never
# reach the log files.
LOG_BLACKLIST: Dict[Pattern, str] = {
    # access_token / access token query values: keep the key, hide the value.
    re.compile(r"\b(access_?token=)([a-z0-9_-]+)", re.I): r"\1<redacted>",
    # NOTE(review): appears to redact the path segment after an
    # @0x<address>:<server> identifier - confirm the exact semantics.
    re.compile(
        r"(@0x[0-9a-fA-F]{40}:(?:[\w\d._-]+(?::[0-9]+)?))/([0-9a-zA-Z-]+)"
    ): r"\1/<redacted>",
}
# Level used when no per-module rule matches.
DEFAULT_LOG_LEVEL = "INFO"
# Rotating file handler limits: 20 MiB per file, 3 rotated backups kept.
MAX_LOG_FILE_SIZE = 20 * 1024 * 1024
LOG_BACKUP_COUNT = 3
# Packages considered first-party for logging purposes.
_FIRST_PARTY_PACKAGES = frozenset(["raiden", "raiden_contracts"])
def _chain(first_func: Callable, *funcs: Callable) -> Callable:
"""Chains a give number of functions.
First function receives all args/kwargs. Its result is passed on as an argument
to the second one and so on and so forth until all function arguments are used.
The last result is then returned.
"""
@wraps(first_func)
return wrapper
class LogFilter:
    """Utility for filtering log records on module level rules."""

    def __init__(self, config: Dict[str, str], default_level: str):
        """Initializes a new `LogFilter`

        Args:
            config: Dictionary mapping module names to logging level
            default_level: The default logging level
        """
        # Cache of (logger_name, level) -> decision, filled lazily.
        self._should_log: Dict[Tuple[str, str], bool] = {}
        # the empty module is not matched, so set it here
        self._default_level = config.get("", default_level)
        # Each rule: (module path split into components, level name).
        self._log_rules = [
            (logger.split(".") if logger else list(), level) for logger, level in config.items()
        ]

    def _get_log_level(self, logger_name: str) -> str:
        """Return the configured level for ``logger_name``.

        The most specific (longest) matching module rule wins; when no rule
        matches, the default level applies. NOTE(review): reconstructed -
        ``should_log`` called this method but it was missing from the file;
        verify against the original implementation.
        """
        name_parts = logger_name.split(".") if logger_name else []
        best_match_length = 0
        best_match_level = self._default_level
        for module, level in self._log_rules:
            # Strictly longer matches win; empty-module rules never match
            # (the default already covers them, see __init__).
            if len(module) > best_match_length and name_parts[: len(module)] == module:
                best_match_length = len(module)
                best_match_level = level
        return best_match_level

    def should_log(self, logger_name: str, level: str) -> bool:
        """Returns if a message for the logger should be logged."""
        if (logger_name, level) not in self._should_log:
            log_level_per_rule = self._get_log_level(logger_name)
            # Unknown level names fall back to 10 (DEBUG).
            log_level_per_rule_numeric = getattr(logging, log_level_per_rule.upper(), 10)
            log_level_event_numeric = getattr(logging, level.upper(), 10)
            should_log = log_level_event_numeric >= log_level_per_rule_numeric
            self._should_log[(logger_name, level)] = should_log
        return self._should_log[(logger_name, level)]
def add_greenlet_name(
    _logger: str, _method_name: str, event_dict: Dict[str, Any]
) -> Dict[str, Any]:
    """structlog processor: record the current greenlet's custom name.

    Greenlets with the auto-generated default name ("Greenlet-<n>") are
    ignored; only explicitly named greenlets are annotated.
    """
    name = getattr(gevent.getcurrent(), "name", None)
    if name is not None and not name.startswith("Greenlet-"):
        event_dict["greenlet_name"] = name
    return event_dict
def redactor(blacklist: Dict[Pattern, str]) -> Callable[[str], str]:
    """Returns a function which transforms a str, replacing all matches for its replacement

    NOTE(review): the inner processor was missing from this file (the
    function returned an undefined name); reconstructed from the contract.
    """

    def processor_wrapper(msg: str) -> str:
        for regex, repl in blacklist.items():
            if repl is None:
                # Defensive default, in case a replacement was not provided.
                repl = "<redacted>"
            msg = regex.sub(repl, msg)
        return msg

    return processor_wrapper
| [
11748,
4818,
8079,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
18931,
13,
4993,
8116,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
19720,
1330,
4377,
11,
4889... | 2.588183 | 1,134 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from os import path
from pathlib import Path
from psychopy.alerts import alerttools
from psychopy.experiment.components import BaseVisualComponent, Param, getInitVals, _translate
from psychopy.experiment.py2js_transpiler import translatePythonToJavaScript
from psychopy.localization import _localized as __localized
# Work on a copy so the shared localization table is not mutated.
_localized = __localized.copy()
# only use _localized values for label values, nothing functional:
_BUTTON_LABELS = {
    'callback': _translate("Callback Function"),
    'forceEndRoutine': _translate('Force end of Routine'),
    'text': _translate('Button text'),
    'font': _translate('Font'),
    'letterHeight': _translate('Letter height'),
    'bold': _translate('Bold'),
    'italic': _translate('Italic'),
    'padding': _translate('Padding'),
    'anchor': _translate('Anchor'),
    'fillColor': _translate('Fill Colour'),
    'borderColor': _translate('Border Colour'),
    'borderWidth': _translate('Border Width'),
    'oncePerClick': _translate('Run once per click'),
    'save': _translate("Record clicks"),
    'timeRelativeTo': _translate("Time relative to"),
}
_localized.update(_BUTTON_LABELS)
class ButtonComponent(BaseVisualComponent):
    """
    A component for presenting a clickable textbox with a programmable callback
    """
    # Builder component palette category.
    categories = ['Responses']
    # Runtimes this component can compile to.
    targets = ['PsychoPy', 'PsychoJS']
    # Icon shown in the Builder UI (shipped next to this module).
    iconFile = Path(__file__).parent / 'button.png'
    # Hover tooltip in the Builder component panel.
    tooltip = _translate('Button: A clickable textbox')
    # Stable (non-beta) component.
    beta = False
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2142,
286,
262,
38955,
20519,
5888,
198,
2,
15069,
357,
34,
8,
6244,
12,
7908,
11232,
2631,
343,
344,
357,
... | 2.487484 | 759 |
from __future__ import annotations
import os
from PIL import Image
from arbies.manager import Manager, ConfigDict
from . import Tray
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
610,
29846,
13,
37153,
1330,
9142,
11,
17056,
35,
713,
198,
6738,
764,
1330,
34575,
628
] | 4.060606 | 33 |
from .misc import *
from .tensorboard import run_tensorboard
| [
6738,
764,
44374,
1330,
1635,
198,
6738,
764,
83,
22854,
3526,
1330,
1057,
62,
83,
22854,
3526,
198
] | 3.388889 | 18 |
import base64
import datetime
import decimal
import inspect
import logging
import netaddr
import re
import six
import sys
import uuid
import weakref
from wsme import exc
log = logging.getLogger(__name__)
#: The 'str' (python 2) or 'bytes' (python 3) type.
#: Its use should be restricted to
#: pure ascii strings as the protocols will generally not be
#: be able to send non-unicode strings.
#: To transmit binary strings, use the :class:`binary` type
# NOTE: deliberately shadows the builtin name - this alias is part of the
# module's public API and cannot be renamed.
bytes = six.binary_type
#: Unicode string.
text = six.text_type
class BinaryType(UserType):
    """
    A user type that use base64 strings to carry binary data.
    """
    basetype = bytes
    name = 'binary'
    # NOTE(review): the base64 encode/decode conversion methods
    # (tobasetype/frombasetype) are not visible in this chunk - confirm
    # against the full module.
#: The binary almost-native type
binary = BinaryType()
class IntegerType(UserType):
    """
    A simple integer type. Can validate a value range.

    :param minimum: Possible minimum value
    :param maximum: Possible maximum value

    Example::

        Price = IntegerType(minimum=1)

    NOTE(review): __init__, frombasetype and validate were missing (the class
    ended with a dangling @staticmethod, a syntax error); reconstructed from
    the documented contract - verify against upstream wsme.
    """
    basetype = int
    name = "integer"

    def __init__(self, minimum=None, maximum=None):
        # Optional inclusive bounds checked by validate().
        self.minimum = minimum
        self.maximum = maximum

    @staticmethod
    def frombasetype(value):
        # The wire value is already an int (or None): identity conversion.
        return value

    def validate(self, value):
        # Enforce the configured inclusive bounds, if any.
        if self.minimum is not None and value < self.minimum:
            error = 'Value should be greater or equal to %s' % self.minimum
            raise ValueError(error)
        if self.maximum is not None and value > self.maximum:
            error = 'Value should be lower or equal to %s' % self.maximum
            raise ValueError(error)
        return value
class StringType(UserType):
    """
    A simple string type. Can validate a length and a pattern.

    :param min_length: Possible minimum length
    :param max_length: Possible maximum length
    :param pattern: Possible string pattern

    Example::

        Name = StringType(min_length=1, pattern='^[a-zA-Z ]*$')

    """
    basetype = six.string_types
    name = "string"
    # NOTE(review): the constructor and validate() method are not visible in
    # this chunk - they presumably store min_length/max_length/pattern and
    # enforce them; confirm against the full module.
class IPv4AddressType(UserType):
    """
    A simple IPv4 type.

    NOTE(review): validate was missing (dangling @staticmethod, a syntax
    error); reconstructed - verify against upstream wsme.
    """
    basetype = six.string_types
    name = "ipv4address"

    @staticmethod
    def validate(value):
        error = 'Value should be IPv4 format'
        try:
            # INET_PTON enforces strict dotted-quad parsing.
            netaddr.IPAddress(value, version=4, flags=netaddr.INET_PTON)
        except netaddr.AddrFormatError:
            raise ValueError(error)
        else:
            return value
class IPv6AddressType(UserType):
    """
    A simple IPv6 type.

    This type represents IPv6 addresses in the short format.

    NOTE(review): validate was missing (dangling @staticmethod, a syntax
    error); reconstructed - verify against upstream wsme.
    """
    basetype = six.string_types
    name = "ipv6address"

    @staticmethod
    def validate(value):
        error = 'Value should be IPv6 format'
        try:
            netaddr.IPAddress(value, version=6)
        except netaddr.AddrFormatError:
            raise ValueError(error)
        else:
            return value
class UuidType(UserType):
    """
    A simple UUID type.

    This type allows not only UUID having dashes but also UUID not
    having dashes. For example, '6a0a707c-45ef-4758-b533-e55adddba8ce'
    and '6a0a707c45ef4758b533e55adddba8ce' are distinguished as valid.

    NOTE(review): validate was missing (dangling @staticmethod, a syntax
    error); reconstructed - verify against upstream wsme.
    """
    basetype = six.string_types
    name = "uuid"

    @staticmethod
    def validate(value):
        error = 'Value should be UUID format'
        try:
            # uuid.UUID accepts both dashed and dashless representations;
            # normalize to the canonical dashed text form.
            return six.text_type(uuid.UUID(value))
        except (TypeError, ValueError, AttributeError):
            raise ValueError(error)
class Enum(UserType):
    """
    A simple enumeration type. Can be based on any non-complex type.

    :param basetype: The actual data type
    :param values: A set of possible values

    If nullable, 'None' should be added to the values set.

    Example::

        Gender = Enum(str, 'male', 'female')
        Specie = Enum(str, 'cat', 'dog')

    """
    # NOTE(review): the constructor and validate() method are not visible in
    # this chunk - they presumably store the value set and enforce
    # membership; confirm against the full module.
# Sentinel instance marking "no value supplied" (distinct from None).
# NOTE(review): UnsetType is not visible in this chunk - it is defined
# elsewhere in the module.
Unset = UnsetType()
#: A special type that corresponds to the host framework request object.
#: It can only be used in the function parameters, and if so the request object
#: of the host framework will be passed to the function.
HostRequest = object()
# "Plain old data" types that protocols transmit directly.
pod_types = six.integer_types + (
    bytes, text, float, bool)
# Date/time types with dedicated (de)serialization rules.
dt_types = (datetime.date, datetime.time, datetime.datetime)
# Types needing special encoding (base64 payloads, fixed-point decimals).
extra_types = (binary, decimal.Decimal)
native_types = pod_types + dt_types + extra_types
# The types for which we allow promotion to certain numbers.
_promotable_types = six.integer_types + (text, bytes)
class wsproperty(property):
    """
    A specialised :class:`property` to define typed-property on complex types.
    Example::

        class MyComplexType(wsme.types.Base):
            def get_aint(self):
                return self._aint

            def set_aint(self, value):
                assert avalue < 10  # Dummy input validation
                self._aint = value

            aint = wsproperty(int, get_aint, set_aint, mandatory=True)

    """
    # NOTE(review): no __init__ is visible in this chunk - the constructor
    # (storing datatype/mandatory and delegating fget/fset to `property`)
    # is presumably defined in the full module; verify.
class wsattr(object):
    """
    Complex type attribute definition.

    Example::

        class MyComplexType(wsme.types.Base):
            optionalvalue = int
            mandatoryvalue = wsattr(int, mandatory=True)
            named_value = wsattr(int, name='named.value')

    After inspection, the non-wsattr attributes will be replaced, and
    the above class will be equivalent to::

        class MyComplexType(wsme.types.Base):
            optionalvalue = wsattr(int)
            mandatoryvalue = wsattr(int, mandatory=True)

    """
    #: attribute data type. Can be either an actual type,
    #: or a type name, in which case the actual type will be
    #: determined when needed (generally just before scanning the api).
    # NOTE(review): _get_datatype/_set_datatype (and __init__) are not
    # visible in this chunk - presumably defined in the full module; the
    # property below will raise NameError if they are truly absent.
    datatype = property(_get_datatype, _set_datatype)
def sort_attributes(class_, attributes):
    """Sort a class attributes list in place.

    Three mechanisms are attempted, in order:

    #. A ``_wsme_attr_order`` attribute on ``class_`` giving an explicit
       name order (useful for generated types).
    #. The source code of ``class_`` (and its bases), recovering the
       declaration order of the attributes.
    #. Alphabetical order, as a last resort.
    """
    if not len(attributes):
        return

    by_key = dict((attr.key, attr) for attr in attributes)

    if hasattr(class_, '_wsme_attr_order'):
        # Explicit ordering wins.
        ordered_names = class_._wsme_attr_order
    else:
        names = by_key.keys()
        ordered_names = []
        try:
            # Concatenate the source of the whole MRO (except `object`) and
            # scan assignment statements to recover declaration order.
            source_lines = []
            for cls in inspect.getmro(class_):
                if cls is object:
                    continue
                source_lines[len(source_lines):] = inspect.getsourcelines(cls)[0]
            for raw_line in source_lines:
                stripped = raw_line.strip().replace(" ", "")
                if '=' not in stripped:
                    continue
                candidate = stripped[:stripped.index('=')]
                if candidate in names and candidate not in ordered_names:
                    ordered_names.append(candidate)
            # Anything not found in the source goes at the end.
            if len(ordered_names) < len(names):
                ordered_names.extend((
                    name for name in names if name not in ordered_names))
            assert len(ordered_names) == len(names)
        except (TypeError, IOError):
            # Source unavailable (dynamic class, C extension, REPL ...):
            # fall back to alphabetical order.
            ordered_names = list(names)
            ordered_names.sort()

    attributes[:] = [by_key[name] for name in ordered_names]
def inspect_class(class_):
    """Extract a list of (name, wsattr|wsproperty) for the given class_"""
    attributes = []
    # iswsattr is the getmembers predicate (defined elsewhere in the module).
    for name, attr in inspect.getmembers(class_, iswsattr):
        # Private names and methods are not attributes.
        if name.startswith('_'):
            continue
        if inspect.isroutine(attr):
            continue
        if isinstance(attr, (wsattr, wsproperty)):
            # Already a full attribute definition: use as-is.
            attrdef = attr
        else:
            # A bare type was used as the attribute value; register complex
            # types (and list/dict shorthands) so they are known to the api.
            if attr not in native_types and (
                    inspect.isclass(attr) or
                    isinstance(attr, (list, dict))):
                register_type(attr)
            # Wrap the bare type in the class's attribute class (wsattr by
            # default) so all attributes end up with a uniform definition.
            attrdef = getattr(class_, '__wsattrclass__', wsattr)(attr)
        attrdef.key = name
        if attrdef.name is None:
            attrdef.name = name
        # Weak back-reference: avoid a class <-> attribute reference cycle.
        attrdef.complextype = weakref.ref(class_)
        attributes.append(attrdef)
        # Replace the bare annotation on the class with the definition.
        setattr(class_, name, attrdef)
    sort_attributes(class_, attributes)
    return attributes
def list_attributes(class_):
    """
    Returns a list of a complex type attributes.

    :raises TypeError: if ``class_`` is not a registered complex type.
    """
    if not iscomplex(class_):
        # BUGFIX: the class was never interpolated into the message
        # ("%s is not a registered type" had no % argument).
        raise TypeError("%s is not a registered type" % class_)
    return class_._wsme_attributes
# Default type registry
# NOTE(review): Registry and BaseMeta are not visible in this chunk - they
# are defined elsewhere in the module.
registry = Registry()
class Base(six.with_metaclass(BaseMeta)):
    """Base type for complex types"""
class File(Base):
    """A complex type that represents a file.

    In the particular case of protocol accepting form encoded data as
    input, File can be loaded from a form file field.

    NOTE(review): the content accessors and the trailing @property were
    missing (the class ended with a dangling decorator, a syntax error);
    reconstructed - verify against upstream wsme.
    """

    #: The file name
    filename = wsattr(text)

    #: Mime type of the content
    contenttype = wsattr(text)

    def __init__(self, filename=None, file=None, content=None,
                 contenttype=None):
        self.filename = filename
        self.contenttype = contenttype
        self._file = file
        self._content = content

    def _get_content(self):
        # Lazily read the content from the attached file object, if any.
        if self._content is None and self._file:
            self._content = self._file.read()
        return self._content

    def _set_content(self, value):
        # Setting raw content invalidates any previously attached file.
        self._content = value
        self._file = None

    #: File content
    content = wsproperty(binary, _get_content, _set_content)

    @property
    def file(self):
        """Return a file-like object wrapping :attr:`content`."""
        if self._file is None and self._content:
            self._file = six.BytesIO(self._content)
        return self._file
class DynamicBase(Base):
    """Base type for complex types for which all attributes are not
    defined when the class is constructed.

    This class is meant to be used as a base for types that have
    properties added after the main class is created, such as by
    loading plugins.
    """

    @classmethod
    def add_attributes(cls, **attrs):
        """Add more attributes

        The arguments should be valid Python attribute names
        associated with a type for the new attribute.
        """
        for attr_name, attr_type in attrs.items():
            setattr(cls, attr_name, attr_type)
        # Re-run inspection so the registry picks up the new attributes.
        cls.__registry__.reregister(cls)
| [
11748,
2779,
2414,
198,
11748,
4818,
8079,
198,
11748,
32465,
198,
11748,
10104,
198,
11748,
18931,
198,
11748,
2010,
29851,
198,
11748,
302,
198,
11748,
2237,
198,
11748,
25064,
198,
11748,
334,
27112,
198,
11748,
4939,
5420,
198,
198,
6... | 2.536806 | 3,206 |
__author__ = 'Lorenzo'
# YouTube Data API service identifiers, as required by the Google API client.
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Placeholder API key - keep real credentials out of source control
# (load from the environment or a secrets store instead).
_KEY = '**********************'
| [
834,
9800,
834,
796,
705,
43,
29578,
10872,
6,
628,
198,
56,
12425,
10526,
36,
62,
17614,
62,
35009,
27389,
62,
20608,
796,
366,
11604,
1,
198,
56,
12425,
10526,
36,
62,
17614,
62,
43717,
796,
366,
85,
18,
1,
198,
62,
20373,
796,
... | 2.42 | 50 |
"""User Model."""
from config.database import Model
from orator.orm import has_many
class User(Model):
"""User Model."""
__fillable__ = ["name", "email", "password"]
__auth__ = "email"
@has_many("id", "user_id")
| [
37811,
12982,
9104,
526,
15931,
198,
198,
6738,
4566,
13,
48806,
1330,
9104,
198,
6738,
393,
1352,
13,
579,
1330,
468,
62,
21834,
628,
198,
4871,
11787,
7,
17633,
2599,
198,
220,
220,
220,
37227,
12982,
9104,
526,
15931,
628,
220,
220... | 2.741176 | 85 |
import paramiko
import fooster.console
# Script entry point. NOTE(review): main() is not defined in this part of
# the file - presumably provided elsewhere in the module; verify.
if __name__ == '__main__':
    main()
| [
11748,
5772,
12125,
198,
198,
11748,
11511,
6197,
13,
41947,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.7 | 30 |
"""Provide strategies for given endpoint(s) definition."""
import asyncio
import inspect
import re
from base64 import b64encode
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.parse import quote_plus
import hypothesis
import hypothesis.strategies as st
from hypothesis_jsonschema import from_schema
from requests.auth import _basic_auth_str
from . import utils
from .exceptions import InvalidSchema
from .hooks import GLOBAL_HOOK_DISPATCHER, HookContext, HookDispatcher
from .models import Case, Endpoint
from .types import Hook
# All parameter containers an endpoint definition may carry.
PARAMETERS = frozenset(("path_parameters", "headers", "cookies", "query", "body", "form_data"))
# Maps an Open API parameter location ("in" value) to the corresponding
# container attribute name on `Endpoint`.
LOCATION_TO_CONTAINER = {
    "path": "path_parameters",
    "query": "query",
    "header": "headers",
    "cookie": "cookies",
    "body": "body",
    "formData": "form_data",
}
SLASH = "/"
def create_test(
    endpoint: Endpoint, test: Callable, settings: Optional[hypothesis.settings] = None, seed: Optional[int] = None
) -> Callable:
    """Create a Hypothesis test.

    Wraps ``test`` with a case strategy for ``endpoint``, applies the
    optional seed and settings, and attaches schema-declared examples.
    """
    dispatcher = getattr(test, "_schemathesis_hooks", None)
    case_strategy = endpoint.as_strategy(hooks=dispatcher)
    wrapped = hypothesis.given(case=case_strategy)(test)
    if seed is not None:
        wrapped = hypothesis.seed(seed)(wrapped)
    if asyncio.iscoroutinefunction(test):
        # Hypothesis cannot drive coroutine functions directly; substitute a
        # synchronous driver for the inner test.
        wrapped.hypothesis.inner_test = make_async_test(test)  # type: ignore
    if settings is not None:
        wrapped = settings(wrapped)
    return add_examples(wrapped, endpoint, hook_dispatcher=dispatcher)
def add_examples(test: Callable, endpoint: Endpoint, hook_dispatcher: Optional[HookDispatcher] = None) -> Callable:
    """Add examples to the Hypothesis test, if they are specified in the schema."""
    examples: List[Case] = [
        get_single_example(strategy) for strategy in endpoint.get_strategies_from_examples()
    ]
    ctx = HookContext(endpoint)  # context should be passed here instead
    # Give every hook layer a chance to mutate the examples list.
    GLOBAL_HOOK_DISPATCHER.dispatch("before_add_examples", ctx, examples)
    endpoint.schema.hooks.dispatch("before_add_examples", ctx, examples)
    if hook_dispatcher:
        hook_dispatcher.dispatch("before_add_examples", ctx, examples)
    wrapped = test
    for example in examples:
        wrapped = hypothesis.example(case=example)(wrapped)
    return wrapped
def is_valid_header(headers: Dict[str, Any]) -> bool:
    """Verify if the generated headers are valid.

    A header is valid when its value is a latin-1-encodable string and
    neither name nor value contains forbidden characters.
    """
    return all(
        isinstance(value, str)
        and utils.is_latin_1_encodable(value)
        and not utils.has_invalid_characters(name, value)
        for name, value in headers.items()
    )
def is_valid_query(query: Dict[str, Any]) -> bool:
    """Surrogates are not allowed in a query string.

    `requests` and `werkzeug` will fail to send it to the application.
    """
    return not any(
        is_surrogate(name) or is_surrogate(value) for name, value in query.items()
    )
def get_case_strategy(endpoint: Endpoint, hooks: Optional[HookDispatcher] = None) -> st.SearchStrategy:
    """Create a strategy for a complete test case.

    Path & endpoint are static, the others are JSON schemas.
    """
    # Container-name -> Open API location, for the containers whose names
    # differ from their location.
    to_location = {"headers": "header", "cookies": "cookie", "path_parameters": "path"}
    strategies = {}
    static_kwargs: Dict[str, Any] = {"endpoint": endpoint}
    for container in PARAMETERS:
        definition = getattr(endpoint, container)
        if definition is None:
            # Undefined containers are passed through as static None values.
            static_kwargs[container] = None
            continue
        location = to_location.get(container, container)
        strategies[container] = prepare_strategy(
            container, definition, endpoint.get_hypothesis_conversions(location)
        )
    return _get_case_strategy(endpoint, static_kwargs, strategies, hooks)
def filter_path_parameters(parameters: Dict[str, Any]) -> bool:
    """Exclude path parameter values that break path resolution.

    Single "." chars and empty strings "" are excluded from path by urllib3.
    A path containing "/" or "%2F" leads to ambiguous path resolution in
    many frameworks and libraries; such behaviour has been observed in both
    WSGI and ASGI applications.
    In this case one variable in the path template will be empty, which will
    lead to 404 in most of the cases. Because of that this case doesn't bring
    much value and might lead to false-positive results of Schemathesis runs.
    """
    blacklist = (".", SLASH, "")
    for value in parameters.values():
        if value in blacklist or (isinstance(value, str) and SLASH in value):
            return False
    return True
| [
37811,
15946,
485,
10064,
329,
1813,
36123,
7,
82,
8,
6770,
526,
15931,
198,
11748,
30351,
952,
198,
11748,
10104,
198,
11748,
302,
198,
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
... | 2.859346 | 1,621 |
name = "wrds2pg"
from wrds2pg.wrds2pg import wrds_update, run_file_sql, get_modified_str
from wrds2pg.wrds2pg import make_engine, get_process, wrds_process_to_pg
from wrds2pg.wrds2pg import wrds_id, set_table_comment, get_table_sql
| [
3672,
796,
366,
86,
4372,
82,
17,
6024,
1,
198,
198,
6738,
1319,
9310,
17,
6024,
13,
86,
4372,
82,
17,
6024,
1330,
1319,
9310,
62,
19119,
11,
1057,
62,
7753,
62,
25410,
11,
651,
62,
41771,
62,
2536,
198,
6738,
1319,
9310,
17,
60... | 2.377551 | 98 |
import insightconnect_plugin_runtime
from .schema import GetAttachmentsForAnIncidentInput, GetAttachmentsForAnIncidentOutput, Input, Output, Component
# Custom imports below
from icon_servicenow.util.request_helper import RequestHelper
| [
11748,
11281,
8443,
62,
33803,
62,
43282,
198,
6738,
764,
15952,
2611,
1330,
3497,
33296,
902,
1890,
2025,
25517,
738,
20560,
11,
3497,
33296,
902,
1890,
2025,
25517,
738,
26410,
11,
23412,
11,
25235,
11,
35100,
198,
198,
2,
8562,
17944... | 3.901639 | 61 |
import os
import re
import sys
import imp
import math
import copy
import json
import time
import errno
import shutil
import hashlib
import argparse
import traceback
import functools
import itertools
import subprocess
import xml.dom.minidom
__tool_name__ = 'vosges'
# NOTE: this is Python 2 code (print statements, `exec ... in scope` below).
if __name__ == '__main__':
	# Disable the end-of-run notification for interactive runs and install the
	# custom top-level exception hook (presumably defined earlier in this file
	# — confirm).
	unhandled_exception_hook.notification_hook_on_error = None
	sys.excepthook = unhandled_exception_hook
	# Options shared by the 'run' and 'resume' subcommands.
	run_parent = argparse.ArgumentParser(add_help = False)
	run_parent.add_argument('--queue')
	run_parent.add_argument('--cwd', default = os.getcwd())
	# --env KEY=VALUE accumulates into a dict via an inline argparse.Action subclass.
	run_parent.add_argument('--env', action = type('', (argparse.Action, ), dict(__call__ = lambda a, p, n, v, o: getattr(n, a.dest).update(dict([v.split('=')])))), default = {})
	run_parent.add_argument('--mem_lo_gb', type = int, default = 2)
	run_parent.add_argument('--mem_hi_gb', type = int, default = 10)
	run_parent.add_argument('-j', '--jobs', type = int, default = 4, dest = 'parallel_jobs')
	run_parent.add_argument('--source', action = 'append', default = [])
	run_parent.add_argument('--path', action = 'append', default = [])
	run_parent.add_argument('--ld_library_path', action = 'append', default = [])
	run_parent.add_argument('--notification_command', default = '''echo This is a dummy notification command, to set a custom one adjust config.notification_command or the command-line argument.''')
	run_parent.add_argument('--strftime', default = '%d/%m/%Y %H:%M:%S')
	run_parent.add_argument('--max_stdout_size', type = int, default = 2048)
	run_parent.add_argument('--seconds_between_queue_checks', type = int, default = 2)
	run_parent.add_argument('--seconds_before_automatic_stopping', type = int, default = 10)
	# Options common to every subcommand; also reused below to build `config`.
	parser_parent = argparse.ArgumentParser(parents = [run_parent], add_help = False)
	parser_parent.add_argument('--rcfile', default = os.path.expanduser('~/.%src' % __tool_name__))
	parser_parent.add_argument('--root', default = '.%s' % __tool_name__)
	parser_parent.add_argument('--archive_root')
	parser_parent.add_argument('--html_root')
	parser_parent.add_argument('--html_root_alias')
	parser = argparse.ArgumentParser(parents = [parser_parent]) # separate parser to work around the config construction bug
	subparsers = parser.add_subparsers()
	# Subcommand: stop.
	cmd = subparsers.add_parser('stop')
	cmd.add_argument('experiment_script')
	cmd.add_argument('--verbose', action = 'store_const', dest = 'stderr', default = open(os.devnull, 'w'), const = None)
	cmd.set_defaults(func = stop)
	# Subcommand: clean.
	cmd = subparsers.add_parser('clean')
	cmd.add_argument('experiment_script')
	cmd.set_defaults(func = clean)
	# Subcommand: log.
	cmd = subparsers.add_parser('log')
	cmd.add_argument('experiment_script')
	cmd.add_argument('--xpath', default = '/')
	cmd.add_argument('--stdout', action = 'store_false', dest = 'stderr')
	cmd.add_argument('--stderr', action = 'store_false', dest = 'stdout')
	cmd.set_defaults(func = log)
	# Subcommand: status.
	cmd = subparsers.add_parser('status')
	cmd.add_argument('experiment_script')
	cmd.add_argument('--xpath', default = '/')
	parser._get_option_tuples = lambda arg_string: [] if any([subparser._get_option_tuples(arg_string) for action in parser._subparsers._actions if isinstance(action, argparse._SubParsersAction) for subparser in action.choices.values()]) else super(ArgumentParser, parser)._get_option_tuples(arg_string) # monkey patching for https://bugs.python.org/issue14365, hack inspired by https://bugs.python.org/file24945/argparse_dirty_hack.py
	cmd.add_argument('--html', dest = 'html', action = 'store_true')
	cmd.set_defaults(func = status, print_html_report_location = True)
	# Subcommand: run.
	cmd = subparsers.add_parser('run', parents = [run_parent])
	cmd.add_argument('experiment_script')
	cmd.add_argument('--dry', action = 'store_true')
	cmd.add_argument('--locally', action = 'store_true')
	cmd.add_argument('--notify', action = 'store_true', dest = 'notify_enabled')
	cmd.add_argument('--archive', action = 'store_true', dest = 'archive_enabled')
	cmd.set_defaults(func = run)
	# Subcommand: resume (same flags as run).
	cmd = subparsers.add_parser('resume', parents = [run_parent])
	cmd.add_argument('experiment_script')
	cmd.add_argument('--dry', action = 'store_true')
	cmd.add_argument('--locally', action = 'store_true')
	cmd.add_argument('--notify', action = 'store_true', dest = 'notify_enabled')
	cmd.add_argument('--archive', action = 'store_true', dest = 'archive_enabled')
	cmd.set_defaults(func = resume)
	# Subcommand: archive.
	cmd = subparsers.add_parser('archive')
	cmd.add_argument('experiment_script')
	cmd.set_defaults(func = archive)
	args = vars(parser.parse_args())
	config = copy.deepcopy(parser_parent.parse_args([])) # deepcopy to make config.html_root != args.get('html_root'), a hack constructing the config object to be used in rcfile exec and script exec
	config.default_job_options = JobOptions(**vars(config)) # using default values from argparse to init the config
	# Expose a synthetic `vosges` module so rcfile/experiment scripts can
	# `import vosges` and see config/Exec/Path.
	sys.modules[__tool_name__] = imp.new_module(__tool_name__)
	vars(sys.modules[__tool_name__]).update(dict(config = config, Exec = Exec, Path = Path))
	config.experiment_script_scope = {}
	if os.path.exists(config.rcfile):
		# Python 2 exec statement: run the user's rcfile in the experiment scope.
		exec open(config.rcfile).read() in config.experiment_script_scope
	config.default_job_options = JobOptions(parent = config.default_job_options, **args) # updating config using command-line args
	vars(config).update({k : args.pop(k) or v for k, v in vars(config).items() if k in args}) # removing all keys from args except the method args
	P.init(config, args.pop('experiment_script'))
	try:
		# Dispatch to the subcommand handler chosen by set_defaults(func=...).
		args.pop('func')(config, **args)
	except KeyboardInterrupt:
		print 'Quitting (Ctrl+C pressed). To stop jobs:'
		print ''
		print '%s stop "%s"' % (__tool_name__, P.experiment_script)
		print ''
| [
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
848,
198,
11748,
10688,
198,
11748,
4866,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
11454,
3919,
198,
11748,
4423,
346,
198,
11748,
12234,
8019,
198,
11748,
1822,
29572,
... | 2.941915 | 1,911 |
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
# from .network_utils import *
# from .network_bodies import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from .network_utils import BaseNet, layer_init
from ..utils.old_config import Config
from ..utils.torch_utils import tensor
# from .network_bodies import DummyBody
from .network_utils import make_mlp
import math
| [
29113,
29113,
4242,
21017,
198,
2,
15069,
357,
34,
8,
2177,
45797,
83,
506,
19439,
7,
23548,
648,
1477,
648,
83,
506,
13,
20322,
31,
14816,
13,
785,
8,
220,
220,
220,
1303,
198,
2,
2448,
3411,
1813,
284,
13096,
262,
2438,
355,
890... | 3.263889 | 216 |
# HTML Parser - Part 2 "https://www.hackerrank.com/challenges/html-parser-part-2/problem"
from html.parser import HTMLParser
# Read the document: the first input line is the line count, then the markup.
# Each line is right-stripped and re-terminated with a newline, as before.
line_count = int(input())
html = "".join(input().rstrip() + '\n' for _ in range(line_count))
parser = MyHTMLParser()
parser.feed(html)
parser.close()
| [
2,
11532,
23042,
263,
532,
2142,
362,
366,
5450,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
6494,
12,
48610,
12,
3911,
12,
17,
14,
45573,
1,
198,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
628,
198,
198,
... | 2.74 | 100 |
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Windows specific utility functions."""
import ctypes
import logging
import os
import platform
import re
import string
import subprocess
import sys
import six
from utils import tools
from api.platforms import common
from api.platforms import gpu
## Private stuff.
# Maps the NT kernel version string to the marketing name for client
# (workstation) SKUs. (u'5.2' mapping to XP presumably covers the x64
# edition — confirm.)
_WIN32_CLIENT_NAMES = {
    u'5.0': u'2000',
    u'5.1': u'XP',
    u'5.2': u'XP',
    u'6.0': u'Vista',
    u'6.1': u'7',
    u'6.2': u'8',
    u'6.3': u'8.1',
    u'10.0': u'10',
}
# Same mapping for server SKUs; starting with kernel 10.0 every server
# release is reported simply as 'Server'.
_WIN32_SERVER_NAMES = {
    u'5.2': u'2003Server',
    u'6.0': u'2008Server',
    u'6.1': u'2008ServerR2',
    u'6.2': u'2012Server',
    u'6.3': u'2012ServerR2',
    u'10.0': u'Server',
}
@tools.cached
def _get_mount_points():
  """Returns the list of 'fixed' drives in format 'X:\\'."""
  # Declare the Win32 prototype before calling through ctypes.
  ctypes.windll.kernel32.GetDriveTypeW.argtypes = (ctypes.c_wchar_p,)
  ctypes.windll.kernel32.GetDriveTypeW.restype = ctypes.c_ulong
  # DRIVE_FIXED filters out removable/network/CD-ROM drives.
  DRIVE_FIXED = 3
  # https://msdn.microsoft.com/library/windows/desktop/aa364939.aspx
  # Probe all 26 possible drive letters and keep only the fixed ones.
  return [
      u'%s:\\' % letter
      for letter in string.ascii_lowercase
      if ctypes.windll.kernel32.GetDriveTypeW(letter + ':\\') == DRIVE_FIXED
  ]
def _get_disk_info(mount_point):
  """Returns total and free space on a mount point in Mb."""
  total = ctypes.c_ulonglong(0)
  free = ctypes.c_ulonglong(0)
  # Third argument (free bytes available to the caller) is not needed.
  ctypes.windll.kernel32.GetDiskFreeSpaceExW(
      ctypes.c_wchar_p(mount_point), None, ctypes.pointer(total),
      ctypes.pointer(free))
  def to_mb(value):
    # Bytes -> Mb, rounded to one decimal.
    return round(value / 1024. / 1024., 1)
  return {u'free_mb': to_mb(free.value), u'size_mb': to_mb(total.value)}
@tools.cached
def _get_win32com():
  """Returns an uninitialized WMI client.

  Returns a (win32com.client, pythoncom) pair, or (None, None) when pywin32
  is not installed.
  """
  try:
    import pythoncom
    from win32com import client # pylint: disable=F0401
    return client, pythoncom
  except ImportError:
    # win32com is included in pywin32, which is an optional package that is
    # installed by Swarming devs. If you find yourself needing it to run without
    # pywin32, for example in cygwin, please send us a CL with the
    # implementation that doesn't use pywin32.
    return None, None
@tools.cached
def _get_wmi_wbem():
  """Returns a WMI client connected to localhost ready to do queries."""
  client, _ = _get_win32com()
  if client is None:
    # pywin32 is not installed; WMI queries are unavailable.
    return None
  # SWbemLocator binds to the default cimv2 namespace on the local host.
  return client.Dispatch('WbemScripting.SWbemLocator').ConnectServer(
      '.', 'root\\cimv2')
@tools.cached
def _get_wmi_wbem_for_storage():
  """Returns a WMI client connected to the Windows Storage namespace, or None."""
  client, pythoncom = _get_win32com()
  if client is None:
    return None
  locator = client.Dispatch('WbemScripting.SWbemLocator')
  try:
    return locator.ConnectServer('.', 'Root\\Microsoft\\Windows\\Storage')
  except pythoncom.com_error:
    # ConnectServer can fail with a COM error (e.g. namespace not present);
    # treat that as "no storage WMI available".
    return None
# Regexp for _get_os_numbers()
# Group 1: "major.minor" version; group 2: build number (optionally dotted).
_CMD_RE = r'\[version (\d+\.\d+)\.(\d+(?:\.\d+|))\]'
@tools.cached
def _get_os_numbers():
  """Returns the normalized OS version and build numbers as strings.
  Actively work around AppCompat version lie shim.
  Returns:
  - 5.1, 6.1, etc. There is no way to distinguish between Windows 7
    and Windows Server 2008R2 since they both report 6.1.
  - build number, like '10240'. Mostly relevant on Windows 10.
  """
  # Windows is lying to us until python adds to its manifest:
  #   <supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
  # and it doesn't.
  # So ask nicely to cmd.exe instead, which will always happily report the right
  # version. Here's some sample output:
  # - XP: Microsoft Windows XP [Version 5.1.2600]
  # - Win10: Microsoft Windows [Version 10.0.10240]
  # - Win7 or Win2K8R2: Microsoft Windows [Version 6.1.7601]
  # - Win1709: Microsoft Windows [Version 10.0.16299.19]
  #
  # Some locale (like fr_CA) use a lower case 'version'.
  out = subprocess.check_output(['cmd.exe', '/c', 'ver']).strip().decode()
  match = re.search(_CMD_RE, out, re.IGNORECASE)
  if not match:
    # Failed to start cmd.exe, that's really bad. Return a dummy value to not
    # crash.
    logging.error('Failed to run cmd.exe /c ver:\n%s', out)
    return '0.0', '0'
  return match.group(1), match.group(2)
def _is_topmost_window(hwnd):
  """Returns True if |hwnd| is a topmost window."""
  # Named constants instead of magic numbers; values per the Win32 API.
  GWL_EXSTYLE = -20
  WS_EX_TOPMOST = 8
  ctypes.windll.user32.GetWindowLongW.restype = ctypes.c_long  # LONG
  ctypes.windll.user32.GetWindowLongW.argtypes = [
      ctypes.c_void_p,  # HWND
      ctypes.c_int
  ]
  ex_style = ctypes.windll.user32.GetWindowLongW(hwnd, GWL_EXSTYLE)
  return bool(ex_style & WS_EX_TOPMOST)
def _get_window_class(hwnd):
  """Returns the class name of |hwnd|.

  Raises a WindowsError (via ctypes.WinError) when GetClassNameW fails or the
  name does not fit in the buffer.
  """
  ctypes.windll.user32.GetClassNameW.restype = ctypes.c_int
  ctypes.windll.user32.GetClassNameW.argtypes = [
      ctypes.c_void_p, # HWND
      ctypes.c_wchar_p,
      ctypes.c_int
  ]
  # 257 = maximum class name length (256) plus the terminating NUL.
  name = ctypes.create_unicode_buffer(257)
  name_len = ctypes.windll.user32.GetClassNameW(hwnd, name, len(name))
  # 0 or negative means failure; a full buffer means possible truncation.
  if name_len <= 0 or name_len >= len(name):
    raise ctypes.WinError(descr='GetClassNameW failed; %s' %
                          ctypes.FormatError())
  return name.value
## Public API.
def from_cygwin_path(path):
  """Converts an absolute cygwin path to a standard Windows path."""
  prefix = '/cygdrive/'
  if not path.startswith(prefix):
    logging.error('%s is not a cygwin path', path)
    return None
  # '/cygdrive/c/foo' -> 'c/foo' -> 'c:/foo' -> 'c:\\foo'.
  rest = path[len(prefix):]
  windows_path = rest[:1] + ':' + rest[1:]
  return windows_path.replace('/', '\\')
def to_cygwin_path(path):
  """Converts an absolute standard Windows path to a cygwin path."""
  if len(path) < 2 or path[1] != ':':
    # TODO(maruel): Accept \\?\ and \??\ if necessary.
    logging.error('%s is not a win32 path', path)
    return None
  # 'C:\\foo\\bar' -> '/cygdrive/c/foo/bar'.
  drive = path[0].lower()
  tail = path[3:].replace('\\', '/')
  return '/cygdrive/%s/%s' % (drive, tail)
@tools.cached
def get_os_version_number():
  """Returns the normalized OS version number as a string.
  Returns:
    - '5.1', '6.1', '10.0', etc. There is no way to distinguish between
      Windows 7 and Windows Server 2008R2 since they both report 6.1.
  """
  version, _build = _get_os_numbers()
  return version
@tools.cached
def get_client_versions():
  """Gets the client versions (or client equivalent for server).
  Returns:
    A list of client versions (or client equivalent for server).
    E.g. '10' for Windows 10 and Windows Server 2016.
  """
  # Single dict lookup; unknown kernel versions map to an empty list.
  name = _WIN32_CLIENT_NAMES.get(get_os_version_number())
  return [name] if name is not None else []
@tools.cached
def get_os_version_names():
  """Returns the marketing/user-friendly names of the OS.
  The return value contains the base marketing name, e.g. Vista, 10, or
  2008Server. For Windows Server starting with 2016, this value is always
  "Server".
  For versions released before Windows 10, the return value also contains the
  name with the service pack, e.g. 7-SP1 or 2012ServerR2-SP0.
  For Windows 10 and Windows Server starting with 2016, the return value
  includes "10-" or "Server-" followed by one or more parts of the build number.
  E.g. for Windows 10 with build number 18362.207, the return value includes
  10-18362, 10-18362.207. For Windows Server 2019 with build number 17763.557,
  the return value includes Server-17763, Server-17763.557.
  """
  # Python keeps a local map in platform.py and it is updated at newer python
  # release. Since our python release is a bit old, do not rely on it.
  # product_type 1 is VER_NT_WORKSTATION (a client SKU) per the
  # sys.getwindowsversion() documentation.
  is_server = sys.getwindowsversion().product_type != 1
  lookup = _WIN32_SERVER_NAMES if is_server else _WIN32_CLIENT_NAMES
  version_number, build_number = _get_os_numbers()
  # Fall back to the raw version number for unknown kernels.
  marketing_name = lookup.get(version_number, version_number)
  if version_number == u'10.0':
    rv = [marketing_name]
    # Windows 10 doesn't have service packs, the build number now is the
    # reference number. More discussion in
    # https://docs.google.com/document/d/1iF1tbc1oedCQ9J6aL7sHeuaayY3bs52fuvKxvLLZ0ig
    if '.' in build_number:
      major_version = build_number.split(u'.')[0]
      rv.append(u'%s-%s' % (marketing_name, major_version))
    rv.append(u'%s-%s' % (marketing_name, build_number))
    rv.sort()
    return rv
  # Pre-Windows-10: append the service pack variant (SP0 when none reported).
  service_pack = platform.win32_ver()[2] or u'SP0'
  return [marketing_name, u'%s-%s' % (marketing_name, service_pack)]
def get_disks_info():
  """Returns disk infos on all mount point in Mb."""
  return {
      mount_point: _get_disk_info(mount_point)
      for mount_point in _get_mount_points()
  }
@tools.cached
def get_audio():
  """Returns audio device as listed by WMI."""
  wbem = _get_wmi_wbem()
  if not wbem:
    # No WMI available (pywin32 missing).
    return None
  # https://msdn.microsoft.com/library/aa394463.aspx
  devices = wbem.ExecQuery('SELECT * FROM Win32_SoundDevice')
  return [device.Name for device in devices if device.Status == 'OK']
@tools.cached
def get_visual_studio_versions():
  """Retrieves all installed Visual Studio versions.
  The returned version list is sorted such that the first element is the highest
  version number.
  Returns:
    A list of Visual Studio version strings, or None when the registry key is
    absent (no Visual Studio installed).
  """
  from six.moves import winreg
  try:
    # The Wow6432Node path reads the 32-bit registry view.
    k = winreg.OpenKey(
        winreg.HKEY_LOCAL_MACHINE,
        'SOFTWARE\\Wow6432Node\\Microsoft\\VSCommon')
  # pylint: disable=undefined-variable
  except WindowsError:
    return None
  try:
    versions = []
    # Subkeys named like '14.0' are version entries.
    for i in range(winreg.QueryInfoKey(k)[0]):
      sub_key = winreg.EnumKey(k, i)
      if re.match(r'\d+\.\d+', sub_key):
        versions.append(sub_key)
    # Numeric sort, highest version first.
    return sorted(versions, key=float, reverse=True)
  finally:
    k.Close()
@tools.cached
def get_gpu():
  """Returns video device dimensions and state as listed by WMI.
  Returns a (sorted dimensions, sorted state) pair of lists, or (None, None)
  when WMI is unavailable.
  NOTE(review): the original docstring said "Not cached as the GPU driver may
  change underneath", yet the function is decorated with @tools.cached —
  confirm which is intended.
  """
  wbem = _get_wmi_wbem()
  if not wbem:
    return None, None
  _, pythoncom = _get_win32com()
  dimensions = set()
  state = set()
  # https://msdn.microsoft.com/library/aa394512.aspx
  try:
    for device in wbem.ExecQuery('SELECT * FROM Win32_VideoController'):
      # The string looks like:
      # PCI\VEN_15AD&DEV_0405&SUBSYS_040515AD&REV_00\3&2B8E0B4B&0&78
      pnp_string = device.PNPDeviceID
      ven_id = u'UNKNOWN'
      dev_id = u'UNKNOWN'
      match = re.search(r'VEN_([0-9A-F]{4})', pnp_string)
      if match:
        ven_id = match.group(1).lower()
      match = re.search(r'DEV_([0-9A-F]{4})', pnp_string)
      if match:
        dev_id = match.group(1).lower()
      dev_name = device.VideoProcessor or u''
      version = device.DriverVersion or u''
      ven_name, dev_name = gpu.ids_to_names(
          ven_id, u'Unknown', dev_id, dev_name)
      # Dimensions: vendor id, vendor:device, and vendor:device-driver.
      dimensions.add(ven_id)
      dimensions.add(u'%s:%s' % (ven_id, dev_id))
      if version:
        dimensions.add(u'%s:%s-%s' % (ven_id, dev_id, version))
        state.add(u'%s %s %s' % (ven_name, dev_name, version))
      else:
        state.add(u'%s %s' % (ven_name, dev_name))
  except pythoncom.com_error as e:
    # This generally happens when this is called as the host is shutting down.
    logging.error('get_gpu(): %s', e)
  return sorted(dimensions), sorted(state)
@tools.cached
def get_integrity_level():
  """Returns the integrity level of the current process as a string.
  TODO(maruel): It'd be nice to make it work on cygwin. The problem is that
  ctypes.windll is unaccessible and it is not known to the author how to use
  stdcall convention through ctypes.cdll.
  """
  if get_os_version_number() == u'5.1':
    # Integrity level is Vista+.
    return None
  # Friendly names for the documented integrity RIDs; anything else is
  # reported as a raw hex value.
  names = {
      0x0000: u'untrusted',
      0x1000: u'low',
      0x2000: u'medium',
      0x2100: u'medium high',
      0x3000: u'high',
      0x4000: u'system',
      0x5000: u'protected process',
  }
  # This was specifically written this way to work on cygwin except for the
  # windll part. If someone can come up with a way to do stdcall on cygwin,
  # that would be appreciated.
  BOOL = ctypes.c_long
  DWORD = ctypes.c_ulong
  HANDLE = ctypes.c_void_p
  TOKEN_READ = DWORD(0x20008)
  # Use the same casing as in the C declaration:
  # https://msdn.microsoft.com/library/windows/desktop/aa379626.aspx
  TokenIntegrityLevel = ctypes.c_int(25)
  ERROR_INSUFFICIENT_BUFFER = 122
  kernel32 = ctypes.windll.kernel32
  advapi32 = ctypes.windll.advapi32
  # Declare the prototypes of everything used below. First open the process'
  # token, then query the SID to know its integrity level.
  kernel32.GetLastError.argtypes = ()
  kernel32.GetLastError.restype = DWORD
  kernel32.GetCurrentProcess.argtypes = ()
  kernel32.GetCurrentProcess.restype = ctypes.c_void_p
  advapi32.OpenProcessToken.argtypes = (
      HANDLE, DWORD, ctypes.POINTER(HANDLE))
  advapi32.OpenProcessToken.restype = BOOL
  advapi32.GetTokenInformation.argtypes = (
      HANDLE, ctypes.c_long, ctypes.c_void_p, DWORD, ctypes.POINTER(DWORD))
  advapi32.GetTokenInformation.restype = BOOL
  advapi32.GetSidSubAuthorityCount.argtypes = [ctypes.c_void_p]
  advapi32.GetSidSubAuthorityCount.restype = ctypes.POINTER(ctypes.c_ubyte)
  advapi32.GetSidSubAuthority.argtypes = (ctypes.c_void_p, DWORD)
  advapi32.GetSidSubAuthority.restype = ctypes.POINTER(DWORD)
  # First open the current process token, query it, then close everything.
  token = ctypes.c_void_p()
  if not advapi32.OpenProcessToken(
      kernel32.GetCurrentProcess(), TOKEN_READ, ctypes.byref(token)):
    logging.error('Failed to get process\' token')
    return None
  if token.value == 0:
    logging.error('Got a NULL token')
    return None
  try:
    # The size of the structure is dynamic because the TOKEN_MANDATORY_LABEL
    # used will have the SID appened right after the TOKEN_MANDATORY_LABEL in
    # the heap allocated memory block, with .Label.Sid pointing to it.
    # The first call is expected to fail with ERROR_INSUFFICIENT_BUFFER and
    # report the required buffer size.
    buf_size = DWORD()
    if advapi32.GetTokenInformation(
        token, TokenIntegrityLevel, ctypes.c_void_p(), buf_size,
        ctypes.byref(buf_size)):
      logging.error('GetTokenInformation() failed expectation')
      return None
    if buf_size.value == 0:
      logging.error('GetTokenInformation() returned size 0')
      return None
    if kernel32.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
      logging.error(
          'GetTokenInformation(): Unknown error: %d', kernel32.GetLastError())
      return None
    token_info = TOKEN_MANDATORY_LABEL()
    ctypes.resize(token_info, buf_size.value)
    if not advapi32.GetTokenInformation(
        token, TokenIntegrityLevel, ctypes.byref(token_info), buf_size,
        ctypes.byref(buf_size)):
      logging.error(
          'GetTokenInformation(): Unknown error with buffer size %d: %d',
          buf_size.value, kernel32.GetLastError())
      return None
    sid = token_info.Label.Sid
    sub_authority_count = advapi32.GetSidSubAuthorityCount(sid)
    # The integrity RID is the last SubAuthority of the SID.
    last = advapi32.GetSidSubAuthority(
        sid, sub_authority_count.contents.value - 1)
    rid = last.contents.value
    return names.get(rid) or u'0x%04x' % rid
  finally:
    kernel32.CloseHandle(token)
@tools.cached
def get_physical_ram():
  """Returns the amount of installed RAM in Mb, rounded to the nearest number.
  """
  # https://msdn.microsoft.com/library/windows/desktop/aa366589.aspx
  # MemoryStatusEx is presumably a ctypes.Structure defined elsewhere in this
  # module — confirm.
  stat = MemoryStatusEx()
  # dwLength must be set to the structure size before the call.
  stat.dwLength = ctypes.sizeof(MemoryStatusEx)  # pylint: disable=W0201
  ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
  # Bytes -> Mb.
  return int(round(stat.dwTotalPhys / 1024. / 1024.))
def get_uptime():
  """Return uptime for Windows 7 and later.
  Excludes sleep time.
  """
  ticks = ctypes.c_ulonglong(0)
  ok = ctypes.windll.kernel32.QueryUnbiasedInterruptTime(ctypes.byref(ticks))
  if not ok:
    # Call failed; report zero uptime rather than raising.
    return 0.
  # Interrupt time is reported in 100ns units; convert to seconds.
  return ticks.value / 10000000.
def get_reboot_required():
  """Returns True if the system should be rebooted to apply updates.
  This is not guaranteed to notice all conditions that could require reboot.
  """
  # Based on https://stackoverflow.com/a/45717438
  k = None
  from six.moves import winreg
  try:
    k = winreg.OpenKey(
        winreg.HKEY_LOCAL_MACHINE,
        'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\WindowsUpdate\\'
        'Auto Update\\RebootRequired')
    # Any value under the RebootRequired key indicates a pending reboot.
    _, num_values, _ = winreg.QueryInfoKey(k)
    return num_values > 0
  except WindowsError: # pylint: disable=undefined-variable
    # This error very likely means the RebootRequired key does not exist,
    # meaning reboot is not required.
    return False
  finally:
    if k:
      k.Close()
@tools.cached
def get_ssd():
  """Returns a list of SSD disks (their DeviceIds), or () when unavailable."""
  wbem = _get_wmi_wbem_for_storage()
  if not wbem:
    return ()
  # https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/msft-physicaldisk
  try:
    # MediaType == 4 is SSD per the MSFT_PhysicalDisk docs linked above.
    return sorted(
        d.DeviceId for d in wbem.ExecQuery('SELECT * FROM MSFT_PhysicalDisk')
        if d.MediaType == 4
    )
  except AttributeError:
    # Presumably raised when the WMI class lacks these properties on older
    # Windows versions — confirm.
    return ()
def list_top_windows():
  """Returns a list of the class names of topmost windows.
  Windows owned by the shell are ignored.
  """
  # The function prototype of EnumWindowsProc.
  window_enum_proc_prototype = ctypes.WINFUNCTYPE(
      ctypes.c_long, # BOOL
      ctypes.c_void_p, # HWND
      ctypes.c_void_p) # LPARAM
  # Set up various user32 functions that are needed.
  ctypes.windll.user32.EnumWindows.restype = ctypes.c_long # BOOL
  ctypes.windll.user32.EnumWindows.argtypes = [
      window_enum_proc_prototype,
      ctypes.py_object
  ]
  ctypes.windll.user32.IsWindowVisible.restype = ctypes.c_long # BOOL
  ctypes.windll.user32.IsWindowVisible.argtypes = [ctypes.c_void_p] # HWND
  ctypes.windll.user32.IsIconic.restype = ctypes.c_long # BOOL
  ctypes.windll.user32.IsIconic.argtypes = [ctypes.c_void_p] # HWND
  # Accumulates class names via the closure below.
  out = []
  def on_window(hwnd, lparam): # pylint: disable=unused-argument
    """Evaluates |hwnd| to determine whether or not it is a topmost window.
    In case |hwnd| is a topmost window, its class name is added to the
    collection of topmost window class names to return.
    """
    # Dig deeper into visible, non-iconified, topmost windows.
    if (ctypes.windll.user32.IsWindowVisible(hwnd) and
        not ctypes.windll.user32.IsIconic(hwnd) and
        _is_topmost_window(hwnd)):
      # Fetch the class name and make sure it's not owned by the Windows shell.
      class_name = _get_window_class(hwnd)
      if (class_name and
          class_name not in ['Button', 'Shell_TrayWnd',
                             'Shell_SecondaryTrayWnd']):
        out.append(class_name)
    # Returning non-zero tells EnumWindows to continue the enumeration.
    return 1
  ctypes.windll.user32.EnumWindows(window_enum_proc_prototype(on_window), None)
  return out
@tools.cached
def get_computer_system_info():
  """Return a named tuple, which lists the following params from the WMI class
  Win32_ComputerSystemProduct:
    name, vendor, version, serial (the IdentifyingNumber)
  Returns None when WMI is unavailable.
  """
  wbem = _get_wmi_wbem()
  if not wbem:
    return None
  info = None
  # https://msdn.microsoft.com/en-us/library/aa394105
  # If the query returns several rows, the last one wins.
  for device in wbem.ExecQuery('SELECT * FROM Win32_ComputerSystemProduct'):
    info = common.ComputerSystemInfo(
        name=device.Name,
        vendor=device.Vendor,
        version=device.Version,
        serial=device.IdentifyingNumber)
  return info
| [
2,
15069,
1853,
383,
406,
9598,
40,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
198,
2,
326,
460,
307,
1043,
287,
262,
38559,
24290,
2393,
13,
1... | 2.538372 | 7,766 |
import numpy as np
import pandas as pd
import GPy, GPyOpt
from sklearn.model_selection import train_test_split
from sklearn.metrics import brier_score_loss as brier_score
from sklearn.metrics import accuracy_score, f1_score
from scipy.sparse import load_npz
from stuff.models import NBSVM, simpleNBSVM
from stuff.tools import tfidf_to_counts
from stuff.metrics import binary_diagnostics
# -- Data loading ------------------------------------------------------------
filedir = 'C:/data/addm/'
seeds = np.array(pd.read_csv(filedir + 'seeds.csv')).flatten()
corpus = pd.read_csv(filedir + 'corpus_with_lemmas_clean.csv')
doctermat = load_npz(filedir + 'doctermat.npz')
# -- Features and targets ----------------------------------------------------
dense_counts = np.array(doctermat.todense(), dtype=np.uint16)
X = tfidf_to_counts(dense_counts)
y = np.array(corpus.aucaseyn, dtype=np.uint8)
n_range = range(corpus.shape[0])
# -- Optional Bayesian hyperparameter search (off by default) -----------------
optimize = False
opt_iter = 30
if optimize:
    # Bounds for the GP optimizer.
    bounds = [
        {'name': 'beta', 'type': 'continuous', 'domain': (0.8, 1.0)},
        {'name': 'C', 'type': 'discrete', 'domain': (0.001, 0.01, 1.0, 2, 2**2)},
    ]
    # NOTE(review): `f` is not defined anywhere in this script, so running
    # with optimize=True would raise NameError. Presumably the objective
    # function was removed at some point — confirm before enabling.
    train, val = train_test_split(
        n_range, test_size=0.3, stratify=y, random_state=10221983)
    opt_mod = GPyOpt.methods.BayesianOptimization(
        f=f, num_cores=20, domain=bounds, initial_design_numdata=5)
    opt_mod.run_optimization(opt_iter)
    best = opt_mod.x_opt
    # Persist the best parameters to CSV.
    pd.Series(best).to_csv(filedir + 'models/best_nbsvm_params.csv', index=False)
# -- Evaluation over the 10 pre-defined random splits -------------------------
stats = pd.DataFrame(np.zeros([10, 15]))
for i, seed in enumerate(seeds):
    train, test = train_test_split(
        n_range, stratify=y, random_state=seed, test_size=0.3)
    # The guess matrix is allocated once the test-set size is known.
    if i == 0:
        test_guesses = pd.DataFrame(np.zeros([X[test].shape[0], 10]))
    # Fit the model for this split.
    mod = simpleNBSVM(C=0.001)
    print('Fitting model ' + str(i))
    mod.fit(X[train], y[train])
    # Thresholded predictions and their diagnostics.
    guesses = mod.predict(X[test]).flatten()
    test_guesses.iloc[:, i] = guesses
    bin_stats = binary_diagnostics(y[test], guesses, accuracy=True)
    print(bin_stats)
    stats.iloc[i, :] = bin_stats.values
# -- Persist the results ------------------------------------------------------
stats.columns = ['tp', 'fp', 'tn', 'fn', 'sens', 'spec', 'ppv', 'npv',
                 'f1', 'acc', 'true', 'pred', 'abs', 'rel', 'mcnemar']
stats.to_csv(filedir + 'stats/nbsvm_simple_stats.csv', index=False)
test_guesses.to_csv(filedir + 'guesses/nbsvm_simple_test_guesses.csv', index=False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
14714,
88,
11,
14714,
88,
27871,
198,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
13,
4164,
10... | 1.964713 | 1,587 |
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.MeshMat import MeshMat
from pyleecan.Classes.NodeMat import NodeMat
from pyleecan.Classes.CellMat import CellMat
import numpy as np
@pytest.mark.MeshSol
class Test_get_node(object):
    """unittest for nodes getter methods"""
    # NOTE(review): @classmethod combined with a `self` parameter means `self`
    # is actually the class here, and `self.mesh` is never assigned in this
    # snippet — presumably a setup method elsewhere sets it. Confirm.
    @classmethod
    def test_MeshMat_triangle3(self):
        """unittest with CellMat and NodeMat objects, only Triangle3 elements are defined"""
        # Fetch node coordinates by index and compare to the known layout.
        nodes = self.mesh.get_node(indices=[1, 2])
        solution = np.array([[1, 0], [1, 2]])
        # Sum of absolute element-wise differences; zero means exact match.
        testA = np.sum(abs(solution - nodes))
        msg = (
            "Wrong projection: returned " + str(nodes) + ", expected: " + str(solution)
        )
        DELTA = 1e-10
        assert abs(testA - 0) < DELTA, msg
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
12972,
9288,
198,
6738,
279,
2349,
721,
272,
13,
9487,
274,
13,
37031,
19044,
1330,
47529,
19044,
198,
6738,
279,
2349,
721,
272,
13,
9487,
274,
13,
19667,
19044,... | 2.396226 | 318 |
# Pipeline configuration: sample names plus bowtie index paths for each
# small-RNA reference (mirnas, mirnas_hairpin, ncRNA, nrRNA).
configfile: 'config_spocd1_pi.yaml'
# Absolute path of the bowtie binary used by every mapping rule below.
bowtie = "/usr/local/Cellar/bowtie/1.2.1.1/bin/bowtie"
# Final targets: a SAM of mapped reads and a FASTA of unmapped reads for each
# reference, per sample. Each stage maps the reads the previous stage could
# not place (mature miRNA -> hairpin miRNA -> ncRNA -> nrRNA).
rule all:
    input:
        expand("Processed/mapped/{sample}_mirna_mature.sam", sample = config["samples"]),
        expand("Processed/unmapped/{sample}_mirna_mature_unmmaped.fasta", sample = config["samples"]),
        expand("Processed/mapped/{sample}_mirna_hairpin.sam", sample = config["samples"]),
        expand("Processed/unmapped/{sample}_mirna_hairpin_unmmaped.fasta", sample = config["samples"]),
        expand("Processed/mapped/{sample}_ncRNA.sam", sample = config["samples"]),
        expand("Processed/unmapped/{sample}_ncRNA_unmmaped.fasta", sample = config["samples"]),
        expand("Processed/mapped/{sample}_nrRNA.sam", sample = config["samples"]),
        expand("Processed/unmapped/{sample}_nrRNA_unmmaped.fasta", sample = config["samples"])
# Map collapsed reads against mature miRNA; -v 0 = no mismatches allowed,
# --un writes unmapped reads to FASTA for the next stage.
rule map_mirna_mature:
    input:
        fasta = "Processed/fasta/{sample}_count.fasta"
    output:
        mirna_mature = "Processed/mapped/{sample}_mirna_mature.sam",
        unmapped = "Processed/unmapped/{sample}_mirna_mature_unmmaped.fasta"
    params:
        mirnas = config["mirnas"]
    shell:
        """
        {bowtie} -v 0 -f --un {output.unmapped} {params.mirnas} {input.fasta} \
        {output.mirna_mature}
        """
# Map the reads left over from the mature-miRNA stage against hairpin miRNA.
# NOTE(review): the output key is named `mirna_mature` although the file is
# the hairpin SAM — looks copy-pasted from the rule above; harmless but
# confusing, confirm before renaming (rule-internal name only).
rule map_mirna_hairpin:
    input:
        fasta = "Processed/unmapped/{sample}_mirna_mature_unmmaped.fasta"
    output:
        mirna_mature = "Processed/mapped/{sample}_mirna_hairpin.sam",
        unmapped = "Processed/unmapped/{sample}_mirna_hairpin_unmmaped.fasta"
    params:
        mirnas = config["mirnas_hairpin"]
    shell:
        """
        {bowtie} -v 0 -f --un {output.unmapped} {params.mirnas} {input.fasta} \
        {output.mirna_mature}
        """
# Map remaining reads against the non-coding RNA reference.
rule map_ncRNA:
    input:
        "Processed/unmapped/{sample}_mirna_hairpin_unmmaped.fasta"
    output:
        ncRNA = "Processed/mapped/{sample}_ncRNA.sam",
        unmapped = "Processed/unmapped/{sample}_ncRNA_unmmaped.fasta"
    params:
        ncRNA = config["ncRNA"]
    shell:
        """
        {bowtie} -v 0 --un {output.unmapped} -f \
        {params.ncRNA} {input} {output.ncRNA}
        """
# Map the final leftovers against the nrRNA reference.
# NOTE(review): "nrRNA" may be a typo for "rRNA" — confirm against the config.
rule map_nrRNA:
    input:
        "Processed/unmapped/{sample}_ncRNA_unmmaped.fasta"
    output:
        nrRNA = "Processed/mapped/{sample}_nrRNA.sam",
        unmapped = "Processed/unmapped/{sample}_nrRNA_unmmaped.fasta"
    params:
        nrRNA = config["nrRNA"]
    shell:
        """
        {bowtie} -v 0 --un {output.unmapped} -f \
        {params.nrRNA} {input} {output.nrRNA}
        """
| [
11250,
7753,
25,
705,
11250,
62,
2777,
420,
67,
16,
62,
14415,
13,
88,
43695,
6,
198,
8176,
36224,
796,
12813,
14629,
14,
12001,
14,
34,
14203,
14,
8176,
36224,
14,
16,
13,
17,
13,
16,
13,
16,
14,
8800,
14,
8176,
36224,
1,
198,
... | 2.240187 | 1,070 |
'''
Definition of Document
class Document:
    def __init__(self, id, content):
self.id = id
self.content = content
'''
# @param {Document[]} docs a list of documents
# @return {dict(string, int[])} an inverted index | [
7061,
6,
198,
36621,
286,
16854,
198,
4871,
16854,
25,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
4686,
11,
269,
33715,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
312,
796,
4686,
198,
220,
220,
220,
220,
... | 2.655556 | 90 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-12 08:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2177,
12,
3023,
12,
1065,
8487,
25,
1558,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.933333 | 75 |
from django.conf.urls import *
# NOTE(review): `patterns()` with string view names was deprecated in Django 1.8
# and removed in 1.10 — this module targets an older Django; confirm the
# project's Django version before modernising.
urlpatterns = patterns('punchclock.views',
    # start a new task / switch the active task / list activities / shift report
    url(r'^$', 'start_task'),
    url(r'^switch/$', 'switch_task'),
    url(r'^activities/$', 'get_activities'),
    url(r'^shift-details/$', 'shift_details'),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
1635,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
79,
3316,
15750,
13,
33571,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.714286 | 196 |
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
from collections import OrderedDict
import os
from fuse.eval.metrics.classification.metrics_thresholding_common import MetricApplyThresholds
from fuse.utils.utils_debug import FuseUtilsDebug
from fuse.utils.gpu import choose_and_enable_multiple_gpus
import logging
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from fuse.utils.utils_logger import fuse_logger_start
from fuse.data.sampler.sampler_balanced_batch import FuseSamplerBalancedBatch
from fuse.models.model_default import FuseModelDefault
from fuse.models.heads.head_global_pooling_classifier import FuseHeadGlobalPoolingClassifier
from fuse.losses.loss_default import FuseLossDefault
from fuse.eval.metrics.classification.metrics_classification_common import MetricAUCROC, MetricAccuracy, MetricROCCurve
from fuse.managers.callbacks.callback_tensorboard import FuseTensorboardCallback
from fuse.managers.callbacks.callback_metric_statistics import FuseMetricStatisticsCallback
from fuse.managers.callbacks.callback_time_statistics import FuseTimeStatisticsCallback
from fuse.managers.manager_default import FuseManagerDefault
from fuse_examples.classification.cmmd.dataset import CMMD_2021_dataset
from fuse.models.backbones.backbone_inception_resnet_v2 import FuseBackboneInceptionResnetV2
from fuse.eval.evaluator import EvaluatorDefault
##########################################
# Debug mode
##########################################
# Options: 'default', 'fast', 'debug', 'verbose', 'user'. See details in FuseUtilsDebug
mode = 'default'
debug = FuseUtilsDebug(mode)

##########################################
# Train / Inference / Evaluation Common Params
##########################################
# Number of GPUs used by the manager; the batch size scales with it.
NUM_GPUS = 1

# Training configuration.
TRAIN_COMMON_PARAMS = {
    'data.train_num_workers': 8,
    'data.validation_num_workers': 8,
    'data.batch_size': 2 * NUM_GPUS,
    'manager.train_params': {
        'num_gpus': NUM_GPUS,
        'num_epochs': 100,
        'virtual_batch_size': 1,  # number of batches in one virtual batch
        'start_saving_epochs': 10,  # first epoch to start saving checkpoints from
        'gap_between_saving_epochs': 100,  # number of epochs between saved checkpoint
    },
    # best_epoch_source: if an epoch's values are the best so far, the epoch is
    # saved as a checkpoint.
    'manager.best_epoch_source': {
        'source': 'metrics.auc',  # can be any key from losses or metrics dictionaries
        'optimization': 'max',  # can be either min/max
        'on_equal_values': 'better',  # whether equal values count as a new best epoch
    },
    'manager.learning_rate': 1e-5,
    'manager.weight_decay': 0.001,
    'manager.resume_checkpoint_filename': None,
}

# Inference configuration.
INFER_COMMON_PARAMS = {
    'infer_filename': 'validation_set_infer.gz',
    'checkpoint': 'best',  # Fuse TIP: possible values are 'best', 'last' or epoch_index.
    'data.train_num_workers': TRAIN_COMMON_PARAMS['data.train_num_workers'],
}

# Evaluation configuration.
EVAL_COMMON_PARAMS = {
    'infer_filename': INFER_COMMON_PARAMS['infer_filename'],
    'num_workers': 4,
    'batch_size': 8,
}
EVAL_COMMON_PARAMS['batch_size'] = 8
######################################
# Analyze Template
######################################
######################################
# Run
######################################
if __name__ == "__main__":
# allocate gpus
if NUM_GPUS == 0:
TRAIN_COMMON_PARAMS['manager.train_params']['device'] = 'cpu'
# uncomment if you want to use specific gpus instead of automatically looking for free ones
force_gpus = None # [0]
choose_and_enable_multiple_gpus(NUM_GPUS, force_gpus=force_gpus)
RUNNING_MODES = ['train', 'infer', 'eval'] # Options: 'train', 'infer', 'eval'
# Path to save model
root = ''
# Path to the stored CMMD dataset location
# dataset should be download from https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70230508
# download requires NBIA data retriever https://wiki.cancerimagingarchive.net/display/NBIA/Downloading+TCIA+Images
# put on the following in the main folder -
# 1. CMMD_clinicaldata_revision.csv which is a converted version of CMMD_clinicaldata_revision.xlsx
# 2. folder named CMMD which is the downloaded data folder
root_data = None #TODO: add path to the data folder
assert root_data is not None, "Error: please set root_data, the path to the stored CMMD dataset location"
# Name of the experiment
experiment = 'model_new/CMMD_classification'
# Path to cache data
cache_path = 'examples/'
# Name of the cached data folder
experiment_cache = 'CMMD_'
paths = {'data_dir': root_data,
'model_dir': os.path.join(root, experiment, 'model_dir_transfer'),
'data_misc_dir' : os.path.join(root, 'data_misc'),
'force_reset_model_dir': True,
# If True will reset model dir automatically - otherwise will prompt 'are you sure' message.
'cache_dir': os.path.join(cache_path, experiment_cache + '_cache_dir'),
'inference_dir': os.path.join(root, experiment, 'infer_dir'),
'eval_dir': os.path.join(root, experiment, 'eval_dir')}
# train
if 'train' in RUNNING_MODES:
run_train(paths=paths, train_common_params=TRAIN_COMMON_PARAMS, reset_cache=False)
# infer
if 'infer' in RUNNING_MODES:
run_infer(paths=paths, infer_common_params=INFER_COMMON_PARAMS)
#
# eval
if 'eval' in RUNNING_MODES:
run_eval(paths=paths, eval_common_params=EVAL_COMMON_PARAMS)
| [
198,
37811,
198,
198,
7,
34,
8,
15069,
33448,
19764,
11421,
13,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.0537 | 2,216 |
# coding=utf-8
__author__ = "Dragan Vidakovic"
import codecs
def read_file_content(path):
    """
    Read the whole file at the given path as UTF-8 text.

    Fix: the file is now opened via a context manager so the handle is closed
    even when reading raises (the original leaked the handle on error).

    :param path: file path
    :return: file content
    """
    with codecs.open(path, 'r', 'utf8') as f:
        return f.read()
def read_file_line(path):
    """
    Read all lines from the UTF-8 file at the given path.

    Fix: the file is now opened via a context manager so the handle is closed
    even when reading raises (the original leaked the handle on error).

    :param path: file path
    :return: list of lines (line endings preserved)
    """
    with codecs.open(path, 'r', 'utf8') as f:
        return f.readlines()
| [
2,
19617,
28,
40477,
12,
23,
198,
834,
9800,
834,
796,
366,
6187,
7329,
38965,
461,
17215,
1,
198,
11748,
40481,
82,
628,
198,
4299,
1100,
62,
7753,
62,
11299,
7,
6978,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4149,
23... | 2.412322 | 211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import csv
import json
import logging
import os
import time
import sys
from collections import namedtuple
from datetime import datetime
from pprint import pformat
from subprocess import Popen, PIPE
from celery import Celery
from celery.events.snapshot import Polaroid
from shapeshift import JSONFormatter
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atmosphere.settings')
sys.path.append("/opt/dev/atmosphere")
import django
django.setup()
from django.conf import settings
try:
import logstash
except ImportError:
raise "Requires python-logstash to be installed"
# Module-level logger for this script.
logger = logging.getLogger(__name__)

# Default TCP port of the logstash listener and default seconds between reports.
DEFAULT_LOGSTASH_PORT = 5002
DEFAULT_MESSAGE_RATE = 60

# Lightweight records for summarising connection/worker/queue state.
# NOTE(review): none of these are used in the visible part of the file —
# presumably consumed by main(); confirm.
ConnectionInfo = namedtuple("ConnectionInfo", ["listening", "establish_wait"])
ActiveInfo = namedtuple("ActiveInfo", ["active_workers", "idle_workers", "active_tasks"])
ReservedInfo = namedtuple("ReservedInfo", ["reserved_tasks", "queued_tasks"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="host_watch",
description="Forwards machine statistics to logstash.")
parser.add_argument("host", help="Hostname of the logstash server")
parser.add_argument("--port", default=DEFAULT_LOGSTASH_PORT, type=int,
help="Specify the port logstash is using.")
parser.add_argument("--log-to", dest="logfile",
help="Specifies a file to log to.")
parser.add_argument(
"--rate", default=DEFAULT_MESSAGE_RATE, type=int,
help="How often messages are sent to logstash in seconds (default=60)")
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
640,
198... | 2.755738 | 610 |
"""
GET: data muncul jika url dijalankan lewat browser
POST: data tidak bisa diambil lewat browser, namun hanya bisa diambil berdasar permintaan dengan metode post dari form
"""
import requests
import json
import pprint
# Try to fetch the SRTG weekly stock chart from the IDX endpoint.
try:
    result = requests.get('https://www.idx.co.id/umbraco/Surface/Helper/GetStockChart?indexCode=SRTG&period=1W')
    if result.status_code == 200:
        data = json.loads(result.text)
        chart_data = data['ChartData']
        print(data['ChartData'])
        # Fix: `with` guarantees the CSV handle is closed even when a row is
        # malformed (the original left the file open if the loop raised).
        with open('data.csv', 'w') as f:
            f.write('#Tanggal;Value\n')
            for d in chart_data:
                tanggal = d['Date']
                value = d['Close']
                print(tanggal, ';', value)
                f.write('{};{}\n'.format(tanggal, value))
# Deliberate best-effort: report and continue on connection (or parse) errors.
except Exception as ex:
    print(ex)
| [
37811,
198,
18851,
25,
1366,
29856,
3129,
474,
9232,
19016,
2566,
73,
282,
962,
272,
443,
47261,
6444,
198,
32782,
25,
1366,
29770,
461,
275,
9160,
48428,
33473,
443,
47261,
6444,
11,
299,
321,
403,
289,
34183,
275,
9160,
48428,
33473,
... | 2.262411 | 423 |
import sys
import regex
# NOTE(review): this file is Python 2 (`print` statement below). Rect, Row,
# Col, Grid and parse() are not defined in this view — confirm they exist
# earlier in the project.
# Branch-reset regex: the same group numbers capture (keyword, arg1, arg2)
# in each alternative.
instr_regex = regex.compile(r"""(?|(rect)\ (\d+)x(\d+) # use same group numbers in each | branch
                              | rotate\ (row)\ y=(\d+)\ by\ (\d+)
                              | rotate\ (column)\ x=(\d+)\ by\ (\d+))""",
                            regex.VERBOSE)
# Dispatch from the captured keyword to the instruction class.
parse_table = {"rect": Rect, "row": Row, "column": Col}

if __name__ == '__main__':
    # Apply every instruction from stdin to a 50x6 grid, then count lit cells.
    g = Grid(50 ,6)
    for line in sys.stdin:
        g.apply(parse(line.rstrip()).transform(g))
    print sum(1 for y in range(g.height) for x in range(g.width) if g.grid[y][x] == '#')
    g.render()
| [
11748,
25064,
198,
11748,
40364,
198,
198,
259,
2536,
62,
260,
25636,
796,
40364,
13,
5589,
576,
7,
81,
15931,
18109,
30,
91,
7,
2554,
19415,
357,
59,
67,
28988,
87,
38016,
67,
28988,
1303,
779,
976,
1448,
3146,
287,
1123,
930,
8478... | 1.920128 | 313 |
from data_functions import conf
import shutil
filebase = "./../Logs/"
data = filebase + "cooldown_20200422_19.db"
if __name__ == "__main__":
df = conf(data)
df = df.loc[df.times_res - df.times_temps < 1]
# print(df.iloc[:, 1:4:2])
dfexp = df.iloc[:, 1:4:2]
# print(data[10:-3])
newfile = data[10:-3] + ".dat"
dfexp.to_csv(newfile, index=False)
shutil.copyfile(
newfile, "C:/Users/Lab-user/Dropbox/SPLITCOIL_data/Eugen_27_M2/" + newfile
)
| [
6738,
1366,
62,
12543,
2733,
1330,
1013,
198,
11748,
4423,
346,
198,
198,
7753,
8692,
796,
366,
19571,
40720,
11187,
82,
30487,
198,
7890,
796,
2393,
8692,
1343,
366,
1073,
15041,
62,
1238,
15724,
1828,
62,
1129,
13,
9945,
1,
628,
198... | 2.103896 | 231 |
import os
import pytest
from spandex import TableLoader
from spandex.spatialtoolz import conform_srids
@pytest.fixture(scope='function')
def loader(request):
    """Recreate sample schema from shapefiles and tear down when done."""
    # Configure TableLoader to use directory containing sample shapefiles.
    root_path = os.path.dirname(__file__)
    data_path = os.path.join(root_path, '../../test_data')
    loader = TableLoader(directory=data_path)

    # Recreate PostgreSQL sample schema.
    with loader.database.cursor() as cur:
        cur.execute("""
            CREATE EXTENSION IF NOT EXISTS postgis;
            DROP SCHEMA IF EXISTS sample CASCADE;
            CREATE SCHEMA sample;
        """)
    loader.database.refresh()

    # Load all shapefiles in test data directory.
    for filename in os.listdir(data_path):
        file_root, file_ext = os.path.splitext(filename)
        if file_ext.lower() == '.shp':
            shp_path = os.path.join(data_path, filename)
            table_name = 'sample.' + file_root
            loader.load_shp(shp_path, table_name)

    # Reproject all non-conforming SRIDs into project SRID.
    conform_srids(loader.srid, schema=loader.tables.sample)

    # Tear down sample schema when done.
    # Fix: `teardown` was registered but never defined (NameError at fixture
    # setup); define it locally so the finalizer drops the sample schema.
    def teardown():
        with loader.database.cursor() as cur:
            cur.execute("DROP SCHEMA IF EXISTS sample CASCADE;")
        loader.database.refresh()

    request.addfinalizer(teardown)
    return loader
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
599,
392,
1069,
1330,
8655,
17401,
198,
6738,
599,
392,
1069,
13,
2777,
34961,
25981,
89,
1330,
17216,
62,
27891,
2340,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
1163... | 2.565815 | 509 |
#!/usr/bin/env python
import math
import numpy as np
from floripy.mathutils.linalg import perm_tensor
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
781,
273,
541,
88,
13,
11018,
26791,
13,
75,
1292,
70,
1330,
9943,
62,
83,
22854,
628
] | 2.810811 | 37 |
from django.contrib import admin
from .models import Contest, Problem, Submittion, RegistContestUser, Standing
# Register your models here.
# Register every contest-related model with the default admin site.
for _model in (Contest, Problem, Submittion, RegistContestUser, Standing):
    admin.site.register(_model)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
27297,
11,
20647,
11,
3834,
20124,
295,
11,
13811,
4264,
395,
12982,
11,
22493,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
28482,
13,
15654,
13,
30238,
7,
... | 3.409091 | 88 |
# this is just here to trick pytest into finding my modules
# I probably set something up wrong, but this is the quickest workaround I found to fix it :) | [
2,
428,
318,
655,
994,
284,
6908,
12972,
9288,
656,
4917,
616,
13103,
198,
2,
314,
2192,
900,
1223,
510,
2642,
11,
475,
428,
318,
262,
46264,
46513,
314,
1043,
284,
4259,
340,
14373
] | 4.5 | 34 |
#!/usr/bin/env python
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
# Package metadata, assembled in one mapping and handed to setuptools.
metadata = dict(
    name='djangorestframework-nr',
    version='0.1.1',
    description='Provide nested router support to Django REST Framework',
    url='https://github.com/ipglobal/django-rest-framework-nr',
    author='Jarrod Baumann',
    author_email='jarrod@unixc.org',
    license='MIT',
    keywords='djangorestframework nested',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=[],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Internet :: WWW/HTTP',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Environment :: Web Environment',
        'Framework :: Django',
    ],
)
setup(**metadata)
| [
2,
48443,
8800,
14,
8800,
14,
24330,
21015,
198,
198,
2,
16622,
4702,
900,
37623,
10141,
625,
1233,
26791,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
2,
1675,
779,
257,
6414,
21004,
198,
6738,
40481,
82,
1330,... | 2.77724 | 413 |
import tensorflow as tf
from tensorflow.contrib.rnn import DropoutWrapper
class SwitchableDropoutWrapper(DropoutWrapper):
    """
    A wrapper of tensorflow.contrib.rnn.DropoutWrapper that does not apply
    dropout if is_train is not True (dropout only in training).

    NOTE(review): no methods are overridden in this view — presumably the
    switching logic is implemented elsewhere (e.g. an overridden __call__);
    confirm before relying on the behavior described above.
    """
| [
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
13,
81,
20471,
1330,
14258,
448,
36918,
2848,
628,
198,
4871,
14645,
540,
26932,
448,
36918,
2848,
7,
26932,
448,
36918,
2848,
2599,
198,
220,
220,
220,
... | 3 | 93 |
"""
add-tables.py
A Python script that adds a few tables to Deephaven.
@copyright Deephaven Data Labs
"""
from pydeephaven import Session
import time
# Fix: sys.exit below raised NameError because sys was never imported.
import sys

session = None

# Simple retry loop in case the server tries to launch before Deephaven is ready
count = 0
max_count = 5
while (count < max_count):
    try:
        session = Session()
        count = max_count  # connected: leave the retry loop
    except Exception as e:
        print("Failed to connect to Deephaven... Waiting to try again")
        print(e)
        time.sleep(5)
        count += 1

# session stays None only if every attempt raised.
if session is None:
    sys.exit(f"Failed to connect to Deephaven after {max_count} attempts")

# Create two tiny tables and publish them under fixed names.
tableOne = session.empty_table(1)
tableTwo = session.empty_table(2)

session.bind_table(name="tableOne", table=tableOne)
session.bind_table(name="tableTwo", table=tableTwo)
| [
37811,
198,
2860,
12,
83,
2977,
13,
9078,
198,
198,
32,
11361,
4226,
326,
6673,
257,
1178,
8893,
284,
10766,
39487,
13,
198,
198,
31,
22163,
4766,
10766,
39487,
6060,
23500,
198,
37811,
198,
6738,
12972,
22089,
39487,
1330,
23575,
198,
... | 2.825 | 280 |
import os
from enum import Enum
| [
11748,
28686,
198,
6738,
33829,
1330,
2039,
388,
628
] | 3.666667 | 9 |
# Copyright (c) 2019 Eric Steinberger
import numpy as np
import sys
from PokerRL.cfr._MCCFRBase import MCCFRBase as _MCCFRBase
| [
2,
15069,
357,
66,
8,
13130,
7651,
15215,
21041,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
198,
198,
6738,
36157,
7836,
13,
66,
8310,
13557,
44,
4093,
10913,
14881,
1330,
337,
4093,
10913,
14881,
355,
4808,
44,
4093,
... | 2.977273 | 44 |
# Read three words, one per line.
s1 = input()
s2 = input()
s3 = input()
# Word-chain check: the last letter of each word must equal the first letter
# of the next, wrapping around from s3 back to s1.
if s1[-1] == s2[0] and s2[-1] == s3[0] and s3[-1] == s1[0] :
    print("good")
else :
    print("bad")
# NOTE(review): original Korean comment said "something seems off here" — the
# wrap-around condition (s3[-1] == s1[0]) may not match the intended problem
# statement; confirm.
| [
82,
16,
796,
5128,
3419,
198,
82,
17,
796,
5128,
3419,
198,
82,
18,
796,
5128,
3419,
198,
361,
264,
16,
58,
12,
16,
60,
6624,
264,
17,
58,
15,
60,
290,
264,
17,
58,
12,
16,
60,
6624,
264,
18,
58,
15,
60,
290,
264,
18,
58,
... | 1.583333 | 96 |
from data_loader import load_data, tokenizer
from models import BertForMultipleLabelSequenceClassificationWithFocalLoss
from transformers import AutoConfig
import torch
from tqdm.auto import tqdm
from transformers import get_scheduler
from transformers import AdamW
from sklearn.metrics import accuracy_score, f1_score
# COVID-news topic labels (Korean): confirmed cases, recovered cases, deaths,
# cluster infections, vaccine-related, quarantine guidelines, economic
# support, masks, international organizations, hospital-related.
label_list = ['확진자수','완치자수','사망여부','집단감염','백신관련','방역지침','경제지원','마스크','국제기구','병원관련']

if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file — confirm it is
    # provided elsewhere before running.
    main()
6738,
1366,
62,
29356,
1330,
3440,
62,
7890,
11,
11241,
7509,
198,
6738,
4981,
1330,
22108,
1890,
31217,
33986,
44015,
594,
9487,
2649,
3152,
37,
4374,
43,
793,
198,
198,
6738,
6121,
364,
1330,
11160,
16934,
198,
11748,
28034,
198,
6738... | 1.855422 | 249 |
"""Converting between different types of graphs"""
from copy import deepcopy
import math
from typing import Dict, List, Tuple, Union
import networkx as nx
import pandas as pd
from .exception import UnexpectedSelfLoopException
from .types import (
Edge,
EdgeFunction,
EdgeFunctionName,
EdgeList,
EdgeProperties,
MultiEdge,
Vertex,
VertexFunction,
VertexFunctionName,
VertexList,
)
def to_vertex_dataframe(graph: nx.Graph) -> pd.DataFrame:
    """Convert graph vertices to pandas dataframe

    Args:
        graph: Input graph

    Returns:
        pandas dataframe with vertex set as index
    """
    # One record per vertex: its name plus all of its attribute key/values.
    records = [{"vertex": name, **attrs} for name, attrs in graph.nodes(data=True)]
    return pd.DataFrame(records).set_index("vertex")
def asymmetric_from_directed(G: nx.DiGraph) -> nx.DiGraph:
    """Create asymmetric directed graph from directed graph

    Split every node u into two nodes u1 and u2.
    We add a directed arc between u1 and u2.
    Any previous inward edge v->u is now v->u1 and any outward edge from u is now u2->v.

    Args:
        G: Directed graph

    Returns:
        Directed asymmetric graph
    """
    asymmetric_graph = nx.DiGraph()
    # Nodes/edges are accumulated and added in one batch at the end.
    nodes_for_adding: List[Tuple[Vertex, Dict]] = []
    edges_for_adding: List[Tuple[Vertex, Vertex, Dict]] = []
    # deepcopy when not a view
    asymmetric_graph.graph.update(deepcopy(G.graph))
    # find the id of the biggest vertex
    biggest_vertex = biggest_vertex_id_from_graph(G)
    for vertex, data in G.nodes(data=True):
        # split the vertex into two
        head = split_head(biggest_vertex, vertex)
        tail = split_tail(biggest_vertex, vertex)
        # the data is copied to both vertices
        tail_data = data.copy()
        head_data = data.copy()
        # split the value of the prize (head gets the larger half for odd prizes)
        prize: int = data.get(VertexFunctionName.prize, 0)
        tail_data[VertexFunctionName.prize] = tail_prize(prize)
        head_data[VertexFunctionName.prize] = head_prize(prize)
        # add vertex to asymmetric graph with new data
        nodes_for_adding.append((head, head_data))
        nodes_for_adding.append((tail, tail_data))
        # add zero-cost edge from tail to head
        edge = (tail, head, {EdgeFunctionName.cost.value: 0})
        edges_for_adding.append(edge)
    for u, v, edge_data in G.edges(data=True):
        # add edge from head of u to tail of v with data
        # NOTE(review): edge_data dicts are shared with G (not copied), so
        # mutations propagate between the graphs — confirm intended.
        u_head = split_head(biggest_vertex, u)
        v_tail = split_tail(biggest_vertex, v)
        edge_uv = (u_head, v_tail, edge_data)
        edges_for_adding.append(edge_uv)
    # add nodes and edges then return graph
    asymmetric_graph.add_nodes_from(nodes_for_adding)
    asymmetric_graph.add_edges_from(edges_for_adding)
    return asymmetric_graph
def asymmetric_from_undirected(G: nx.Graph) -> nx.DiGraph:
    """Create asymmetric directed graph from undirected graph

    Args:
        G: Undirected graph

    Returns:
        Directed asymmetric graph
    """
    # Each undirected edge becomes a pair of arcs, then split as usual.
    return asymmetric_from_directed(G.to_directed())
def biggest_vertex_id_from_graph(G: nx.Graph) -> Vertex:
    """Return the vertex with the largest integer id

    Args:
        G: Graph

    Returns:
        Vertex with biggest id
    """
    # Iterating a graph yields its vertex ids.
    return max(node for node in G)
def get_original_from_split_vertex(
    biggest_vertex: Vertex, split_vertex: Vertex
) -> Vertex:
    """Return the original vertex id given a split vertex (may be head or tail)

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        split_vertex: A split vertex in asymmetric graph

    Returns:
        ID of the vertex in the original graph
    """
    offset = biggest_vertex + 1
    # Heads live in [2*(b+1), 3*(b+1)), tails in [b+1, 2*(b+1)).
    if is_vertex_split_head(biggest_vertex, split_vertex):
        return split_vertex - 2 * offset
    # else split tail
    return split_vertex - offset
def get_original_path_from_split_path(
    biggest_vertex: Vertex, split_path: VertexList
) -> VertexList:
    """Get the path in the original graph given a path of split vertices in the asymmetric graph

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        split_path: A path of split vertices in the asymmetric directed graph

    Returns:
        A path of vertices in the original graph
    """
    path: VertexList = []
    last_seen = -1
    for current in split_path:
        origin = get_original_from_split_vertex(biggest_vertex, current)
        is_tail = is_vertex_split_tail(biggest_vertex, current)
        is_head = is_vertex_split_head(biggest_vertex, current)
        # A tail always starts a visit of its original vertex; a head only
        # extends the path when its tail was not the previous entry.
        if is_tail or (is_head and last_seen != origin):
            path.append(origin)
        last_seen = origin
    return path
def is_split_vertex_pair(biggest_vertex: Vertex, tail: Vertex, head: Vertex) -> bool:
    """Does the arc (tail, head) represent a split vertex in the original graph?

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        tail: Tail of edge in directed graph
        head: Head of edge in directed graph

    Returns:
        True if the arc (tail, head) represents a split vertex in the original graph
    """
    # Head and tail of the same original vertex differ by exactly b+1.
    if head - tail != biggest_vertex + 1:
        return False
    return is_vertex_split_head(biggest_vertex, head) and is_vertex_split_tail(
        biggest_vertex, tail
    )
def is_vertex_split_tail(biggest_vertex: Vertex, vertex: Vertex) -> bool:
    """Is the vertex a tail in the asymmetric graph?

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        vertex: A potential tail of an edge in directed graph

    Returns:
        True if the vertex is a tail
    """
    # Tails occupy the half-open range [b+1, 2*(b+1)).
    lower = biggest_vertex + 1
    return lower <= vertex and vertex < 2 * lower
def is_vertex_split_head(biggest_vertex: Vertex, split_vertex: Vertex) -> bool:
    """Is the vertex a head in the asymmetric graph?

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        split_vertex: A potential head of an edge in directed graph

    Returns:
        True if the vertex is a head
    """
    # Heads occupy the half-open range [2*(b+1), 3*(b+1)).
    offset = biggest_vertex + 1
    return 2 * offset <= split_vertex and split_vertex < 3 * offset
def split_head(biggest_vertex: Vertex, original_vertex: Vertex) -> Vertex:
    """Get the split head of the vertex

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        original_vertex: Vertex in the original graph

    Returns:
        New split vertex that is a head of all arcs in the asymmetric graph
    """
    shift = biggest_vertex + 1
    return original_vertex + 2 * shift
def split_tail(biggest_vertex: Vertex, original_vertex: Vertex) -> Vertex:
    """Get the split tail of the vertex

    Args:
        biggest_vertex: The vertex with the biggest id in the original graph
        original_vertex: Vertex in the original graph

    Returns:
        New split vertex that is a tail of all arcs in the asymmetric graph
    """
    return original_vertex + biggest_vertex + 1
def head_prize(prize: int) -> int:
    """Get the prize of the split head

    The head receives the larger half of an odd prize (ceiling), so that
    ``head_prize(p) + tail_prize(p) == p``.

    Args:
        prize: The prize of a vertex

    Returns:
        Split head prize
    """
    # Fix: pure-integer ceiling division. The previous implementation went
    # through floats (math.ceil(prize / 2.0), int(prize / 2)), which loses
    # precision for prizes beyond 2**53.
    return -(-prize // 2)
def tail_prize(prize: int) -> int:
    """Get the prize of the split tail

    The tail receives the smaller half of an odd prize (floor), so that
    ``head_prize(p) + tail_prize(p) == p``.

    Args:
        prize: The prize of a vertex

    Returns:
        Split tail prize
    """
    # Fix: pure-integer floor division. The previous math.floor(prize / 2.0)
    # loses precision for prizes beyond 2**53.
    return prize // 2
def new_dummy_vertex(vertex: int, key: int, biggest: int) -> int:
    """New dummy vertex ID

    Args:
        vertex: Vertex ID
        key: Edge key
        biggest: Biggest vertex ID

    Returns:
        ID of a new negative dummy vertex if key is greater than one.
        Otherwise return the same vertex ID as the input.
    """
    if key <= 0:
        return vertex
    # Dummies are negative and unique per (vertex, key) pair.
    return -(biggest + 1) * (key - 1) - vertex - 1
def old_vertex_from_dummy(dummy: int, key: int, biggest) -> int:
    """Old vertex ID from the dummy vertex ID"""
    # Non-negative ids are real vertices and pass through unchanged.
    if dummy >= 0:
        return dummy
    # Invert the dummy encoding used by new_dummy_vertex.
    return -dummy - (biggest + 1) * (key - 1) - 1
def to_simple_undirected(G: nx.MultiGraph) -> nx.Graph:
    """Given an undirected multigraph, split multi edges to create a simple undirected graph.

    Args:
        G: Undirected networkx multi graph.

    Returns:
        Undirected networkx simple graph with no multi edges.

    Raises:
        TypeError: If the input graph is directed.
        UnexpectedSelfLoopException: If a self loop has key greater than zero.
        ValueError: If an edge has a negative key.

    Notes:
        Assumes the vertex ids are integers.
    """
    if not isinstance(G, nx.MultiGraph) and isinstance(G, nx.Graph):
        return G
    if isinstance(G, nx.DiGraph):
        raise TypeError("Directed graphs are not valid for this method")
    simple_graph = nx.Graph()
    # copy graph attributes to new graph
    # Fix: this previously iterated simple_graph.graph (always empty at this
    # point), so no attribute was ever copied; iterate the source graph.
    for key, value in G.graph.items():
        simple_graph.graph[key] = value
    # copy vertex attributes
    for v, data in G.nodes(data=True):
        simple_graph.add_node(v, **data)
    biggest = biggest_vertex_id_from_graph(G)
    for u, v, k, data in G.edges.data(keys=True):
        if u == v and k > 0:
            message = "Self loop found with key greater than zero: "
            message += "implies there is more than one self loop on this vertex."
            raise UnexpectedSelfLoopException(message)
        # the first multi edge - add all data to new graph edge
        if k == 0:
            simple_graph.add_edge(u, v, **data)
        # multi edge - create new vertex for the source if it does not yet exist
        elif k > 0:
            vertex_data = G.nodes[u]
            dummy = new_dummy_vertex(u, k, biggest)
            simple_graph.add_node(dummy, **vertex_data)
            simple_graph.add_edge(u, dummy, **data)
            simple_graph.add_edge(dummy, v, **data)
        else:
            raise ValueError("Negative key for edge.")
    return simple_graph
def split_edges(edge_list: EdgeList) -> List[Edge]:
    """Split each edge (u,v) into (u,w) and (w,v) via a fresh vertex w.

    Each input edge gets its own new negative vertex: the first edge uses
    -1, the second -2, and so on.

    Args:
        edge_list: List of edges or multi-edges.

    Returns:
        List of edges (size 2 tuple); twice as many entries as the input.
    """
    result = []
    for index, edge in enumerate(edge_list):
        fresh = -(index + 1)
        result.append((edge[0], fresh))
        result.append((fresh, edge[1]))
    return result
# Lookup from a split edge back to the original (possibly multi-) edge it came from.
LookupFromSplit = Dict[Edge, Union[Edge, MultiEdge]]
def lookup_from_split(edge_list: EdgeList, splits: List[Edge]) -> LookupFromSplit:
    """Build a lookup from each split edge to the original edge it replaces.

    Args:
        edge_list: Edge in original graph.
        splits: List of edges created by [split_edges][tspwplib.converter.split_edges];
            entries 2*i and 2*i+1 both stem from edge_list[i].

    Returns:
        Dictionary lookup from split edges to the original edges.
    """
    return {
        splits[2 * i + offset]: original
        for i, original in enumerate(edge_list)
        for offset in (0, 1)
    }
# Lookup from an original (possibly multi-) edge to the pair of split edges replacing it.
LookupToSplit = Dict[Union[Edge, MultiEdge], Tuple[Edge, Edge]]
def lookup_to_split(edge_list: EdgeList, splits: List[Edge]) -> LookupToSplit:
    """Build a lookup from each original edge to its two split edges.

    Args:
        edge_list: Edge in original graph.
        splits: List of edges created by [split_edges][tspwplib.converter.split_edges].

    Returns:
        Dictionary lookup from the original edges to a pair of split edges.
    """
    return {
        original: (splits[2 * i], splits[2 * i + 1])
        for i, original in enumerate(edge_list)
    }
def prize_from_weighted_edges(
    edge_weights: EdgeFunction, to_split: LookupToSplit
) -> VertexFunction:
    """Derive a vertex prize function from a weight function on the edges.

    The dummy vertex shared by the two split edges of an original edge
    receives that edge's weight as its prize.

    Args:
        edge_weights: Lookup from edges to weights.
        to_split: Lookup from original edges to pairs of split edges
            (see [lookup_to_split][tspwplib.converter.lookup_to_split]).

    Returns:
        Lookup from fake vertices to weight of original edge that the fake vertex represents.

    Raises:
        LookupError: If the two split edges of an edge do not share their
            middle vertex.
    """
    prizes = {}
    for edge, weight in edge_weights.items():
        head, tail = to_split[edge]
        middle = head[1]
        if middle != tail[0]:
            message = "Second vertex of first edge and first vertex of second edge "
            message += "must match in to_split_lookup"
            raise LookupError(message)
        prizes[middle] = weight
    return prizes
def split_edge_cost(
    edge_cost: EdgeFunction, to_split: LookupToSplit
) -> Dict[Edge, float]:
    """Spread the cost of each original edge evenly over its two split edges.

    Args:
        edge_cost: Lookup from edges to cost.
        to_split: Lookup from original edges to pairs of split edges
            (see [lookup_to_split][tspwplib.converter.lookup_to_split]).

    Returns:
        Lookup from split edges to cost.

    Notes:
        The cost is cast to a float; each split edge gets half the original.
    """
    return {
        half_edge: float(cost) / 2.0
        for edge, cost in edge_cost.items()
        for half_edge in to_split[edge]
    }
def split_graph_from_properties(
    edge_properties: EdgeProperties,
    edge_attr_to_split: str = "cost",
    edge_attr_to_vertex: str = "length",
    new_vertex_attr: str = "prize",
    old_edge_attr: str = "old_edge",
) -> nx.Graph:
    """Split edges with properties and create undirected simple graph.

    Args:
        edge_properties: Keys are edges. Values are dicts of edge attributes.
        edge_attr_to_split: Name of edge attribute. Assign half the value to each split edge.
        edge_attr_to_vertex: Name of edge attribute. Assign edge value to a new vertex attribute.
        new_vertex_attr: Name of the newly created vertex attribute.
        old_edge_attr: Name of the newly created attribute for the old edge ID.

    Returns:
        Undirected simple graph with edge attributes for cost, prize and old_edge.

    Notes:
        To get the original edge that a split edge represents, access the
        'old_edge' attribute.
    """
    # only transfer an attribute if every single edge carries it
    property_dicts = list(edge_properties.values())
    has_split_attr = all(edge_attr_to_split in data for data in property_dicts)
    has_vertex_attr = all(edge_attr_to_vertex in data for data in property_dicts)
    # split edges and create lookups in both directions
    edge_list = list(edge_properties.keys())
    splits = split_edges(edge_list)
    to_split = lookup_to_split(edge_list, splits)
    from_split = lookup_from_split(edge_list, splits)
    # build the graph, then assign prizes and costs
    graph = nx.Graph()
    graph.add_edges_from(splits)
    if has_vertex_attr:
        prize = prize_from_weighted_edges(
            {edge: props[edge_attr_to_vertex] for edge, props in edge_properties.items()},
            to_split,
        )
        # default every vertex to zero before writing the dummy-vertex prizes
        nx.set_node_attributes(graph, 0.0, name=new_vertex_attr)
        nx.set_node_attributes(graph, prize, name=new_vertex_attr)
    if has_split_attr:
        cost = split_edge_cost(
            {edge: props[edge_attr_to_split] for edge, props in edge_properties.items()},
            to_split,
        )
        nx.set_edge_attributes(graph, 0.0, name=edge_attr_to_split)
        nx.set_edge_attributes(graph, cost, name=edge_attr_to_split)
    nx.set_edge_attributes(graph, from_split, name=old_edge_attr)
    return graph
| [
37811,
3103,
48820,
1022,
1180,
3858,
286,
28770,
37811,
628,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
10688,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
309,
29291,
11,
4479,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
... | 2.525421 | 6,235 |
from datetime import datetime, timedelta
from typing import Optional
from fastapi import HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from passlib.context import CryptContext
from .models import crud
from pydantic import BaseModel
# Password hashing context: bcrypt for new hashes, older schemes flagged deprecated.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# NOTE(review): the JWT signing key and algorithm are hard-coded in source.
# Committing a signing key to the repository compromises every token it signs;
# for anything beyond local development this should come from configuration or
# an environment variable.
SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
ALGORITHM = "HS256"
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
3049,
15042,
1330,
14626,
16922,
11,
3722,
198,
6738,
3049,
15042,
13,
12961,
1330,
440,
30515,
17,
35215,
3856,
11258,
11,
440,
30515,
... | 2.939024 | 164 |
import warnings
from dataclasses import asdict, dataclass
from io import BytesIO
from math import sqrt
from pathlib import Path
from subprocess import PIPE, run
from typing import Callable, Dict, Iterable, List, Optional, Union, Tuple
import numpy as np
from lhotse.utils import Decibels, Pathlike, Seconds, SetContainingAnything, JsonMixin, YamlMixin, fastcopy
Channels = Union[int, List[int]]
# TODO: document the dataclasses like this:
# https://stackoverflow.com/a/3051356/5285891
@dataclass
class AudioSource:
    """
    AudioSource represents audio data that can be retrieved from somewhere.
    Supported sources of audio are currently:
    - 'file' (formats supported by librosa, possibly multi-channel)
    - 'command' [unix pipe] (must be WAVE, possibly multi-channel)
    """
    # 'file' or 'command' (validated at the top of load_audio)
    type: str
    # channel indices this source provides
    channels: List[int]
    # file path, or the shell command whose stdout yields WAVE data
    source: str
    def load_audio(
            self,
            offset_seconds: float = 0.0,
            duration_seconds: Optional[float] = None,
    ) -> np.ndarray:
        """
        Load the AudioSource (both files and commands) with librosa,
        accounting for many audio formats and multi-channel inputs.
        Returns numpy array with shapes: (n_samples) for single-channel,
        (n_channels, n_samples) for multi-channel.
        """
        assert self.type in ('file', 'command')
        source = self.source
        if self.type == 'command':
            if offset_seconds != 0.0 or duration_seconds is not None:
                # TODO(pzelasko): How should we support chunking for commands?
                # We risk being very inefficient when reading many chunks from the same file
                # without some caching scheme, because we'll be re-running commands.
                raise ValueError("Reading audio chunks from command AudioSource type is currently not supported.")
            # run the command through the shell and treat its stdout as an in-memory file
            source = BytesIO(run(self.source, shell=True, stdout=PIPE).stdout)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            # NOTE(review): read_audio is not defined in this chunk — presumably a
            # module-level helper wrapping the actual decoding; confirm its
            # offset/duration semantics against that helper.
            samples, sampling_rate = read_audio(source, offset=offset_seconds, duration=duration_seconds)
        # explicit sanity check for duration as librosa does not complain here
        if duration_seconds is not None:
            num_samples = samples.shape[0] if len(samples.shape) == 1 else samples.shape[1]
            available_duration = num_samples / sampling_rate
            if available_duration < duration_seconds - 1e-3:  # set the allowance as 1ms to avoid float error
                raise ValueError(
                    f'Requested more audio ({duration_seconds}s) than available ({available_duration}s)'
                )
        return samples.astype(np.float32)
# NOTE(review): stray @staticmethod with no function beneath it — as written it
# stacks onto the Recording class below. This looks like an artifact of method
# bodies being stripped from this file; restore the missing definition from the
# original module before relying on this code.
@staticmethod
@dataclass
class Recording:
    """
    Recording represents an AudioSource along with some metadata.
    """
    # unique recording identifier
    id: str
    sources: List[AudioSource]
    sampling_rate: int
    num_samples: int
    duration: Seconds
    @staticmethod
    def from_sphere(sph_path: Pathlike, relative_path_depth: Optional[int] = None) -> 'Recording':
        """
        Read a SPHERE file's header and create the corresponding ``Recording``.
        :param sph_path: Path to the sphere (.sph) file.
        :param relative_path_depth: optional int specifying how many last parts of the file path
            should be retained in the ``AudioSource``. By default writes the path as is.
        :return: a new ``Recording`` instance pointing to the sphere file.
        """
        # sphfile is imported lazily so the dependency is only needed for SPHERE input
        from sphfile import SPHFile
        sph_path = Path(sph_path)
        sphf = SPHFile(sph_path)
        return Recording(
            id=sph_path.stem,
            sampling_rate=sphf.format['sample_rate'],
            num_samples=sphf.format['sample_count'],
            duration=sphf.format['sample_count'] / sphf.format['sample_rate'],
            sources=[
                AudioSource(
                    type='file',
                    channels=list(range(sphf.format['channel_count'])),
                    source=(
                        # optionally keep only the last N parts of the path
                        '/'.join(sph_path.parts[-relative_path_depth:])
                        if relative_path_depth is not None and relative_path_depth > 0
                        else str(sph_path)
                    )
                )
            ]
        )
# NOTE(review): the decorator stack below (@property x2, @staticmethod) has no
# functions attached — it ends up decorating the RecordingSet class itself.
# Almost certainly the method bodies were stripped from this file; restore them
# from the original source.
@property
@property
@staticmethod
@dataclass
class RecordingSet(JsonMixin, YamlMixin):
    """
    RecordingSet represents a dataset of recordings. It does not contain any annotation -
    just the information needed to retrieve a recording (possibly multi-channel, from files
    or from shell commands and pipes) and some metadata for each of them.
    It also supports (de)serialization to/from YAML and takes care of mapping between
    rich Python classes and YAML primitives during conversion.
    """
    # mapping from recording id to Recording
    recordings: Dict[str, Recording]
    # NOTE(review): two bare @staticmethod decorators stacked on 'filter' — also an
    # extraction artifact; as written the method would not receive 'self' correctly.
    @staticmethod
    @staticmethod
    def filter(self, predicate: Callable[[Recording], bool]) -> 'RecordingSet':
        """
        Return a new RecordingSet with the Recordings that satisfy the `predicate`.
        :param predicate: a function that takes a recording as an argument and returns bool.
        :return: a filtered RecordingSet.
        """
        return RecordingSet.from_recordings(rec for rec in self if predicate(rec))
class AudioMixer:
    """
    Utility class to mix multiple raw audio into a single one.
    It pads the signals with zero samples for differing lengths and offsets.
    """
    def __init__(self, base_audio: np.ndarray, sampling_rate: int):
        """
        :param base_audio: The raw audio used to initialize the AudioMixer are a point of reference
            in terms of offset for all audios mixed into them.
        :param sampling_rate: Sampling rate of the audio.
        """
        self.tracks = [base_audio]
        self.sampling_rate = sampling_rate
        # NOTE(review): audio_energy is not defined in this chunk — presumably a
        # module-level helper returning total signal energy; confirm.
        self.reference_energy = audio_energy(base_audio)
    @property
    def unmixed_audio(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (num_tracks, num_samples), where each track is
        zero padded and scaled adequately to the offsets and SNR used in ``add_to_mix`` call.
        """
        return np.vstack(self.tracks)
    @property
    def mixed_audio(self) -> np.ndarray:
        """
        Return a numpy ndarray with the shape (1, num_samples) - a mono mix of the tracks
        supplied with ``add_to_mix`` calls.
        """
        return np.sum(self.unmixed_audio, axis=0, keepdims=True)
    def add_to_mix(
            self,
            audio: np.ndarray,
            snr: Optional[Decibels] = None,
            offset: Seconds = 0.0,
    ):
        """
        Add audio (only support mono-channel) of a new track into the mix.
        :param audio: An array of audio samples to be mixed in.
        :param snr: Signal-to-noise ratio, assuming `audio` represents noise (positive SNR - lower `audio` energy,
        negative SNR - higher `audio` energy)
        :param offset: How many seconds to shift `audio` in time. For mixing, the signal will be padded before
        the start with low energy values.
        :return:
        """
        assert audio.shape[0] == 1  # TODO: support multi-channels
        assert offset >= 0.0, "Negative offset in mixing is not supported."
        reference_audio = self.tracks[0]
        dtype = reference_audio.dtype
        num_samples_offset = round(offset * self.sampling_rate)
        current_num_samples = reference_audio.shape[1]
        audio_to_add = audio
        # When there is an offset, we need to pad before the start of the audio we're adding.
        if offset > 0:
            audio_to_add = np.hstack([
                np.zeros((1, num_samples_offset), dtype),
                audio_to_add
            ])
        incoming_num_samples = audio_to_add.shape[1]
        mix_num_samples = max(current_num_samples, incoming_num_samples)
        # When the existing samples are less than what we anticipate after the mix,
        # we need to pad after the end of the existing audio mixed so far.
        # Since we're keeping every track as a separate entry in the ``self.tracks`` list,
        # we need to pad each of them so that their shape matches when performing the final mix.
        if current_num_samples < mix_num_samples:
            for idx in range(len(self.tracks)):
                padded_audio = np.hstack([
                    self.tracks[idx],
                    np.zeros((1, mix_num_samples - current_num_samples), dtype)
                ])
                self.tracks[idx] = padded_audio
        # When the audio we're mixing in are shorter that the anticipated mix length,
        # we need to pad after their end.
        # Note: we're doing that non-efficiently, as it we potentially re-allocate numpy arrays twice,
        # during this padding and the offset padding before. If that's a bottleneck, we'll optimize.
        if incoming_num_samples < mix_num_samples:
            audio_to_add = np.hstack([
                audio_to_add,
                np.zeros((1, mix_num_samples - incoming_num_samples), dtype)
            ])
        # When SNR is requested, find what gain is needed to satisfy the SNR
        gain = 1.0
        if snr is not None:
            added_audio_energy = audio_energy(audio)
            target_energy = self.reference_energy * (10.0 ** (-snr / 10))
            # When mixing time-domain singals, we are working with root-power (field) quantities,
            # whereas the energy ratio applies to power quantities. To compute the gain correctly,
            # we need to take a square root of the energy ratio.
            gain = sqrt(target_energy / added_audio_energy)
        # self.mixed_audio = reference_audio + gain * audio_to_add
        self.tracks.append(gain * audio_to_add)
| [
11748,
14601,
198,
6738,
4818,
330,
28958,
1330,
355,
11600,
11,
4818,
330,
31172,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
10688,
1330,
19862,
17034,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
850,
14681,
1330,
350,
4061,... | 2.479104 | 3,972 |
# Please note that this script was based on the original code developed by MITRE ATT&CK team used for the generation of
# ATT&CK Navigator files. This script was slightly modified and configured to output JSON file instead.
# https://github.com/mitre/attack-navigator
# attack_layers_simple.py - the "hello, world" for ATT&CK Navigator layer generation
# Takes a simple CSV file containing ATT&CK technique IDs and counts of groups, software and articles/reports that reference this technique
# and generates an ATT&CK Navigator layer file with techniques scored and color-coded based on an algorithm
# This sample is intended to demonstrate generating layers from external data sources such as CSV files.
import argparse
import csv
import json
import sys
import os
# Static ATT&CK Navigator layer JSON fields
# Layer-file schema version and display metadata written into the output JSON.
VERSION = "2.0"
NAME = "ATT&CK with Empire"
DESCRIPTION = "ATT&CK Matrix Techniques used from PowerShell Empire"
DOMAIN = "Galactic Empire"
# Changed to function
if __name__ == '__main__':
    #layer.py executed as script
    # NOTE(review): generate() is not defined in this chunk — it must be defined
    # elsewhere in this module, otherwise this entry point raises NameError.
    generate()
2,
4222,
3465,
326,
428,
4226,
373,
1912,
319,
262,
2656,
2438,
4166,
416,
17168,
2200,
26195,
5,
34,
42,
1074,
973,
329,
262,
5270,
286,
201,
198,
2,
26195,
5,
34,
42,
13244,
23823,
3696,
13,
220,
770,
4226,
373,
4622,
9518,
290,... | 3.596667 | 300 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
from datetime import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import scrapy
from suumo_scrapy.items import SuumoScrapyItem, ArticleItemLoader
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
15881,
88,
13,
2815,
365,
742,
974,
669,
1330,
7502,
11627,
40450,
198,
6738,
15881,
88,
13,
2777,
4157,
1330,
327,
13132,
41294,
11,
14330,
198,
11748,
15881,
88,
198,
6738,
424,
43... | 3.416667 | 60 |
__author__ = 'outm'
| [
834,
9800,
834,
796,
705,
448,
76,
6,
198
] | 2.222222 | 9 |
"""
@brief test log(time=2s)
"""
import sys
import os
import unittest
import re
from pyquickhelper.loghelper import fLOG
from pyquickhelper.jenkinshelper.jenkins_server import JenkinsExt
from pyquickhelper.jenkinshelper.jenkins_helper import default_engines, setup_jenkins_server_yml
# Run this module's unittest test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
37811,
198,
31,
65,
3796,
220,
220,
220,
220,
220,
1332,
2604,
7,
2435,
28,
17,
82,
8,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
302,
198,
198,
6738,
12972,
24209,
2978,
525,
13,
... | 2.75 | 124 |
import abc
import json
import logging
import pathlib
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
import PIL.Image
import PIL.ImageFilter
from ..defaults import Defaults
from ..globals import Globals
if TYPE_CHECKING:
from .contents import Image
logger = logging.getLogger()
class BaseProxyManager(abc.ABC):
    """BUG:LOW It would be ideal to trigger Proxy image/color generation and
    loading i.e. the 'cache' method *after* the site has been built and
    validated. However it seems like proxy items become immutable after
    instantiation. In order for them to work properly, they must cached on
    instantiation. If 'cache' is run later in the build process, they show
    internal changes but these are undetectable from the Flask layer."""
    # NOTE(review): the decorator stack below has no method bodies between the
    # decorators — as written, all five decorators pile onto the single 'root'
    # property. This is almost certainly an artifact of abstract members being
    # stripped from this chunk; restore the missing defs from the original module.
    @abc.abstractmethod
    @property
    @abc.abstractmethod
    @property
    @property
    def root(self) -> pathlib.Path:
        """Returns an absolute path to the proxy's root data directory.
        Transforms the original image path:
            /site-name/pages/page-name/image-name.ext
        ...to the site-cache path:
            /site-name/site-cache/pages/page-name/image-name
        """
        return (
            Globals.site_paths.cache
            / Defaults.DIRECTORY_NAME_PAGES
            / self.image.page.directory_name
            / self.image.name
        )
| [
11748,
450,
66,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
3108,
8019,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4889,
540,
11,
7343,
11,
32233,
11,
309,
29291,
198,
198,
11748,
350,
4146,
13,
5159,
198,
11748,
350,
4... | 2.771543 | 499 |
class KeysConverter(TypeConverter,IComparer):
    """
    Provides a System.ComponentModel.TypeConverter to convert System.Windows.Forms.Keys objects to and from other representations.
    KeysConverter()
    """
    # NOTE: auto-generated IronPython interop stub for the .NET type
    # System.Windows.Forms.KeysConverter. Method bodies are intentionally 'pass';
    # the docstrings carry the .NET signatures and semantics. The real behavior
    # lives in the CLR — do not add Python logic here.
    def CanConvertFrom(self,*__args):
        """
        CanConvertFrom(self: KeysConverter,context: ITypeDescriptorContext,sourceType: Type) -> bool
        Returns a value indicating whether this converter can convert an object in the specified source
        type to the native type of the converter using the specified context.
        context: An System.ComponentModel.ITypeDescriptorContext that provides a format context,which can be
        used to extract additional information about the environment this converter is being invoked
        from. This parameter or properties of this parameter can be null.
        sourceType: The System.Type to convert from.
        Returns: true if the conversion can be performed; otherwise,false.
        """
        pass
    def CanConvertTo(self,*__args):
        """
        CanConvertTo(self: KeysConverter,context: ITypeDescriptorContext,destinationType: Type) -> bool
        Returns a value indicating whether this converter can convert an object in the specified source
        type to the native type of the converter using the specified context.
        context: An System.ComponentModel.ITypeDescriptorContext that provides a format context,which can be
        used to extract additional information about the environment this converter is being invoked
        from. This parameter or properties of this parameter can be null.
        destinationType: The System.Type to convert to.
        Returns: true if the conversion can be performed; otherwise,false.
        """
        pass
    def Compare(self,a,b):
        """
        Compare(self: KeysConverter,a: object,b: object) -> int
        Compares two key values for equivalence.
        a: An System.Object that represents the first key to compare.
        b: An System.Object that represents the second key to compare.
        Returns: An integer indicating the relationship between the two parameters.Value Type Condition A
        negative integer. a is less than b. zero a equals b. A positive integer. a is greater than b.
        """
        pass
    def ConvertFrom(self,*__args):
        """
        ConvertFrom(self: KeysConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object) -> object
        Converts the specified object to the converter's native type.
        context: An ITypeDescriptorContext that provides a format context,which can be used to extract
        additional information about the environment this converter is being invoked from. This
        parameter or properties of this parameter can be null.
        culture: A CultureInfo object to provide locale information.
        value: The object to convert.
        Returns: An object that represents the converted value.
        """
        pass
    def ConvertTo(self,*__args):
        """
        ConvertTo(self: KeysConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object
        Converts the specified object to the specified destination type.
        context: An System.ComponentModel.ITypeDescriptorContext that provides a format context,which can be
        used to extract additional information about the environment this converter is being invoked
        from. This parameter or properties of this parameter can be null.
        culture: A System.Globalization.CultureInfo to provide locale information.
        value: The System.Object to convert.
        destinationType: The System.Type to convert the object to.
        Returns: An System.Object that represents the converted value.
        """
        pass
    def GetConvertFromException(self,*args):
        """
        GetConvertFromException(self: TypeConverter,value: object) -> Exception
        Returns an exception to throw when a conversion cannot be performed.
        value: The System.Object to convert,or null if the object is not available.
        Returns: An System.Exception that represents the exception to throw when a conversion cannot be performed.
        """
        pass
    def GetConvertToException(self,*args):
        """
        GetConvertToException(self: TypeConverter,value: object,destinationType: Type) -> Exception
        Returns an exception to throw when a conversion cannot be performed.
        value: The System.Object to convert,or null if the object is not available.
        destinationType: A System.Type that represents the type the conversion was trying to convert to.
        Returns: An System.Exception that represents the exception to throw when a conversion cannot be performed.
        """
        pass
    def GetStandardValues(self,context=None):
        """
        GetStandardValues(self: KeysConverter,context: ITypeDescriptorContext) -> StandardValuesCollection
        Returns a collection of standard values for the data type that this type converter is designed
        for when provided with a format context.
        context: An System.ComponentModel.ITypeDescriptorContext that provides a format context,which can be
        used to extract additional information about the environment this converter is being invoked
        from. This parameter or properties of this parameter can be null.
        Returns: A System.ComponentModel.TypeConverter.StandardValuesCollection that holds a standard set of
        valid values,which can be empty if the data type does not support a standard set of values.
        """
        pass
    def GetStandardValuesExclusive(self,context=None):
        """
        GetStandardValuesExclusive(self: KeysConverter,context: ITypeDescriptorContext) -> bool
        Determines if the list of standard values returned from GetStandardValues is an exclusive list
        using the specified System.ComponentModel.ITypeDescriptorContext.
        context: A formatter context. This object can be used to extract additional information about the
        environment this converter is being invoked from. This may be null,so you should always check.
        Also,properties on the context object may also return null.
        Returns: true if the collection returned from erload:System.Windows.Forms.KeysConverter.GetStandardValues
        is an exhaustive list of possible values; otherwise,false if other values are possible. The
        default implementation for this method always returns false.
        """
        pass
    def GetStandardValuesSupported(self,context=None):
        """
        GetStandardValuesSupported(self: KeysConverter,context: ITypeDescriptorContext) -> bool
        Gets a value indicating whether this object supports a standard set of values that can be picked
        from a list.
        context: An System.ComponentModel.ITypeDescriptorContext that provides a format context,which can be
        used to extract additional information about the environment this converter is being invoked
        from. This parameter or properties of this parameter can be null.
        Returns: Always returns true.
        """
        pass
    def SortProperties(self,*args):
        """
        SortProperties(self: TypeConverter,props: PropertyDescriptorCollection,names: Array[str]) -> PropertyDescriptorCollection
        Sorts a collection of properties.
        props: A System.ComponentModel.PropertyDescriptorCollection that has the properties to sort.
        names: An array of names in the order you want the properties to appear in the collection.
        Returns: A System.ComponentModel.PropertyDescriptorCollection that contains the sorted properties.
        """
        pass
    def __cmp__(self,*args):
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
| [
4871,
26363,
3103,
332,
353,
7,
6030,
3103,
332,
353,
11,
2149,
3361,
11258,
2599,
201,
198,
37227,
201,
198,
47081,
257,
4482,
13,
21950,
17633,
13,
6030,
3103,
332,
353,
284,
10385,
4482,
13,
11209,
13,
8479,
82,
13,
40729,
5563,
... | 3.281766 | 2,424 |
import argparse
from typing import Any, Sequence
| [
11748,
1822,
29572,
198,
6738,
19720,
1330,
4377,
11,
45835,
628,
628,
628,
628,
628
] | 3.866667 | 15 |
from .....data.URL import QIWIWalletURLS
from .....connector.aiohttp_connector import Connector
from .....data_types.connector.request_type import PUT
from .....data_types.QIWIWallet import HookType, NotifyType
| [
6738,
11485,
986,
7890,
13,
21886,
1330,
1195,
40,
36326,
47152,
4261,
6561,
198,
6738,
11485,
986,
8443,
273,
13,
64,
952,
4023,
62,
8443,
273,
1330,
8113,
273,
198,
6738,
11485,
986,
7890,
62,
19199,
13,
8443,
273,
13,
25927,
62,
... | 3.212121 | 66 |
#!/usr/bin/env python2
from __future__ import print_function
import requests
import logging
import sys
# debug_mode()
# Allow the target URL to be supplied on the command line; default to a local dev server.
if len(sys.argv) > 1:
    url = sys.argv[1]
else:
    url = "http://localhost:5000/"
# first request
first_hit = requests.get(url)
first_json = first_hit.json()
# the response carries a 'token' plus exactly one remaining {title: next_url} pair
token = first_json.pop('token')
title, next_url = first_json.popitem()
print("My ID is {}".format(token))
# subsequent requests
done = False
link_titles = [title]
while not done:
    print("Accessing {}".format(next_url))
    hit = requests.post(next_url, json={'token': token})
    response = hit.json()
    if 'answer' in response:
        # terminal response: print the answer payload and the chain of titles we followed
        print(response)
        print('Link titles: {}'.format(', '.join(link_titles)))
        done = True
    else:
        # keep walking: each hop returns a fresh token and one {title: next_url} pair
        token = response.pop('token')
        title, next_url = response.popitem()
        link_titles.append(title)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
7007,
198,
11748,
18931,
198,
11748,
25064,
628,
198,
198,
2,
14257,
62,
14171,
3419,
198,
361,
18896,
7,
17597,
13,... | 2.587879 | 330 |
x = 1
# NOTE(review): 'tools' is not imported in this snippet — presumably injected by
# a host application's scripting environment; confirm before running standalone.
tools.showMessage(str(x))
87,
796,
352,
198,
198,
31391,
13,
12860,
12837,
7,
2536,
7,
87,
4008
] | 2.285714 | 14 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 11 10:27:44 2020
@author: usingh
Test various pyrpipe modules used with each other
"""
from pyrpipe import sra,qc,mapping,assembly,quant,tools
from pyrpipe import pyrpipe_utils as pu
from testingEnvironment import testSpecs
import os
# Test fixture: paths and accessions come from the shared test environment spec.
testVars=testSpecs()
# paired-end fastq files
fq1=testVars.fq1
fq2=testVars.fq2
rRNAfasta=testVars.rRNAfa
#srr='ERR3770564' #single end arabidopsis data
#srr='SRR978414' #small a thal paired end data
# SRA run accession exercised by this test
srr='SRR4113368'
workingDir=testVars.testDir
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
2365,
1367,
838,
25,
1983,
25,
2598,
12131,
198,
198,
31,
9800,
25,
1262,
71,
198,
... | 2.434978 | 223 |