content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
class DataPlane:
    """Helper class for interacting with the dataplane."""

    def create_asset(self, s3bucket, s3key):
        """Create an asset in the dataplane.

        :param s3bucket: S3 bucket of the asset
        :param s3key: S3 key of the asset
        :return: Dataplane response
        """
        path = "/create"
        resource = "/create"
        method = "POST"
        body = {"Input": {"S3Bucket": s3bucket, "S3Key": s3key}}
        dataplane_response = self.call_dataplane(path, resource, method, body)
        return dataplane_response

    def store_asset_metadata(self, asset_id, operator_name, workflow_id, results, paginate=False, end=False):
        """Store asset metadata in the dataplane.

        :param asset_id: The id of the asset
        :param operator_name: The name of the operator that created this metadata
        :param workflow_id: Workflow ID that generated this metadata
        :param results: The metadata itself, or what the result of the operator was

        Pagination params:
        :param paginate: Boolean to tell dataplane that the results will come in as pages
        :param end: Boolean to declare the last page in a set of paginated results
        :return: Dataplane response
        """
        path = "/metadata/{asset_id}".format(asset_id=asset_id)
        resource = "/metadata/{asset_id}"
        path_params = {"asset_id": asset_id}
        method = "POST"
        body = {"OperatorName": operator_name, "Results": results, "WorkflowId": workflow_id}

        # Only send pagination query params when the caller asked for them;
        # otherwise pass None so no query string is generated.
        query_params = {}
        if paginate or end:
            if paginate is True:
                query_params["paginated"] = "true"
            if end is True:
                query_params["end"] = "true"
        else:
            query_params = None

        dataplane_response = self.call_dataplane(path, resource, method, body, path_params, query_params)
        return dataplane_response

    def retrieve_asset_metadata(self, asset_id, operator_name=None, cursor=None):
        """Retrieve metadata from the dataplane.

        :param asset_id: The id of the asset
        :param operator_name: Optional parameter for filtering response to include only data
            generated by a specific operator
        :param cursor: Optional parameter for retrieving additional pages of asset metadata
        :return: Dataplane response
        """
        if operator_name:
            # Bug fix: the operator name must be a path segment. Previously the
            # literal string "operator" was sent ("/metadata/{id}/operator") and
            # the `operator=` kwarg passed to str.format() was silently ignored.
            path = "/metadata/{asset_id}/{operator}".format(asset_id=asset_id, operator=operator_name)
        else:
            path = "/metadata/{asset_id}".format(asset_id=asset_id)
        resource = "/metadata/{asset_id}"
        path_params = {"asset_id": asset_id}
        method = "GET"

        # A cursor requests the next page of a paginated result set.
        query_params = {}
        if cursor:
            query_params["cursor"] = cursor
        else:
            query_params = None

        dataplane_response = self.call_dataplane(path, resource, method, None, path_params, query_params)
        return dataplane_response
| [
4871,
6060,
3646,
1531,
25,
198,
220,
220,
220,
37227,
47429,
5016,
329,
24986,
351,
262,
1366,
14382,
37811,
628,
220,
220,
220,
825,
2251,
62,
562,
316,
7,
944,
11,
264,
18,
27041,
316,
11,
264,
18,
2539,
2599,
198,
220,
220,
22... | 2.478332 | 1,223 |
# Copyright 2021 Beijing DP Technology Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of common Haiku modules for use in protein folding."""
import haiku as hk
import jax.numpy as jnp
class Linear(hk.Module):
  """Protein folding specific Linear Module.

  This differs from the standard Haiku Linear in a few ways:
    * It supports inputs of arbitrary rank
    * Initializers are specified by strings
  """

  def __init__(self,
               num_output: int,
               initializer: str = 'linear',
               use_bias: bool = True,
               bias_init: float = 0.,
               name: str = 'linear'):
    """Constructs Linear Module.

    Args:
      num_output: number of output channels.
      initializer: What initializer to use, should be one of {'linear', 'relu',
        'zeros'}
      use_bias: Whether to include trainable bias
      bias_init: Value used to initialize bias.
      name: name of module, used for name scopes.
    """
    super().__init__(name=name)
    self.num_output = num_output
    self.initializer = initializer
    self.use_bias = use_bias
    self.bias_init = bias_init

  def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
    """Connects Module.

    Args:
      inputs: Tensor of shape [..., num_channel]

    Returns:
      output of shape [..., num_output]

    Raises:
      ValueError: if `initializer` is not one of 'linear', 'relu', 'zeros'.
    """
    n_channels = int(inputs.shape[-1])

    weight_shape = [n_channels, self.num_output]
    if self.initializer == 'linear':
      weight_init = hk.initializers.VarianceScaling(mode='fan_in', scale=1.)
    elif self.initializer == 'relu':
      weight_init = hk.initializers.VarianceScaling(mode='fan_in', scale=2.)
    elif self.initializer == 'zeros':
      weight_init = hk.initializers.Constant(0.0)
    else:
      # Bug fix: an unrecognized initializer previously fell through every
      # branch and raised a confusing NameError on `weight_init` below.
      raise ValueError(
          "Unknown initializer %r; expected one of 'linear', 'relu', 'zeros'."
          % self.initializer)

    weights = hk.get_parameter('weights', weight_shape, inputs.dtype,
                               weight_init)

    # this is equivalent to einsum('...c,cd->...d', inputs, weights)
    # but turns out to be slightly faster
    inputs = jnp.swapaxes(inputs, -1, -2)
    output = jnp.einsum('...cb,cd->...db', inputs, weights)
    output = jnp.swapaxes(output, -1, -2)

    if self.use_bias:
      bias = hk.get_parameter('bias', [self.num_output], inputs.dtype,
                              hk.initializers.Constant(self.bias_init))
      output += bias

    return output
| [
2,
15069,
33448,
11618,
27704,
8987,
1766,
1539,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
1378... | 2.605505 | 1,090 |
# form
from django import forms
# choices
from cooggerapp.choices import *
# models
from cooggerapp.models import (
Content,
UserFollow,
OtherInformationOfUsers,
ReportModel)
from django.db import models
from django.contrib.auth.models import User
| [
2,
1296,
198,
6738,
42625,
14208,
1330,
5107,
198,
198,
2,
7747,
198,
6738,
763,
519,
1362,
1324,
13,
6679,
1063,
1330,
1635,
198,
198,
2,
4981,
198,
6738,
763,
519,
1362,
1324,
13,
27530,
1330,
357,
198,
220,
220,
220,
14041,
11,
... | 2.98913 | 92 |
"""
@author StellaVerkijk
This script gathers predictions of the From Scratch Language Model for a data set of sentences where names are masked.
Filepaths are hardcoded since the data used could not be released to the public because of privacy issues
"""
import transformers
from transformers import pipeline, RobertaTokenizer, RobertaForMaskedLM, AutoTokenizer, AutoModel
from collections import Counter
import pandas as pd
import pickle
print("Loading model...")
tokenizer = RobertaTokenizer.from_pretrained("../../processing/from_scratch_final_model_new_vocab")
model = RobertaForMaskedLM.from_pretrained("../../processing/from_scratch_final_model_new_vocab")
fill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer)
print("Getting sentences...")
df = pd.read_csv("anon_specific_testset_eval.csv", delimiter = ';')
list_sens = df['sentences'].tolist()
list_of_ds = []
for sen in list_sens:
d = dict()
d['sen'] = sen
list_of_ds.append(d)
print("Making predictions...")
i = 0
for d in list_of_ds:
i+=1
pre_dicts = fill_mask(d['sen'], top_k=40)
tokens = []
for pred in pre_dicts:
tokens.append(pred['token_str'])
d['predictions'] = tokens
print("Adding all predictions together...")
all_predictions = []
for d in list_of_ds:
for item in d['predictions']:
all_predictions.append(item)
print(len(all_predictions))
print("Counting predictions...")
from collections import Counter
counts = Counter(all_predictions)
df = pd.DataFrame(list(counts.items()),columns = ['Prediction','times predicted'])
df = df.sort_values(by='times predicted', ascending=False)
df.to_csv("predictions_from_scratch_unseen_data.csv", sep = ';', index = None)
| [
37811,
198,
31,
9800,
45856,
53,
9587,
45961,
198,
1212,
4226,
43609,
16277,
286,
262,
3574,
1446,
36722,
15417,
9104,
329,
257,
1366,
900,
286,
13439,
810,
3891,
389,
29229,
13,
198,
8979,
6978,
82,
389,
1327,
40976,
1201,
262,
1366,
... | 2.886855 | 601 |
'''Base sequence classes.'''
import collections
import coral
from coral.sequence._sequence import Sequence
from coral.constants.molecular_bio import COMPLEMENTS
class NucleicAcid(Sequence):
    '''Abstract sequence container for a single nucleic acid sequence
    molecule.'''

    def __init__(self, sequence, material, circular=False, run_checks=True,
                 any_char='N'):
        '''
        :param sequence: Input sequence.
        :type sequence: str
        :param material: Material type (dna, rna)
        :type material: str
        :param circular: The topology of the sequence - if the ends connect,
                         (a circular sequence), set to True. Otherwise, set to
                         False. Enables operations like .rotate().
        :type circular: bool
        :param run_checks: Check inputs / formats (disabling increases speed):
                           alphabet check
                           case
        :param any_char: Character representing 'any', e.g. N for DNA.
        :type any_char: str
        :type run_checks: bool
        :returns: coral.sequence.Sequence instance.
        '''
        super(NucleicAcid, self).__init__(sequence, material,
                                          run_checks=run_checks,
                                          any_char=any_char)
        self.circular = circular

    def circularize(self):
        '''Circularize the sequence, if linear.

        :returns: A circularized version of the current sequence.
        :rtype: coral.sequence._sequence.Sequence
        '''
        duplicate = self.copy()
        duplicate.circular = True
        return duplicate

    def gc(self):
        '''Find the frequency of G and C in the current sequence.'''
        gc_count = sum(1 for base in self.seq if base in 'CG')
        return float(gc_count) / len(self)

    def is_rotation(self, other):
        '''Determine whether two sequences are the same, just at different
        rotations.

        :param other: The sequence to check for rotational equality.
        :type other: coral.sequence._sequence.Sequence
        '''
        if len(self) != len(other):
            return False
        return any(self.rotate(offset) == other
                   for offset in range(len(self)))

    def linearize(self, index=0):
        '''Linearize the Sequence at an index.

        :param index: index at which to linearize.
        :type index: int
        :returns: A linearized version of the current sequence.
        :rtype: coral.sequence._sequence.Sequence
        :raises: ValueError if the input is a linear sequence.
        '''
        if not self.circular and index != 0:
            raise ValueError('Cannot relinearize a linear sequence.')
        duplicate = self.copy()
        # A nonzero index snips the sequence by rejoining the two halves in
        # swapped order.
        if index:
            return duplicate[index:] + duplicate[:index]
        duplicate.circular = False
        return duplicate

    def locate(self, pattern):
        '''Find sequences matching a pattern.

        :param pattern: Sequence for which to find matches.
        :type pattern: str
        :returns: Indices of pattern matches.
        :rtype: list of ints
        '''
        if not self.circular:
            return super(NucleicAcid, self).locate(pattern)
        if len(pattern) >= 2 * len(self):
            raise ValueError('Search pattern longer than searchable ' +
                             'sequence.')
        # Extend with a partial copy so matches spanning the origin are found.
        extended = self + self[:len(pattern) - 1]
        return super(NucleicAcid, extended).locate(pattern)

    def mw(self):
        '''Calculate the molecular weight.

        :returns: The molecular weight of the current sequence in amu.
        :rtype: float
        '''
        base_counts = collections.Counter(self.seq.lower())
        mw_a = base_counts['a'] * 313.2
        mw_t = base_counts['t'] * 304.2
        mw_g = base_counts['g'] * 289.2
        mw_c = base_counts['c'] * 329.2
        mw_u = base_counts['u'] * 306.2
        if self.material == 'dna':
            return mw_a + mw_t + mw_g + mw_c + 79.0
        return mw_a + mw_u + mw_g + mw_c + 159.0

    def rotate(self, n):
        '''Rotate Sequence by n bases.

        :param n: Number of bases to rotate.
        :type n: int
        :returns: The current sequence reoriented at `index`.
        :rtype: coral.sequence._sequence.Sequence
        :raises: ValueError if applied to linear sequence or `index` is
                 negative.
        '''
        if not self.circular and n != 0:
            raise ValueError('Cannot rotate a linear sequence')
        shifted = self[-n:] + self[:-n]
        return shifted.circularize()

    def rotate_to(self, index):
        '''Orient Sequence to index (only applies to circular sequences).

        :param index: Position at which to re-zero the Sequence.
        :type index: int
        :returns: The current sequence reoriented at `index`.
        :rtype: coral.sequence._sequence.Sequence
        :raises: ValueError if applied to linear sequence or `index` is
                 negative.
        '''
        return self.rotate(-index)

    def tm(self, parameters='cloning'):
        '''Find the melting temperature.

        :param parameters: The tm method to use (cloning, santalucia98,
                           breslauer86)
        :type parameters: str
        '''
        return coral.analysis.tm(self, parameters=parameters)
| [
7061,
6,
14881,
8379,
6097,
2637,
7061,
198,
11748,
17268,
198,
11748,
29537,
198,
6738,
29537,
13,
43167,
13557,
43167,
1330,
45835,
198,
6738,
29537,
13,
9979,
1187,
13,
76,
2305,
10440,
62,
65,
952,
1330,
49269,
28957,
628,
198,
4871... | 2.227905 | 2,444 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from base import BaseObject
from nlusvc import ExecuteSparqlQuery
class CertificationHierarchyGenerator(BaseObject):
    """ Generate a mapping file for parent-child Certification relationships """

    def __init__(self,
                 is_debug: bool = True):
        """
        Created:
            6-Aug-2019
            craig.trim@ibm.com
            *   https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/629
        Updated:
            15-Oct-2019
            xavier.verges@es.ibm.com
            *   sorted lists to make changes in the generated files easier to track

        :param is_debug: when True, enable verbose debug behavior
        """
        BaseObject.__init__(self, __name__)
        # Accumulates the parent-child certification mapping.
        self._map = {}
        # Bug fix: honor the caller-supplied flag; previously this was
        # hard-coded to True, making the `is_debug` parameter dead.
        self._is_debug = is_debug
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
628,
198,
6738,
2779,
1330,
7308,
10267,
198,
6738,
299,
41790,
28435,
1330,
8393,
1133,
50,
1845,
13976,
20746,
628,
198,
4871,
... | 2.196481 | 341 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from future import standard_library
standard_library.install_aliases()
from builtins import object
import json
import requests
import threading
import uuid
import logging
from .nurest_response import NURESTResponse
from bambou import bambou_logger
# Sentinel used when no HTTP response/status was obtained at all.
HTTP_CODE_ZERO = 0
# 2xx success codes
HTTP_CODE_SUCCESS = 200
HTTP_CODE_CREATED = 201
HTTP_CODE_EMPTY = 204
HTTP_CODE_MULTI_STATUS = 207
# 3xx redirection; the connection retries these with `responseChoice=1`.
HTTP_CODE_MULTIPLE_CHOICES = 300
# 4xx client errors
HTTP_CODE_BAD_REQUEST = 400
HTTP_CODE_UNAUTHORIZED = 401
HTTP_CODE_PERMISSION_DENIED = 403
HTTP_CODE_NOT_FOUND = 404
HTTP_CODE_METHOD_NOT_ALLOWED = 405
HTTP_CODE_CONNECTION_TIMEOUT = 408
HTTP_CODE_CONFLICT = 409
HTTP_CODE_PRECONDITION_FAILED = 412
# Non-standard code; NURESTConnection treats it as an expired session and
# re-authenticates before retrying (see _make_request).
HTTP_CODE_AUTHENTICATION_EXPIRED = 419
# 5xx server errors
HTTP_CODE_INTERNAL_SERVER_ERROR = 500
HTTP_CODE_SERVICE_UNAVAILABLE = 503

# HTTP verbs used when building requests.
HTTP_METHOD_HEAD = 'HEAD'
HTTP_METHOD_POST = 'POST'
HTTP_METHOD_GET = 'GET'
HTTP_METHOD_PUT = 'PUT'
HTTP_METHOD_DELETE = 'DELETE'
class NURESTConnection(object):
    """ Connection that enables HTTP requests via the `requests` library. """

    def __init__(self, request, as_async, callback=None, callbacks=dict(), root_object=None):
        """ Initializes a new connection for a given request.

            NURESTConnection object is in charge of the HTTP call. It relies
            on the `requests` library.

            Args:
                request: the NURESTRequest to send
                as_async: when True, start() runs the request in a thread
                callback: the method that will be fired after sending
                callbacks: a dictionary of user callbacks. Should contain
                    local and remote callbacks
                root_object: optional object supplying enterprise/user/api-key
                    overrides for authentication headers
        """
        # NOTE(review): `callbacks=dict()` is a shared mutable default — every
        # connection created without an explicit dict shares one object.
        # Confirm no caller mutates it before changing.
        self._uses_authentication = True
        self._has_timeouted = False
        # self._is_cancelled = False
        self._ignore_request_idle = False
        self._xhr_timeout = 3000  # passed as `timeout=` to requests (see __make_request)
        self._response = None
        self._error_message = None
        self._transaction_id = uuid.uuid4().hex  # id returned to async callers
        self._request = request
        self._async = as_async
        self._callback = callback
        self._callbacks = callbacks
        self._user_info = None
        self._object_last_action_timer = None
        self._root_object = root_object

    # Properties

    @property
    def callbacks(self):
        """ Get callbacks

            Returns:
                It returns an array containing user callbacks
        """
        return self._callbacks

    @property
    def request(self):
        """ Get request. Read-only property

            Returns:
                Returns the NURESTRequest object
        """
        return self._request

    @property
    def transaction_id(self):
        """ Get transaction ID. Read-only property

            Returns:
                Returns the transaction ID
        """
        return self._transaction_id

    @property
    def response(self):
        """ Get response

            Returns:
                It returns the NURESTResponse object of the request
        """
        return self._response

    @response.setter
    def response(self, response):
        """ Set response

            Args:
                response: the NURESTResponse object
        """
        self._response = response

    @property
    def user_info(self):
        """ Get user info

            Returns:
                It returns additional user information
        """
        return self._user_info

    @user_info.setter
    def user_info(self, info):
        """ Set user info

            Args:
                info: Information to carry
        """
        self._user_info = info

    @property
    def timeout(self):
        """ Get timeout

            Returns:
                It returns the timeout time in seconds. Default is 3000.
        """
        return self._xhr_timeout

    @timeout.setter
    def timeout(self, timeout):
        """ Set timeout

            Args:
                timeout: Number of seconds before timeout
        """
        self._xhr_timeout = timeout

    @property
    def ignore_request_idle(self):
        """ Get ignore request idle

            Returns:
                It returns a boolean. By default ignore request idle is set
                to False.
        """
        return self._ignore_request_idle

    @ignore_request_idle.setter
    def ignore_request_idle(self, ignore):
        """ Set ignore request idle

            Args:
                ignore: boolean to ignore request idle
        """
        self._ignore_request_idle = ignore

    @property
    def has_timeouted(self):
        """ Get has timeouted

            Returns:
                Returns True if the request has timed out.
        """
        return self._has_timeouted

    @property
    def as_async(self):
        """ Get as_async

            Returns:
                Returns True if the request is asynchronous
        """
        return self._async

    # Methods

    def has_succeed(self):
        """ Check if the connection has succeeded

            Returns:
                Returns True if connection has succeeded.
                False otherwise.
        """
        status_code = self._response.status_code

        if status_code in [HTTP_CODE_ZERO, HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY, HTTP_CODE_MULTIPLE_CHOICES, HTTP_CODE_MULTI_STATUS]:
            return True

        if status_code in [HTTP_CODE_BAD_REQUEST, HTTP_CODE_UNAUTHORIZED, HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_NOT_FOUND, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_CONNECTION_TIMEOUT, HTTP_CODE_CONFLICT, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_INTERNAL_SERVER_ERROR, HTTP_CODE_SERVICE_UNAVAILABLE]:
            return False

        # NOTE(review): Exception('... %s.', status_code) never interpolates
        # the %s — the code becomes a second exception arg, not part of the
        # message. Left as-is; fixing would change the raised message.
        raise Exception('Unknown status code %s.', status_code)

    def has_callbacks(self):
        """ Check if the request has callbacks

            Returns:
                Returns YES if there is a local or remote callback
        """
        return len(self._callbacks) > 0

    def handle_response_for_connection(self, should_post=False):
        """ Check if the response succeeded or not.

            In case of error, this method also prints messages and sets
            an array of errors in the response object.

            Args:
                should_post: when False, non-fatal error codes are still
                    reported as success (caller handles them itself)

            Returns:
                Returns True if the response has succeeded, False otherwise
        """
        status_code = self._response.status_code
        data = self._response.data

        # TODO : Get errors in response data after bug fix : http://mvjira.mv.usa.alcatel.com/browse/VSD-2735
        if data and 'errors' in data:
            self._response.errors = data['errors']

        if status_code in [HTTP_CODE_SUCCESS, HTTP_CODE_CREATED, HTTP_CODE_EMPTY, HTTP_CODE_MULTI_STATUS]:
            return True

        if status_code == HTTP_CODE_MULTIPLE_CHOICES:
            if not should_post:
                return True
            return False

        if status_code in [HTTP_CODE_PERMISSION_DENIED, HTTP_CODE_UNAUTHORIZED]:
            if not should_post:
                return True
            return False

        if status_code in [HTTP_CODE_CONFLICT, HTTP_CODE_NOT_FOUND, HTTP_CODE_BAD_REQUEST, HTTP_CODE_METHOD_NOT_ALLOWED, HTTP_CODE_PRECONDITION_FAILED, HTTP_CODE_SERVICE_UNAVAILABLE]:
            if not should_post:
                return True
            return False

        if status_code == HTTP_CODE_INTERNAL_SERVER_ERROR:
            return False

        if status_code == HTTP_CODE_ZERO:
            bambou_logger.error("NURESTConnection: Connection error with code 0. Sending NUNURESTConnectionFailureNotification notification and exiting.")
            return False

        bambou_logger.error("NURESTConnection: Report this error, because this should not happen: %s" % self._response)
        return False

    # HTTP Calls

    def _did_receive_response(self, response):
        """ Called when a response is received; builds the NURESTResponse,
            logs it, and fires the user callback. """
        try:
            data = response.json()
        # NOTE(review): bare except deliberately swallows JSON decode errors
        # (empty bodies etc.) — but it also hides unrelated failures.
        except:
            data = None

        self._response = NURESTResponse(status_code=response.status_code, headers=response.headers, data=data, reason=response.reason)

        # Warn-level logging for any 3xx/4xx/5xx response, debug otherwise.
        level = logging.WARNING if self._response.status_code >= 300 else logging.DEBUG
        bambou_logger.info('< %s %s %s [%s] ' % (self._request.method, self._request.url, self._request.params if self._request.params else "", self._response.status_code))
        bambou_logger.log(level, '< headers: %s' % self._response.headers)
        bambou_logger.log(level, '< data:\n%s' % json.dumps(self._response.data, indent=4))

        self._callback(self)
        return self

    def _did_timeout(self):
        """ Called when a request has timed out. """
        bambou_logger.debug('Bambou %s on %s has timeout (timeout=%ss)..' % (self._request.method, self._request.url, self.timeout))
        self._has_timeouted = True

        if self.as_async:
            self._callback(self)
        # NOTE(review): the sync path returns self (which has no
        # status_code attribute), yet _make_request then reads
        # response.status_code — a timeout in sync mode likely raises
        # AttributeError. Confirm before relying on sync timeouts.
        else:
            return self

    def _make_request(self, session=None):
        """ Make a synchronous request, handling auth headers, the
            responseChoice redirect and expired-session retry. """
        # NOTE(review): NURESTSession is only imported inside start(); if
        # this method is called directly with session=None it raises
        # NameError here. Confirm intended call paths.
        if session is None:
            session = NURESTSession.get_current_session()

        self._has_timeouted = False

        # Add specific headers
        controller = session.login_controller
        enterprise = controller.enterprise
        user_name = controller.user
        api_key = controller.api_key
        certificate = controller.certificate

        # A root object (if set) overrides the session's identity.
        if self._root_object:
            enterprise = self._root_object.enterprise_name
            user_name = self._root_object.user_name
            api_key = self._root_object.api_key

        if self._uses_authentication:
            self._request.set_header('X-Nuage-Organization', enterprise)
            self._request.set_header('Authorization', controller.get_authentication_header(user_name, api_key))

        if controller.is_impersonating:
            self._request.set_header('X-Nuage-ProxyUser', controller.impersonation)

        headers = self._request.headers
        data = json.dumps(self._request.data)

        bambou_logger.info('> %s %s %s' % (self._request.method, self._request.url, self._request.params if self._request.params else ""))
        bambou_logger.debug('> headers: %s' % headers)
        bambou_logger.debug('> data:\n %s' % json.dumps(self._request.data, indent=4))

        response = self.__make_request(requests_session=session.requests_session, method=self._request.method, url=self._request.url, params=self._request.params, data=data, headers=headers, certificate=certificate)

        # At most one retry: either forcing a response choice on a 300, or
        # re-authenticating an expired session on a 419.
        retry_request = False

        if response.status_code == HTTP_CODE_MULTIPLE_CHOICES and 'responseChoice' not in self._request.url:
            if '?' in self._request.url:
                self._request.url += '&responseChoice=1'
            else:
                self._request.url += '?responseChoice=1'

            bambou_logger.debug('Bambou got [%s] response. Trying to force response choice' % HTTP_CODE_MULTIPLE_CHOICES)
            retry_request = True

        elif response.status_code == HTTP_CODE_AUTHENTICATION_EXPIRED and session:
            bambou_logger.debug('Bambou got [%s] response . Trying to reconnect your session that has expired' % HTTP_CODE_AUTHENTICATION_EXPIRED)
            session.reset()
            session.start()
            retry_request = True

        if retry_request:
            bambou_logger.info('> %s %s %s' % (self._request.method, self._request.url, self._request.params if self._request.params else ""))
            bambou_logger.debug('> headers: %s' % headers)
            bambou_logger.debug('> data:\n %s' % json.dumps(self._request.data, indent=4))
            response = self.__make_request(requests_session=session.requests_session, method=self._request.method, url=self._request.url, params=self._request.params, data=data, headers=headers, certificate=certificate)

        return self._did_receive_response(response)

    def __make_request(self, requests_session, method, url, params, data, headers, certificate):
        """ Encapsulate the `requests` call; retries once on SSLError and
            maps Timeout onto _did_timeout().
        """
        verify = False
        timeout = self.timeout

        try:  # TODO : Remove this ugly try/except after fixing Java issue: http://mvjira.mv.usa.alcatel.com/browse/VSD-546
            response = requests_session.request(method=method,
                                                url=url,
                                                data=data,
                                                headers=headers,
                                                verify=verify,
                                                timeout=timeout,
                                                params=params,
                                                cert=certificate)
        except requests.exceptions.SSLError:
            # Blind retry with identical arguments (see TODO above).
            try:
                response = requests_session.request(method=method,
                                                    url=url,
                                                    data=data,
                                                    headers=headers,
                                                    verify=verify,
                                                    timeout=timeout,
                                                    params=params,
                                                    cert=certificate)
            except requests.exceptions.Timeout:
                return self._did_timeout()
        except requests.exceptions.Timeout:
            return self._did_timeout()

        return response

    def start(self):
        """ Make an HTTP request with a specific method """
        # TODO : Use Timeout here and _ignore_request_idle
        from .nurest_session import NURESTSession
        session = NURESTSession.get_current_session()

        if self.as_async:
            thread = threading.Thread(target=self._make_request, kwargs={'session': session})
            # NOTE(review): `is_daemon` is not a threading.Thread attribute
            # (the real one is `daemon`); this line sets an unused attribute
            # and the thread stays non-daemon by default. Confirm intent.
            thread.is_daemon = False
            thread.start()
            return self.transaction_id

        return self._make_request(session=session)

    def reset(self):
        """ Reset the connection: drop request/response and issue a fresh
            transaction id.
        """
        self._request = None
        self._response = None
        self._transaction_id = uuid.uuid4().hex
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
1853,
11,
43757,
25791,
12,
25596,
1087,
3457,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
2... | 2.313518 | 6,695 |
import pybullet as p
import math
import pybullet_data
import time
import random
import numpy as np
import serial

# t in ms; the closer t is to 0, more accuracy but less smooth motion

# start of main program
# Gait/geometry constants for an 18-servo (6 legs x 3 joints) hexapod.
MAX_MOTIONS_IN_SEQUENCE = 4
NUM_OF_LEGS = 6
NUM_OF_JOINTS_PER_LEG = 3
NUM_OF_SERVOS = NUM_OF_LEGS * NUM_OF_JOINTS_PER_LEG
UNIQUE_THREAD_ID = random.randint(1, 10000)  # random id labelling this simulation instance
LENGTH_OF_CYCLE = 12
LENGTH_OF_START_SEQUENCE = 2 + 1
LENGTH_OF_SEQUENCE = LENGTH_OF_START_SEQUENCE + LENGTH_OF_CYCLE
LENGTH_OF_GAIT_STATE = NUM_OF_SERVOS + 1
STARTING_HEIGHT = 1.375
STARTING_Y = 0.01
TARGET_HEIGHT = STARTING_HEIGHT
firstCycleComplete = False
REAL_HEXAPOD_CONNECTED = False  # flip to True to drive the physical robot over serial
CONFIG_MODE = False

ssc32 = None
if REAL_HEXAPOD_CONNECTED:
    ssc32 = serial.Serial('COM3', 115200, timeout=2)  # open serial port
control_IDs = []

# PyBullet Init
# GUI when run as a script, headless DIRECT when imported (e.g. by a trainer).
physicsClient = None
if __name__ == "__main__":
    physicsClient = p.connect(p.GUI)
else:
    physicsClient = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
plane_ID = None
hexapod_ID = None
# resetPyBulletSimulation is defined elsewhere in the full file (not visible
# in this excerpt); presumably it loads the plane and hexapod URDFs.
resetPyBulletSimulation()

programStartTime = time.time()
servoRangeOfMotion = (math.pi * 3 / 8)
# Of the 24 link indices, every 4th is a foot; the rest are joints.
JOINT_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 != 0]
FEET_INDEXES = [x for x in range(0, 24) if (x + 1) % 4 == 0]
p.setRealTimeSimulation(0)  # step the simulation manually
print(f'PyBullet Instance ID: {UNIQUE_THREAD_ID}')

if __name__ == "__main__":
    main()  # main() is defined elsewhere in the full file
| [
11748,
12972,
15065,
1616,
355,
279,
198,
11748,
10688,
198,
11748,
12972,
15065,
1616,
62,
7890,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11389,
628,
198,
198,
2,
256,
287,
13845,
26,
262,
5699... | 2.430293 | 581 |
from django.shortcuts import render_to_response
from django.views.generic import RedirectView, TemplateView
from blog.models import BlogPost, BlogPostForm
from datetime import datetime
from django.http import HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
@csrf_exempt # This skips csrf validation. Use csrf_protect to have validation | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
2297,
1060,
7680,
11,
37350,
7680,
198,
6738,
4130,
13,
27530,
1330,
14001,
6307,
11,
14001,
6307,
8479,
198,
67... | 3.536364 | 110 |
import argparse
import gym
from kuka.env import KukaPoseEnv
import numpy as np
from pybullet_envs.bullet import KukaGymEnv
from util import write_video, ensure_folder
from PIL import Image
import multiprocessing
# Threshold on episode reward — presumably used by code elsewhere in the full
# file (not visible in this excerpt); confirm before changing.
REWARD_CUTOFF = 1.0

if __name__ == '__main__':
    # NOTE(review): `path` is assigned but not passed to main() here;
    # main/get_cli_args are defined elsewhere in the full file.
    path = "./tmp/video/"
    main(get_cli_args())
11748,
1822,
29572,
198,
11748,
11550,
198,
6738,
479,
14852,
13,
24330,
1330,
509,
14852,
47,
577,
4834,
85,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
15065,
1616,
62,
268,
14259,
13,
15065,
1616,
1330,
509,
14852,
38,
4948... | 2.736842 | 114 |
# -*- coding:utf-8 -*-
'''
Given an array, any pair of positions where the earlier number is greater
than the later number forms an "inversion" (reversed pair).

Design an efficient algorithm to count the inversions in a given array.

Given an int array A and its size n, return the number of inversions in A.
n is guaranteed to be at most 5000.
'''

# testcase
# Each of 1..7 forms an inversion with the trailing 0, so the answer is 7.
A = [1,2,3,4,5,6,7,0]
n = len(A)
res = 7
# AntiOrder is defined elsewhere in the full file (not visible here).
print(AntiOrder().count(A, n) == res)
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
17312,
231,
31660,
163,
119,
226,
46763,
108,
171,
120,
234,
43380,
117,
12859,
236,
17739,
114,
40792,
20015,
119,
35707,
237,
10310,
97,
10310,
10... | 0.84589 | 292 |
from typing import Optional
from fastapi import HTTPException, Request, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from google.auth.transport import requests
from google.oauth2 import id_token
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
3049,
15042,
1330,
14626,
16922,
11,
19390,
11,
3722,
198,
6738,
3049,
15042,
13,
12961,
1330,
7154,
51,
4537,
1457,
1634,
34,
445,
14817,
11,
14626,
3856,
11258,
198,
6738,
23645,
13,
18439,
13... | 4.017544 | 57 |
from string import ascii_lowercase, punctuation
| [
6738,
4731,
1330,
355,
979,
72,
62,
21037,
7442,
11,
21025,
2288,
628,
198
] | 3.571429 | 14 |
from abc import ABC, abstractmethod
class BaseOutputter(ABC):
    """Abstract interface that concrete game outputters must implement."""

    @abstractmethod
    def display_location_name(self, location_name):
        """Show the name of the player's current location."""
        raise NotImplementedError()

    @abstractmethod
    def display_game_text(self, text):
        """Show generic, non-specific game text."""
        raise NotImplementedError()

    @abstractmethod
    def display_person_reaction(self, person_name, text):
        """Show speech spoken by the named person."""
        raise NotImplementedError()
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
628,
198,
4871,
7308,
26410,
353,
7,
24694,
2599,
198,
220,
220,
220,
37227,
2025,
12531,
2779,
1398,
284,
4474,
5072,
353,
5050,
37811,
628,
220,
220,
220,
2488,
397,
8709,
24396,
198,
220... | 2.980676 | 207 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 14:40:14 2020

@author: ghiggi

Driver script: remaps raw GRIB files onto spherical samplings via
modules.my_remap.remap_grib_files (defined elsewhere in the project).
"""
import os
import sys
sys.path.append('../')  # make the sibling `modules` package importable
from modules.my_remap import remap_grib_files

### Define folder paths
proj_dir = "/ltenas3/DeepSphere/"
data_dir = "/ltenas3/DeepSphere/data/raw"
CDO_grids_dir = os.path.join(proj_dir, "grids", "CDO_grids")
CDO_grids_weights_dir = os.path.join(proj_dir, "grids", "CDO_grids_weights")
##-----------------------------------------------------------------------------.
# Define spherical samplings to remap (commented entries kept for reuse)
spherical_samplings = [
    # 400 km
    'Healpix_400km',
    # 'Icosahedral_400km',
    # 'O24',
    # 'Equiangular_400km',
    # 'Equiangular_400km_tropics',
    # 'Cubed_400km',
    ## 100 km
    # 'Healpix_100km'
]

# Define dataset to remap
datasets = ['IFS_HRES', 'IFS_ENS']

# Define variable types to remap
variable_types = ['dynamic']
##-----------------------------------------------------------------------------.
# Remap every (sampling, dataset, variable_type) combination
for sampling in spherical_samplings:
    for dataset in datasets:
        for variable_type in variable_types:
            remap_grib_files(data_dir=data_dir,
                             CDO_grids_dir=CDO_grids_dir,
                             CDO_grids_weights_dir=CDO_grids_weights_dir,
                             dataset=dataset,
                             sampling=sampling,
                             variable_type=variable_type,
                             precompute_weights=True,
                             normalization='fracarea',
                             compression_level=1,
                             n_threads=4,
                             force_remapping=False)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
4280,
2534,
1478,
25,
1821,
25,
1415,
12131,
198,
198,
31,
9800,
25,
24997,
6950,
... | 2.087485 | 823 |
import io
import sys
from threading import RLock
from segno.envelopes import ChecksumEnvelope
from segno.messaging import send, receive
#
# DEBUG: Test Functionality
_lock = RLock()
in_stream = sys.stdin.buffer # type: io.FileIO
out_stream = sys.stdout.buffer # type: io.FileIO
buffer = io.BytesIO()
send(buffer, b'Lorem ipsum.')
print(buffer.getvalue())
send(buffer, ChecksumEnvelope().wrap(b'Lorem ipsum.'))
print(buffer.getvalue())
print(receive(buffer))
buffer.seek(0)
print(receive(buffer))
print(ChecksumEnvelope().unwrap(receive(buffer)))
| [
11748,
33245,
198,
11748,
25064,
198,
6738,
4704,
278,
1330,
371,
25392,
198,
198,
6738,
384,
70,
3919,
13,
268,
1091,
274,
1330,
47719,
388,
4834,
1091,
68,
198,
6738,
384,
70,
3919,
13,
37348,
3039,
1330,
3758,
11,
3328,
198,
198,
... | 2.718447 | 206 |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 10:17:09 2015
@author: mje
"""
import mne
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
from mne.time_frequency import cwt_morlet
import numpy as np
from my_settings import *
# Using the same inverse operator when inspecting single trials Vs. evoked
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "MNE" # use dSPM method (could also be MNE or sLORETA)
freqs = np.arange(8, 13, 1)
n_cycle = freqs / 3.
conditions = ["ent_left", "ctl_left", "ent_right", "ctl_right"]
def ITC_over_trials(data, faverage=True):
"""Calculate the ITC over time.
Parameters
----------
data : numpy array
It should be trials x channels x frequencies x times.
faverage : bool
If true the average is returned, If false each frequency is returned.
Returns
-------
result : numpy array
The result is a numpy array with the length equal to the number of
trials.
"""
result = np.empty([data.shape[1], data.shape[-1]])
for freq in range(result.shape[0]):
for i in range(result.shape[1]):
result[freq, i] = \
np.abs(np.mean(np.exp(1j * (np.angle(data[:, freq, i])))))
if faverage:
result = result.mean(axis=0).squeeze()
return result
for subject in subjects:
# Load data
labels = mne.read_labels_from_annot(subject, parc='PALS_B12_Brodmann',
regexp="Bro",
subjects_dir=subjects_dir)
labels_occ = [labels[6], labels[7]]
inverse_operator = read_inverse_operator(mne_folder +
"%s-inv.fif" % subject)
src = mne.read_source_spaces(mne_folder + "%s-oct6-src.fif" % subject)
epochs = mne.read_epochs(epochs_folder +
"%s_ds_filtered_ica_mc_tsss-epo.fif" % subject)
# epochs.resample(250, n_jobs=4)
for condition in conditions:
stcs = apply_inverse_epochs(epochs[condition],
inverse_operator,
lambda2,
method,
pick_ori="normal")
for label in labels_occ:
label_ts = []
for j in range(len(stcs)):
label_ts.append(mne.extract_label_time_course(stcs[j],
labels=label,
src=src,
mode="mean_flip"))
label_ts = np.squeeze(np.asarray(label_ts))
tfr = cwt_morlet(label_ts, epochs.info["sfreq"], freqs,
use_fft=True, n_cycles=n_cycle)
np.save(tf_folder + "%s_%s_%s_MNE-tfr" % (subject, condition,
label.name),
tfr)
del stcs
del tfr
del epochs
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2447,
3261,
838,
25,
1558,
25,
2931,
1853,
198,
198,
31,
9800,
25,
285,
18015,
198,
37811,
198,
11748,
285,
710,
198,
6738,
285,
710,
13,... | 1.884592 | 1,655 |
import argparse
import json
from pathlib import Path
import pandas as pd
from processing.utils_for_data_writing import perform_processing
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
7587,
13,
26791,
62,
1640,
62,
7890,
62,
16502,
1330,
1620,
62,
36948,
628,
198,
198,
361,
11593,
36... | 3.232143 | 56 |
#!/usr/bin/env python3
"""
This tool can be used to generate and remove artificial datasets for use during
development.
"""
import logging
import random
import subprocess
import sys
import uuid
from datetime import datetime
# load database connection variables from the environment file
ENV = {}
for line in open('../.env'):
line = line.strip()
if not line or line[0] == '#':
continue
option, value = line.split('=')
ENV[option] = value.strip("'")
def random_string(length, letters="abcdefghijklmnopqrstuvwxyz"):
"""
Returns a random string of length `length` from a set of letters.
"""
return "".join([random.choice(letters) for _ in range(length)])
def execute_on_db(query):
"""
Executes a query in the database and returns the output.
Note that this function uses command line queries instead of using the API.
This is to be completely independent from the rest of the implementation.
"""
user = ENV.get('POSTGRES_USER', 'postgres')
database = ENV.get('POSTGRES_DB', 'asv')
cmd = ['docker', 'exec', 'asv-db', 'psql', '-U', user, database, '-c',
query]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, error = process.communicate()
if error:
logging.error('error: %s', error)
sys.exit(1)
# format the output as a list of dictionaries to be a bit easier to work
# with.
output = output.decode('utf-8')
output = output.split('\n')
headers = [h.strip() for h in output[0].split('|')]
retvals = []
for row in output[2:]:
if row.startswith('('):
# this is to check if we've gotten to the row-count-line at the end
# of queries, ex:
#
# dataset_id ...
# ---------- ...
# dataset_1 ...
# ...
# dataset_14
# (14 rows)
#
break
values = [v.strip() for v in row.split('|')]
retvals += [dict(zip(headers, values))]
return retvals
def print_datasets():
"""
This function prints a human readable list of which datasets are currently
available in the database.
"""
query = 'SELECT dataset_id AS id FROM dataset;'
datasets = execute_on_db(query)
real = [d for d in datasets if not d['id'].startswith('TEST')]
test = [d for d in datasets if d['id'].startswith('TEST')]
if not datasets:
logging.info("There are no datasets in the database")
if real:
logging.info("Real datasets:")
for dataset in real:
logging.info(" - %s", dataset['id'])
if test:
logging.info("Test datasets:")
for dataset in test:
logging.info(" - %s", dataset['id'])
def insert_random_sampling_event(dataset):
"""
Inserts a sampling event into the given dataset, with all required fields
containing random data.
"""
event_id = f"{dataset}-{uuid.uuid1().hex}"
event_date = datetime.today().strftime('%Y-%m-%d')
sampling_protocol = 'test'
sample_size_value = random.randint(10, 1000)
location_id = 'test'
decimal_latitude = random.random() * 180 - 90
decimal_longitude = random.random() * 360 - 180
execute_on_db(f"""INSERT INTO sampling_event(
event_id, dataset_id,
event_date, sampling_protocol,
sample_size_value, location_id,
decimal_latitude, decimal_longitude)
VALUES('{event_id}', '{dataset}', '{event_date}',
'{sampling_protocol}', '{sample_size_value}',
'{location_id}',
'{decimal_latitude}', '{decimal_longitude}');
""")
return event_id
def insert_random_mixs(dataset):
"""
inserts random values into the mixs table.
One entry for each sampling event
in the given dataset.
"""
events = execute_on_db(f"""SELECT event_id FROM sampling_event
WHERE dataset_id = '{dataset}';""")
for event_id in [event['event_id'] for event in events]:
target_gene = random_string(20)
target_subfragment = random_string(20)
pcr_primer_name_forward = random_string(20)
pcr_primer_forward = random_string(10, 'ACTG')
pcr_primer_name_reverse = random_string(20)
pcr_primer_reverse = random_string(10, 'ACTG')
env_broad_scale = random_string(20)
env_local_scale = random_string(20)
env_medium = random_string(20)
execute_on_db(f"""INSERT INTO mixs(event_id, target_gene,
target_subfragment,
pcr_primer_name_forward,
pcr_primer_forward,
pcr_primer_name_reverse,
pcr_primer_reverse,
env_broad_scale, env_local_scale,
env_medium
)
VALUES('{event_id}', '{target_gene}',
'{target_subfragment}',
'{pcr_primer_name_forward}',
'{pcr_primer_forward}',
'{pcr_primer_name_reverse}',
'{pcr_primer_reverse}', '{env_broad_scale}',
'{env_local_scale}', '{env_medium}');
""")
# I want all columns as variables in this function, so I want to have a lot
# of local variables.
#
# pylint: disable=too-many-locals
def insert_random_asvs(dataset, number, batch=100):
"""
Inserts `number` random asv's into the dataset, prefixed with the dataset
id so that they can be removed when the test dataset is purged.
"""
current_batch = []
for _ in range(int(number)):
asv_id = f'{dataset}-{uuid.uuid1().hex}'[:36]
length = random.randint(200, 2500)
sequence = random_string(length, "ACTG")
current_batch += [f"('{asv_id}', '{sequence}')"]
if len(current_batch) >= batch:
execute_on_db(f"""INSERT INTO asv(asv_id, asv_sequence)
VALUES {",".join(current_batch)};""")
current_batch = []
if current_batch:
execute_on_db(f"""INSERT INTO asv(asv_id, asv_sequence)
VALUES {",".join(current_batch)};""")
def insert_random_taxon_annotations(dataset, batch=100):
"""
Inserts a random taxon annotation for each asv associated with the given
dataset.
"""
asv_query = f"SELECT asv_id FROM asv WHERE asv_id LIKE '{dataset}%';"
asvs = [a['asv_id'] for a in execute_on_db(asv_query)]
current_batch = []
for asv_id in asvs:
status = 'valid'
kingdom = random.choice(['Bacteria', 'Fungi', 'Archaea', 'Protozoa',
'Chromista', 'Plantae', 'Animalia'])
phylum = random_string(20)
t_class = random_string(20)
oorder = random_string(10)
family = random_string(15)
genus = random_string(25)
specific_epithet = random_string(20)
date_identified = (f'{random.randint(1980,2020)}-'
f'{random.randint(1,12)}-'
f'{random.randint(1,28)}')
reference_db = random_string(20)
annotation_algorithm = random_string(20)
current_batch += [f"""('{asv_id}', '{status}', '{kingdom}', '{phylum}',
'{t_class}', '{oorder}', '{family}', '{genus}',
'{specific_epithet}', '{date_identified}',
'{reference_db}', '{annotation_algorithm}'
)"""]
if len(current_batch) >= batch:
execute_on_db(f"""INSERT INTO taxon_annotation(asv_id, status,
kingdom, phylum,
class, oorder,
family, genus,
specific_epithet,
date_identified,
reference_db,
annotation_algorithm
)
VALUES {",".join(current_batch)};""")
current_batch = []
if current_batch:
execute_on_db(f"""INSERT INTO taxon_annotation(asv_id, status,
kingdom, phylum,
class, oorder,
family, genus,
specific_epithet,
date_identified,
reference_db,
annotation_algorithm
)
VALUES {",".join(current_batch)};""")
def insert_random_occurences(event_id, dataset, occurences, batch=100):
"""
Inserts `number` random occurrences into the sampling event, prefixed with
the dataset id so that they can be removed when the test dataset is purged.
The occurrences will be assigned to a random asv from the dataset.
"""
asv_query = f"SELECT asv_id FROM asv WHERE asv_id LIKE '{dataset}%';"
asvs = [a['asv_id'] for a in execute_on_db(asv_query)]
current_batch = []
for _ in range(occurences):
occurence_id = f'{dataset}-{uuid.uuid1().hex}'
asv_id = random.choice(asvs)
organism_quantity = random.randint(1, 1000)
previous_identifications = ''
asv_id_alias = ''
current_batch += [f"""('{occurence_id}', '{event_id}', '{asv_id}',
'{organism_quantity}',
'{previous_identifications}',
'{asv_id_alias}')"""]
if len(current_batch) >= batch:
query = f"""INSERT INTO occurrence(occurrence_id, event_id, asv_id,
organism_quantity,
previous_identifications,
asv_id_alias)
VALUES {",".join(current_batch)};"""
execute_on_db(query)
current_batch = []
if current_batch:
query = f"""INSERT INTO occurrence(occurrence_id, event_id, asv_id,
organism_quantity,
previous_identifications,
asv_id_alias)
VALUES {",".join(current_batch)};"""
execute_on_db(query)
def insert_dataset(num_datasets, occurrences):
"""
Inserts a number of datasets, each having a set number of occurrences.
All affected tables will have random data.
"""
datasets = execute_on_db('SELECT * FROM dataset;')
test_sets = [d for d in datasets if d['dataset_id'].startswith('TEST')]
test_nums = [int(dataset['dataset_id'][5:]) for dataset in test_sets]
last_dataset = max(test_nums) if test_nums else 0
for dataset_num in range(last_dataset+1, last_dataset+num_datasets+1):
dataset = f'TEST_{dataset_num}'
logging.info("Inserting %s with %s occurrences", dataset, occurrences)
execute_on_db(f"""INSERT INTO dataset(dataset_id, provider_email)
VALUES('{dataset}', 'TEST');
""")
# insert sampling event
event_id = insert_random_sampling_event(dataset)
# and mixs
insert_random_mixs(dataset)
# insert asv's (half as many asv's as occurrences)
insert_random_asvs(dataset, occurrences/2)
# insert taxon_annotations for the asv's
insert_random_taxon_annotations(dataset)
# and finally occurrences
insert_random_occurences(event_id, dataset, occurrences)
def purge_test_datasets():
"""
Removes all datasets where the dataset_id start with TEST, as well as all
the associated data for these datasets.
"""
# remove occurrences
logging.info("Removing test occurrences")
execute_on_db("DELETE FROM occurrence WHERE occurrence_id LIKE 'TEST%'")
# remove taxon annotations
logging.info("Removing test taxon annotations")
execute_on_db("DELETE FROM taxon_annotation WHERE asv_id LIKE 'TEST%'")
# remove asvs
logging.info("Removing test asvs")
execute_on_db("DELETE FROM asv WHERE asv_id LIKE 'TEST%'")
# remove mixs
logging.info("Removing test mixs")
execute_on_db("DELETE FROM mixs WHERE event_id LIKE 'TEST%';")
# remove sampling events
logging.info("Removing test sampling events")
execute_on_db("DELETE FROM sampling_event WHERE dataset_id LIKE 'TEST%';")
logging.info("Removing test datasets")
execute_on_db("DELETE FROM dataset WHERE dataset_id LIKE 'TEST%';")
if __name__ == '__main__':
import argparse
PARSER = argparse.ArgumentParser(description=__doc__)
PARSER.add_argument("action",
help=("The database action to perform. Valid options "
"are 'list', 'insert', and 'purge'."))
PARSER.add_argument("--datasets", "-d", type=int, default=1,
help=("sets the number of test datasets to insert "
"into the database, when running 'insert'"))
PARSER.add_argument("--occurrences", "-o", type=int, default=10000,
help=("sets the number of occurrences to insert to "
"new test datasets"))
PARSER.add_argument("--host", default='http://localhost:5000',
help="sets the host for testing endpoints")
PARSER.add_argument("--replicates", "-r", type=int, default=100,
help=("sets the number of replicate requests when "
"timing endpoints"))
ARGS = PARSER.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
if ARGS.action == 'list':
print_datasets()
elif ARGS.action == 'insert':
insert_dataset(ARGS.datasets, ARGS.occurrences)
elif ARGS.action == 'purge':
purge_test_datasets()
else:
logging.error("Unknown action '%s'", ARGS.action)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
1212,
2891,
460,
307,
973,
284,
7716,
290,
4781,
11666,
40522,
329,
779,
1141,
198,
31267,
13,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
850,
14... | 1.957851 | 7,687 |
import requests
import os
import linecache
| [
11748,
7007,
198,
11748,
28686,
198,
11748,
1627,
23870,
628,
628
] | 4.181818 | 11 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-18 16:42
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1433,
319,
13130,
12,
486,
12,
1507,
1467,
25,
3682,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,... | 2.696429 | 56 |
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import pyqtSignal
from gui.running_item_widget import Ui_Form
from wizard.vars import defaults
from wizard.tools import log
from wizard.tools import utility as utils
from wizard.prefs.main import prefs
logger = log.pipe_log(__name__)
prefs = prefs()
| [
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
11,
33734,
14055,
11,
33734,
8205,
72,
198,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
14055,
1330,
12972,
39568,
11712,
282,
198,
198,
6738,
11774,
13,
20270,
62,
9186,
62,
42655,
... | 2.916667 | 108 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Maeve Kennedy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
import sys
import re
import csv
from enum import Enum
to_csv(sys.argv[1])
| [
198,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
1584,
34673,
303,
10401,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428... | 3.694864 | 331 |
from django import forms
from .models import RestaurantLocation
from .validator import validate_category
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
764,
27530,
1330,
26078,
14749,
198,
6738,
764,
12102,
1352,
1330,
26571,
62,
22872,
628,
198
] | 4.652174 | 23 |
class TypeDescriptionProviderAttribute(Attribute, _Attribute):
"""
Specifies the custom type description provider for a class. This class cannot be inherited.
TypeDescriptionProviderAttribute(typeName: str)
TypeDescriptionProviderAttribute(type: Type)
"""
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *__args):
"""
__new__(cls: type,typeName: str)
__new__(cls: type,type: Type)
"""
pass
TypeName = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the type name for the type description provider.
Get: TypeName(self: TypeDescriptionProviderAttribute) -> str
"""
| [
4871,
5994,
11828,
29495,
33682,
7,
33682,
11,
4808,
33682,
2599,
201,
198,
220,
220,
220,
37227,
201,
198,
18291,
6945,
262,
2183,
2099,
6764,
10131,
329,
257,
1398,
13,
770,
1398,
2314,
307,
19552,
13,
201,
198,
201,
198,
220,
201,
... | 2.748538 | 342 |
# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from zvmsdk import config
LOGGER = Logger('ZVMSDK')
LOG = LOGGER.getlog()
| [
2,
15069,
2177,
11,
7908,
19764,
11421,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846... | 3.266055 | 218 |
# This program counts from 0 to 9
counter = 0
while counter < 10:
print(counter)
counter = counter + 1
print("All done!") | [
2,
770,
1430,
9853,
422,
657,
284,
860,
198,
198,
24588,
796,
657,
198,
4514,
3753,
1279,
838,
25,
198,
220,
220,
220,
3601,
7,
24588,
8,
198,
220,
220,
220,
3753,
796,
3753,
1343,
352,
198,
4798,
7203,
3237,
1760,
2474,
8
] | 3.023256 | 43 |
# -*- coding: utf-8 -*-
"""Test error handling."""
import frost.error
import flask
import werkzeug.exceptions
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
14402,
4049,
9041,
526,
15931,
198,
198,
11748,
21682,
13,
18224,
198,
11748,
42903,
198,
11748,
266,
9587,
2736,
1018,
13,
1069,
11755,
628,
628,
198
] | 2.804878 | 41 |
# coding: utf-8
# # Programm for generating fragmentation diagrams in mass spectrometry
# ## Function filtering data from .csv file - returns plottable pandas documents
# In[1]:
# ## Function plotting the final pandas documents and saving copies
# In[2]:
# ### main imports
# In[3]:
import os
import time
import numpy as np
from numpy import trapz
import pandas as pd
from scipy.signal import savgol_filter
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import pickle as pl
# ### setting constants and file path
# In[4]:
#time = time.strftime("%d%m%Y") # should be in accordance with date of measure - if not, change it!
time = '28092018'
plantname = 'Wein'
filename = 'LCMS_Wein_PQD_28092018_777PQD-Version3'
filepath = 'RawFiles/'+time+'/'+plantname+'/'+filename+'.csv'
version = 'Version3'
plant = input("Specify plant: ")
category = input("Specify catabolite type: ")
catabolite = input("Specify fragmented mass: ")
fragmentation_mode = input("Specify fragmentation mode: ")
catabolites_string = input("Specify [M]-fragments of above catabolite: ")
catabolites = catabolites_string.split(",")
highest_value_overall = 0
ms_info_overall = int(catabolites[0])
# In[6]:
plot_diag(catabolites, plant, category, version, catabolite, fragmentation_mode)
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
6118,
76,
329,
15453,
42965,
37067,
287,
2347,
5444,
398,
11973,
198,
198,
2,
22492,
15553,
25431,
1366,
422,
764,
40664,
2393,
532,
5860,
458,
1252,
540,
19798,
292,
4963,
198... | 2.943844 | 463 |
import glob
import os
import shutil
import configuration
if __name__ == "__main__":
print("start")
MAHNOB_HCI()
print("end")
| [
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
198,
11748,
8398,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,
3601,
7203,
9688,
4943,
628,
220,
220,
220,
8779,
39,
45,
9864,... | 2.636364 | 55 |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
| [
11748,
28686,
201,
198,
11748,
25064,
201,
198,
201,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
28686,
13,
6978,
13,
22179,
7,
17597,
13,
6978,
58,
15,
4357,
705,
492,
6,
4008,
201,
198,
201
] | 2.135135 | 37 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from unittest import TestCase
from src.utils.derivation_utils import toCamelCase
# limitations under the License.
| [
2,
15069,
12131,
3012,
11419,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743... | 3.777143 | 175 |
import sys
import re
lines = open(sys.argv[1], 'r')
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if len(line) > 0:
print(len([m.start() for m in re.finditer('(?=<--<<|>>-->)', line)]))
lines.close()
| [
11748,
25064,
201,
198,
11748,
302,
201,
198,
6615,
796,
1280,
7,
17597,
13,
853,
85,
58,
16,
4357,
705,
81,
11537,
201,
198,
1640,
1627,
287,
3951,
25,
201,
198,
220,
220,
220,
1627,
796,
1627,
13,
33491,
10786,
59,
77,
3256,
101... | 2.166667 | 114 |
import cloudscraper
import requests
import os
import csv
from bs4 import BeautifulSoup
# https://www.geeksforgeeks.org/implementing-web-scraping-python-beautiful-soup/
# create a cloudscraper instance
scraper = cloudscraper.create_scraper()
base_url = 'https://www.fanfiction.net/'
# urls = ['https://www.fanfiction.net/anime/Card-Captor-Sakura/?&srt=1&lan=1&r=103&p=2', 'https://www.fanfiction.net/anime/Card-Captor-Sakura/?&srt=1&lan=1&r=103',
# 'https://www.fanfiction.net/anime/Card-Captor-Sakura/?&srt=1&lan=1&r=103&p=3', 'https://www.fanfiction.net/anime/Card-Captor-Sakura/?&srt=1&lan=1&r=103&p=4']
urls = ['https://www.fanfiction.net/comic/Marvel/?&srt=1&lan=1&r=103&p=2', 'https://www.fanfiction.net/comic/Marvel/?&srt=1&lan=1&r=103',
'https://www.fanfiction.net/comic/Marvel/?&srt=1&lan=1&r=103&p=3', 'https://www.fanfiction.net/comic/Marvel/?&srt=1&lan=1&r=103&p=4']
fanfics = []
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for url in urls:
response = scraper.get(url).text
response_html = BeautifulSoup(response, 'html5lib')
table = response_html.find('div', attrs={'id': 'content_wrapper_inner'})
for row in table.findAll('div', attrs={'class': 'z-list zhover zpointer'}):
fanfic = {}
fanfic['title'] = row.find('a', attrs={'class': 'stitle'}).text
fanfic['picture'] = row.img['src']
fanfic['author'] = row.select_one("a[href*='u']").text
fanfic['synopsis'] = row.select_one(
"div[class='z-indent z-padtop']").text.split('Rated')[0]
# fanfic['options'] = row.find('div', attrs={'class': 'z-padtop2 xgray'}).text
fanfic['classement'] = row.find(
'div', attrs={'class': 'z-padtop2 xgray'}).text.split('-')[0]
fanfic['language'] = row.find(
'div', attrs={'class': 'z-padtop2 xgray'}).text.split('-')[1]
fanfic['genre'] = row.find(
'div', attrs={'class': 'z-padtop2 xgray'}).text.split('-')[2]
fanfic['link_fanfic'] = row.a['href']
fanfics.append(fanfic)
# print(row.prettify())
# print(fanfics)
print(fanfic)
for fanfic in fanfics:
filename = os.path.join(
BASE_DIR, 'fanfics/management/commands/fanfictions_scraping_2.csv')
with open(filename, 'w', newline='') as f:
w = csv.DictWriter(f, ['title', 'picture', 'author', 'synopsis',
'classement', 'language', 'genre', 'link_fanfic'])
w.writeheader()
for fc in fanfics:
w.writerow(fc)
# filename = 'fanfictions_scraping.csv'
# with open(filename, 'w', newline='') as f:
# w = csv.DictWriter(f, ['title', 'picture', 'author', 'synopsis',
# 'classement', 'language', 'genre', 'link_fanfic'])
# w.writeheader()
# for fanfic in fanfics:
# w.writerow(fanfic)
| [
11748,
6279,
1416,
38545,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
198,
2,
3740,
1378,
2503,
13,
469,
2573,
30293,
2573,
13,
2398,
14,
320,
26908,
278,
12,
12384... | 2.03569 | 1,457 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import collections as _collections
import os as _os
import proton as _proton
import proton.handlers as _handlers
import proton.reactor as _reactor
import uuid as _uuid
import shutil as _shutil
import subprocess as _subprocess
import sys as _sys
import time as _time
import tempfile as _tempfile
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--host", metavar="HOST", default="127.0.0.1",
help="Listen for connections on HOST (default 127.0.0.1)")
parser.add_argument("--port", metavar="PORT", default=5672, type=int,
help="Listen for connections on PORT (default 5672)")
parser.add_argument("--id", metavar="ID",
help="Set the container identity to ID (default is generated)")
parser.add_argument("--ready-file", metavar="FILE",
help="The file used to indicate the server is ready")
# parser.add_argument("--user", metavar="USER",
# help="Require USER")
# parser.add_argument("--password", metavar="SECRET",
# help="Require SECRET")
args = parser.parse_args()
broker = _Broker(args.host, args.port, id=args.id, ready_file=args.ready_file)
try:
broker.run()
except KeyboardInterrupt:
pass
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
... | 2.917293 | 798 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 8], "Requires PyTorch >= 1.8"
def get_model_zoo_configs() -> List[str]:
    """
    Return a list of configs to include in package for model zoo. Copy over these configs inside
    detectron2/model_zoo.
    """
    # Resolve both endpoints to absolute paths so the symlink stays valid.
    here = path.dirname(path.realpath(__file__))
    src_dir = path.join(here, "configs")
    dst_dir = path.join(here, "detectron2", "model_zoo", "configs")
    # Symlink the config directory into the package for a cleaner pip install;
    # first remove any stale symlink/directory left by a previous build.
    if path.exists(src_dir):
        if path.islink(dst_dir):
            os.unlink(dst_dir)
        elif path.isdir(dst_dir):
            shutil.rmtree(dst_dir)
    if not path.exists(dst_dir):
        try:
            os.symlink(src_dir, dst_dir)
        except OSError:
            # Symlinks may be unavailable (e.g. on Windows); copy instead.
            shutil.copytree(src_dir, dst_dir)
    yaml_cfgs = glob.glob("configs/**/*.yaml", recursive=True)
    py_cfgs = glob.glob("configs/**/*.py", recursive=True)
    return yaml_cfgs + py_cfgs
# For projects that are relative small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
    "detectron2.projects.point_rend": "projects/PointRend/point_rend",
    "detectron2.projects.deeplab": "projects/DeepLab/deeplab",
    "detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
}
# NOTE(review): get_version() and get_extensions() are not defined in this
# chunk — presumably defined earlier in the file; confirm before editing.
setup(
    name="detectron2",
    version=get_version(),
    author="FAIR",
    url="https://github.com/facebookresearch/detectron2",
    description="Detectron2 is FAIR's next-generation research "
    "platform for object detection and segmentation.",
    # Project packages (PROJECTS) are mapped into the detectron2 namespace.
    packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
    package_dir=PROJECTS,
    package_data={"detectron2.model_zoo": get_model_zoo_configs()},
    python_requires=">=3.6",
    install_requires=[
        # These dependencies are not pure-python.
        # In general, avoid adding more dependencies like them because they are not
        # guaranteed to be installable by `pip install` on all platforms.
        # To tell if a package is pure-python, go to https://pypi.org/project/{name}/#files
        "Pillow>=7.1",  # or use pillow-simd for better performance
        "matplotlib",  # TODO move it to optional after we add opencv visualization
        "pycocotools>=2.0.2",  # corresponds to https://github.com/ppwwyyxx/cocoapi
        # Do not add opencv here. Just like pytorch, user should install
        # opencv themselves, preferrably by OS's package manager, or by
        # choosing the proper pypi package name at https://github.com/skvark/opencv-python
        # The following are pure-python dependencies that should be easily installable
        "termcolor>=1.1",
        "yacs>=0.1.8",
        "tabulate",
        "cloudpickle",
        "tqdm>4.29.0",
        "tensorboard",
        # Lock version of fvcore/iopath because they may have breaking changes
        # NOTE: when updating fvcore/iopath version, make sure fvcore depends
        # on compatible version of iopath.
        "fvcore>=0.1.5,<0.1.6",  # required like this to make it pip installable
        "iopath>=0.1.7,<0.1.10",
        "future",  # used by caffe2
        "pydot",  # used to save caffe2 SVGs
        "dataclasses; python_version<'3.7'",
        "omegaconf>=2.1,<=2.2.0",
        "hydra-core>=1.1",
        "black==21.4b2",
        "scipy>1.5.1",
        # If a new dependency is required at import time (in addition to runtime), it
        # probably needs to exist in docs/requirements.txt, or as a mock in docs/conf.py
    ],
    extras_require={
        # optional dependencies, required by some features
        "all": [
            "shapely",
            "pygments>=2.2",
            "psutil",
            "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
        ],
        # dev dependencies. Install them by `pip install 'detectron2[dev]'`
        "dev": [
            "flake8==3.8.1",
            "isort==4.3.21",
            "flake8-bugbear",
            "flake8-comprehensions",
        ],
    },
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
6738,
28686,
1330,
3108,
198,
6738,
900,
37623,
... | 2.430808 | 1,980 |
SLICES_RES=[[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], 
[], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [0], [0], [0], [0], [0], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [0], [0], [0], [0], [0], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [1, 2], [1], [1], [1], [1, 2], [1], [1], [1], [1], [1], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [1, 2], [1], [1], [1], [1, 2], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [1], [1], [1], [1], [1, 0], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [2], [2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], 
[2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], [2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], [2], [2], [2], [2, 1], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [2], [2], [2], [2], [2], [2], [2], [2], [2], [2, 1], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [2], [2], [2], [2, 0], [2, 1, 0], [2, 3], [2], [2], [2], [2, 3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [3], [3], [3], [3], [3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [3], [3], [3], [3], [3], [0], [0], [0], [0], [0], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [0], [0], [0], [0], [0], [], [], [], [], [], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], 
[0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [0], [0], [0], [0], [0], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1, 0], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [1, 2], [1], [1], [1], [1, 2], [1], [1], [1], [1], [1], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [1], [1], [1], [1], [1], [], [], [], [], [], [1, 2], [1], [1], [1], [1, 2], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [], [], [], [], [], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [1], [1], [1], [1], [1, 0], [1, 2, 3], [1, 3], [1], [1], [1, 2, 3], [2], [2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], [2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], [2], [2], [2, 0], [2, 1, 0], [], [], [], [], [], [2], [2], [2], [2], [2, 1], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [2], [2], [2], [2], [2], [2], [2], [2], [2], [2, 1], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [2], [2], [2], [2], [2], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [], [], [], [], [], [2, 3], [2], [2], [2], [2, 3], [2], [2], [2], [2, 0], [2, 1, 0], [2, 3], [2], [2], [2], [2, 3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], 
[], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [3], [3], [3], [3], [3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [3], [3], [3], [3], [3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], 
[], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [], [], [], [], [], [3], [3], [3], [3], [3], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [], [], [], [], [], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [0], [0], [0], [0], [0], [3], [3], [3], [3], [3], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [3], [3], [3], [3, 1], [3, 2, 1], [], [], [], [], [], [3], [3], [3], [3], [3, 2], [0], [0], [0], [0], [0], [3], [3], [3], [3], [3], [0, 1], [0], [0], [0], [0, 1], [], [], [], [], [], [0, 1, 2], [0, 2], [0], [0], [0, 1, 2], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [], [], [], [], [], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3], [3], [3], [3, 0], [3, 1], [3, 2, 1, 0], [0, 1, 2, 3], [0, 2], [0, 3], [0], [0, 1, 2, 3]]
# Slice-parameter grids used to enumerate test cases: every combination of
# start, end and step (None == argument omitted) applied to the list LL.
# SLICES_RES above presumably holds the expected result for each combination,
# in the same enumeration order — TODO confirm against the consuming test code.
START= [-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, None]
END= [-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, None]
STEP= [-5, -4, -3, -2, -1, 1, 2, 3, 4, None]
LL=[0, 1, 2, 3]
| [
8634,
34444,
62,
19535,
28,
30109,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
4357,
685,
... | 1.766407 | 6,674 |
from .plane_wave import PlaneWave
from .anisotropic_scattering import AnisotropicScattering
from .isotropic_scattering import IsotropicScattering
from .util import save_field, errornorm, norm
| [
6738,
764,
14382,
62,
19204,
1330,
36829,
39709,
198,
6738,
764,
272,
271,
46084,
62,
1416,
16475,
1330,
1052,
271,
46084,
3351,
16475,
198,
6738,
764,
271,
46084,
62,
1416,
16475,
1330,
1148,
46084,
3351,
16475,
198,
6738,
764,
22602,
... | 3.764706 | 51 |
import json
import logging
import boto3
from typing import Dict, Any, Optional, Tuple
# Module-level logger, named after this module per the logging convention.
logger = logging.getLogger(__name__)
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
275,
2069,
18,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
11,
32233,
11,
309,
29291,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.205128 | 39 |
#!/usr/bin/env python3
import amino
import os
import getpass
os.system('clear')
print("\033[1;32m ______ __ __ ")
print("\033[1;32m| \ | \ | \ ")
print("\033[1;32m \$$$$$$ _______ __ __ \$$ _| $$_ ______ ")
print("\033[1;32m | $$ | \| \ / \| \| $$ \ / \ ")
print("\033[1;32m | $$ | $$$$$$$\\$$\ / $$| $$ \$$$$$$ | $$$$$$\ ")
print("\033[1;32m | $$ | $$ | $$ \$$\ $$ | $$ | $$ __ | $$ $$")
print("\033[1;32m _| $$_ | $$ | $$ \$$ $$ | $$ | $$| \| $$$$$$$$")
print("\033[1;32m| $$ \| $$ | $$ \$$$ | $$ \$$ $$ \$$ \ ")
print("\033[1;32m \$$$$$$ \$$ \$$ \$ \$$ \$$$$ \$$$$$$$")
print("\033[1;32m ")
print("\033[1;32m ")
print("\033[1;31m ")
print("\033[1;31m __ __ ")
print("\033[1;31m| \ | \ ")
print("\033[1;31m| $$____ ______ _| $$_ ")
print("\033[1;31m| $$ \ / \| $$ \ ")
print("\033[1;31m| $$$$$$$\| $$$$$$\\$$$$$$ ")
print("\033[1;31m| $$ | $$| $$ | $$ | $$ __ ")
print("\033[1;31m| $$__/ $$| $$__/ $$ | $$| \ ")
print("\033[1;31m| $$ $$ \$$ $$ \$$ $$ ")
print("\033[1;31m \$$$$$$$ \$$$$$$ \$$$$ \033[1;32m script by \033[1;36mkira_xc ")
print('\n\033[0m')
client=amino.Client()
ss=0
sz=25
nuum=0
tst=False
while tst==False:
try:
email=input("\033[1;93m# your email : \033[0m")
password=getpass.getpass("\033[1;93m# your password : \033[0m")
client.login(email=email,password=password)
tst=True
except:
tst=False
print("\033[1;93m# verify email or password\033[0m")
exx=input("\033[1;93m# to be continue ?\033[1;92m y/n \033[0m: \033[0m")
if exx=='n' or exx=='N' or exx=='no':
os._exit(1)
tst=False
while tst==False:
try:
infoos=input("\033[1;93m#give me url of group : \033[0m")
infoo=client.get_from_code(infoos)
tst=True
if infoo.objectType!=12:
print ("\033[1;93m#not chat url !\033[0m")
tst=False
except:
tst=False
print("\033[1;93m# verify your url \033[0m")
if tst==False:
exx=input("\033[1;93m# to be continue ?\033[1;92m y/n \033[0m: \033[0m")
if exx=='n' or exx=='N' or exx=='no':
os._exit(1)
chatId=infoo.objectId
comId=infoo.path[1:infoo.path.index("/")]
sub_client=amino.SubClient(comId=comId,profile=client.profile)
# Menu: choose the source of members to invite (1=online, 2=followers, 3=new).
swich=0
tst=False
while tst==False:
    try:
        tst=True
        swich=int(input("\033[1;93mchoose : \n\033[1;92m1 \033[1;93m- online members \n\033[1;92m2\033[1;93m - followers of user \n\033[1;92m3 \033[1;93m- new members \n\033[1;92mwhich one \033[1;93m: \033[0m"))
        # NOTE(review): swich == 0 passes this validation but matches no
        # branch below — the check should probably be swich<1.
        if swich<0 or swich>3:
            print("\033[1;93mplease ... choose 1 or 2 or 3 \033[0m")
            tst=False
    except :
        print("\n\033[1;93mchoose a number\033[0m ")
        tst=False
# Ask how many members to invite at most.
tst=False
while tst==False:
    try:
        tst=True
        maxo=int(input("\n\033[1;93m# what maximum member ? : \033[0m"))
    except:
        tst=False
        print("\033[1;93mno .... \n type a number exmple :\033[1;92m 400 \033[0m")
    if tst==False:
        tobb=input("to be continue ? y/n : ")
        if tobb=="n" or tobb=="N":
            os._exit(1)
# Bulk-invite members to the chosen chat, paging through the API 25 at a time.
cpt=0
if swich==1:
    # Source 1: members currently online in the community.
    nemmm=0
    cpt=0
    while maxo>nemmm and len(sub_client.get_online_users(start=nemmm,size=25).profile.userId)!=0:
        lista=sub_client.get_online_users(start= nemmm,size= 25)
        # BUG FIX: the original iterated "Tass(lista)" — an undefined name that
        # raised NameError. The user-id list lives at lista.profile.userId
        # (the same attribute the loop condition above checks).
        for userId in lista.profile.userId:
            try:
                sub_client.invite_to_chat(userId=userId,chatId=chatId)
                cpt=cpt+1
                print(cpt , "\033[1;93m ) \033[1;92m- \033[1;93muser id\033[1;92m =\033[0m ",userId)
            except Exception:
                # Best effort: skip members who cannot be invited
                # (already in chat, blocked, rate-limited, ...).
                pass
        nemmm=nemmm+25
elif swich==2:
    # Source 2: followers of a given profile — first ask for the profile URL.
    tst=False
    while tst==False:
        try:
            link=input("\033[1;93m# give me link of profile \033[1;92m: \033[0m")
            linko=client.get_from_code(link)
            tst=True
            if linko.objectType!=0:
                print (" \033[1;93mnot profile url !\033[0m")
                tst=False
            # BUG FIX: the original sliced linko.path using the "/" index of
            # infoo.path; use linko's own path to extract its community id.
            fchg=linko.path[1:linko.path.index("/")]
            if fchg!=comId:
                tst=False
                print ("\033[1;93mis not profile of this community !\033[0m")
        except Exception:
            tst=False
            print("\033[1;93m# verify your url \033[0m")
        if tst==False:
            exx=input("\033[1;93m# to be continue ?\033[1;92m y/n \033[0m: \033[0m")
            if exx=='n' or exx=='N' or exx=='no':
                os._exit(1)
    userIdf=linko.objectId
    nemmm=0
    cpt=0
    while maxo>nemmm and len(sub_client.get_user_followers(userId=userIdf,start=nemmm,size=25).userId)!=0:
        listf=sub_client.get_user_followers(userId=userIdf,start= nemmm,size= 25)
        # BUG FIX: "Tass2" was undefined; the follower ids are listf.userId.
        for userId in listf.userId:
            try:
                sub_client.invite_to_chat(userId=userId, chatId=chatId)
                cpt=cpt+1
                print(cpt , "\033[1;93m ) \033[1;92m- \033[1;93muser id \033[1;92m= \033[0m",userId)
            except Exception:
                pass
        nemmm=nemmm+25
elif swich==3:
    # Source 3: the newest community members.
    nemmm=0
    cpt=0
    while maxo>nemmm and len(sub_client.get_all_users(start=nemmm,size=25).profile.userId)!=0:
        # BUG FIX: the original always fetched page start=0, re-inviting the
        # same 25 users on every pass; advance with the page cursor instead.
        listn=sub_client.get_all_users(start=nemmm,size=25)
        for userId in listn.profile.userId:
            try:
                sub_client.invite_to_chat(userId=userId,chatId=chatId)
                cpt=cpt+1
                print(cpt , "\033[1;93m ) \033[1;92m-\033[1;93m user id \033[1;92m= \033[0m",userId)
            except Exception:
                pass
        nemmm=nemmm+25
print("\033[1;92mall done !\033[0m")
os._exit(1)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
23206,
198,
11748,
28686,
198,
11748,
651,
6603,
198,
418,
13,
10057,
10786,
20063,
11537,
198,
4798,
7203,
59,
44427,
58,
16,
26,
2624,
76,
44435,
220,
220,
220,
220,
220,
... | 1.665766 | 3,701 |
"""Functions used repeatedly to improve the convenience and readability of source code are described here."""
from colors import color
from typing import Any
from typing import AnyStr
from typing import List
from typing import NoReturn
def colored_print(msg: Any, colorname: str, end: str = '\n') -> None:
    """
    Apply color to the print function.

    Parameters:
        msg (Any): Message to print
        colorname (str): Name of the color to apply
        end (str): Same as end parameter of print function
    """
    # Fixed: the docstring documented a nonexistent "color_name" parameter,
    # and the return annotation was NoReturn although the function returns
    # normally (NoReturn means "never returns", e.g. always raises).
    print(color(msg, colorname), end=end)
def pretty_print(obj: Any) -> None:
    """
    Print *obj* as a two-column table.

    Dicts are rendered with a "key" column; lists, tuples and sets are
    converted to an index -> value mapping and rendered with an "index"
    column. Any other type prints nothing.

    Parameters:
        obj (Any): Object to render.
    """
    if type(obj) is dict:
        header = 'key'
        table = obj
    elif type(obj) in (list, tuple, set):
        header = 'index'
        table = dict(zip(range(len(obj)), obj))
    else:
        return
    print('┌' + '―' * 15 + '┬' + '―' * 15 + '┐')
    print('│' + header.center(15) + '│' + 'value'.center(15) + '│')
    # Fixed: the original called an undefined _print() here (NameError at
    # runtime) and never emitted the rows; render them plus a bottom border.
    for key, value in table.items():
        print('│' + str(key).center(15) + '│' + str(value).center(15) + '│')
    print('└' + '―' * 15 + '┴' + '―' * 15 + '┘')
def get_command_black_list() -> List[str]:
    """
    Get a list of commands to exclude from execution.

    Returns:
        list[str]: List of commands to exclude from execution.
    """
    # Fixed annotation: AnyStr is a constrained TypeVar meant for generic
    # functions; a plain list of strings is List[str].
    # The empty string presumably guards against blank input lines and 'dir'
    # is handled elsewhere — TODO confirm against the caller.
    return ['', 'dir']
def equals(obj1, obj2, *, deep=False) -> bool:
    """
    Compare two objects and report the result as a boolean.

    Parameters:
        obj1 (object): First object to compare.
        obj2 (object): Second object to compare.
        deep (bool): Scope of the comparison. When False (default), the
            objects' values are compared with ``==``; when True, their
            identities are compared with ``is``.

    Returns:
        bool: The comparison value of two objects.
    """
    if deep:
        return obj1 is obj2
    return bool(obj1 == obj2)
if __name__ == "__main__":
pretty_print({'a': '1', 'b': '2', 'cdsmfefuiguh': 12348234723})
pretty_print([1111, 2434, 443, 43244, 235, 612233, 'asdjksdjfyj'])
pretty_print((1111, 2434, 443, 43244, 235, 612233, 'asdjksdjfyj'))
pretty_print({1111, 2434, 443, 43244, 235, 612233, 'asdjksdjfyj'})
| [
37811,
24629,
2733,
973,
7830,
284,
2987,
262,
15607,
290,
1100,
1799,
286,
2723,
2438,
389,
3417,
994,
526,
15931,
198,
198,
6738,
7577,
1330,
3124,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
4377,
13290,
198,
6738,
19720,
1... | 2.406977 | 1,032 |
from model import *
from data import *
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from PIL import Image
from resizeimage import resizeimage
from skimage import color
from skimage import io
import cv2
from matplotlib import pyplot as plt
import numpy as np
import glob
from array import array
import statistics
from splitter import *
# Pre-process the dataset in place.
# Labels: threshold at 127 so the masks are strictly binary (0/255).
for filename in glob.glob('data/membrane/train/label/*.png'):
    im = cv2.imread(filename)
    ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY)
    cv2.imwrite(filename, thresh1)
# Training images: load as grayscale and histogram-equalize for contrast.
for filename in glob.glob('data/membrane/train/image/*.png'):
    im = cv2.imread(filename,0)
    im = cv2.equalizeHist(im)
    cv2.imwrite(filename, im)
# Test images: same contrast enhancement.
for filename in glob.glob('data/membrane/test/*.png'):
    im = cv2.imread(filename,0)
    im = cv2.equalizeHist(im)
    cv2.imwrite(filename, im)
"""upper is for contrast enhancement of images"""
# Augmentation settings passed to the Keras-style data generator.
data_gen_args = dict(rotation_range=0.6,
                    width_shift_range=0.07,
                    height_shift_range=0.07,
                    shear_range=0.09,
                    zoom_range=0.07,
                    horizontal_flip=True,
                    fill_mode='nearest')
target_size=(1024,1024)
# Train the U-Net, checkpointing the best weights by training loss.
myGene = trainGenerator(1,'data/membrane/train','image','label',data_gen_args,save_to_dir = 'data/membrane/train/aug',target_size=target_size)
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene,steps_per_epoch=10000,epochs=4 ,callbacks=[model_checkpoint])
# Predict on the test set using the stored best weights.
model.load_weights("unet_membrane.hdf5")
testGene = testGenerator("data/membrane/test",target_size=target_size)
results = model.predict_generator(testGene,23,verbose=1)
saveResult("data/membrane/test",results)
# Binarize all predicted masks (threshold at 127).
for filename in glob.glob('data/membrane/test/*_predict.png'):
    im = cv2.imread(filename)
    ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY)
    cv2.imwrite(filename, thresh1)
#measure lenght of path image
path="data/membrane/test/6"
left=array("i")
right=array("i")
image_in=cv2.imread(path+"_predict.png")
image_in=cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY)
cv2.imshow('image',image_in)
cv2.waitKey(0)
cv2.destroyWindow('image')
for i in range(image_in.shape[0]):
counter=0
counter2=0
for j in range(image_in.shape[1]):
if image_in[i,j] < 100:
if j>(image_in.shape[1])*.5 and j<(image_in.shape[1])*.75:
counter2 += 1#right pillar
elif j<(image_in.shape[1])*.5 and j>(image_in.shape[1])*.25:
counter += 1#left pillar
right.append(counter2)
left.append(counter)
def _filter_outliers(values):
    """Return *values* with entries beyond 2 standard deviations of the mean dropped."""
    elements = np.array(values)
    mean = np.mean(elements, axis=0)
    sd = np.std(elements, axis=0)
    # Strict bounds, matching the original two-pass filter.
    return [x for x in values if (mean - 2 * sd) < x < (mean + 2 * sd)]

# Deduplicated: the same mean/2-sigma filter was written out twice, once for
# each pillar. Report half the mean per-row width of each pillar
# (presumably converting the pixel count to the unit of interest —
# TODO confirm the 0.5 scale factor).
final_list_right = _filter_outliers(right)
final_list_left = _filter_outliers(left)
print(np.mean(final_list_left)*.5,np.mean(final_list_right)*.5)
# Display visual measurements (disp comes from the splitter module).
disp(path,target_size)
| [
6738,
2746,
1330,
1635,
198,
6738,
1366,
1330,
1635,
198,
198,
2,
418,
13,
268,
2268,
14692,
43633,
5631,
62,
29817,
34563,
62,
39345,
34444,
8973,
796,
366,
15,
1,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
47558,
9060,
1330,
47558... | 2.216249 | 1,637 |
__all__ = ['PythonModulesPanel']
from kivy.factory import Factory as F
from kivy.properties import StringProperty, ObjectProperty
from kivy.clock import Clock
from kivy.lang import Builder
from ncis_inspector.controller import ctl
from functools import partial
Builder.load_string('''
<PythonModuleEntry@ButtonBehavior+Label>:
callback: None
text_size: self.width - dp(20), None
on_release: root.callback()
<PythonModulesPanel>:
GridLayout:
cols: 1
GridLayout:
rows: 1
size_hint_y: None
height: dp(44)
Label:
text: "{} modules loaded".format(len(rv.data))
RecycleView:
id: rv
viewclass: "PythonModuleEntry"
RecycleBoxLayout:
id: bl
spacing: dp(4)
default_size: None, dp(44)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
''')
| [
834,
439,
834,
796,
37250,
37906,
5841,
5028,
26639,
20520,
198,
198,
6738,
479,
452,
88,
13,
69,
9548,
1330,
19239,
355,
376,
198,
6738,
479,
452,
88,
13,
48310,
1330,
10903,
21746,
11,
9515,
21746,
198,
6738,
479,
452,
88,
13,
157... | 2.043564 | 505 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import signal
from .camera import make_camera
from .gstreamer import Display, run_gen
from .streaming.server import StreamingServer
from . import svg
EMPTY_SVG = str(svg.Svg())
| [
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.704225 | 213 |
# coding=utf-8
"""
    create by pymu
    on 2020/12/31
    at 9:37
    Example of developing with EQ:  (translated from Chinese)
"""
from sherry.view.activity.activity_welcome import WelcomeActivity
| [
2,
19617,
28,
40477,
12,
23,
198,
37811,
198,
220,
220,
220,
2251,
416,
279,
4948,
84,
198,
220,
220,
220,
319,
12131,
14,
1065,
14,
3132,
198,
220,
220,
220,
379,
860,
25,
2718,
198,
220,
220,
220,
220,
45635,
18796,
101,
36,
4... | 1.974359 | 78 |
import numpy as np
from numba import njit
from mchap.assemble.prior import log_genotype_prior
from mchap.jitutils import random_choice, normalise_log_probs
from .utils import allelic_dosage, count_allele
from .likelihood import log_likelihood_alleles, log_likelihood_alleles_cached
from .prior import log_genotype_allele_prior
@njit(cache=True)
def mh_options(
    genotype_alleles,
    variable_allele,
    haplotypes,
    reads,
    read_counts,
    inbreeding,
    llks_array,
    lpriors_array,
    probabilities_array,
    llk_cache=None,
):
    """Calculate transition probabilities for a Metropolis-Hastings step.
    Parameters
    ----------
    genotype_alleles : ndarray, int, shape (ploidy, )
        Index of each haplotype in the genotype.
    variable_allele : int
        Index of allele that is variable in this step.
    haplotypes : ndarray, int, shape (n_haplotypes, n_pos)
        Integer encoded haplotypes.
    reads : ndarray, float, shape (n_reads, n_pos, n_nucl)
        Probabilistic reads.
    read_counts : ndarray, int, shape (n_reads, )
        Count of each read.
    inbreeding : float
        Expected inbreeding coefficient of sample.
    llks_array : ndarray, float , shape (n_haplotypes, )
        Array to be populated with log-likelihood of each step option.
    lpriors_array : ndarray, float , shape (n_haplotypes, )
        Array to be populated with log-prior of each step option.
    probabilities_array : ndarray, float , shape (n_haplotypes, )
        Array to be populated with transition probability of each step option.
    llk_cache : dict
        Cache of log-likelihoods mapping genotype index (int) to llk (float).
    Returns
    -------
    None
    Notes
    -----
    The `llks_array`, `lpriors_array`, and
    `probabilities_array` arrays are updated in place.
    """
    # save current allele (restored before returning; the genotype array is
    # mutated in place while evaluating each candidate allele)
    current_allele = genotype_alleles[variable_allele]
    # stats of current genotype
    n_alleles = len(haplotypes)
    dosage = allelic_dosage(genotype_alleles)
    allele_copies = count_allele(genotype_alleles, genotype_alleles[variable_allele])
    lprior = log_genotype_prior(
        dosage=dosage,
        unique_haplotypes=n_alleles,
        inbreeding=inbreeding,
    )
    llk = log_likelihood_alleles_cached(
        reads=reads,
        read_counts=read_counts,
        haplotypes=haplotypes,
        genotype_alleles=genotype_alleles,
        cache=llk_cache,
    )
    # create array to hold proposal ratios for detailed balance
    lproposals_array = np.empty(n_alleles)
    # iterate over allele options
    for a in range(n_alleles):
        # handle case of current allele
        if genotype_alleles[variable_allele] == a:
            # proposed the same allele: ratios are identities (log 0.0)
            lproposals_array[a] = 0.0
            lpriors_array[a] = lprior
            llks_array[a] = llk
        # proposed new allele
        else:
            # set new allele
            genotype_alleles[variable_allele] = a
            # calculate prior
            dosage_i = allelic_dosage(genotype_alleles)
            lpriors_array[a] = log_genotype_prior(
                dosage=dosage_i,
                unique_haplotypes=n_alleles,
                inbreeding=inbreeding,
            )
            # calculate likelihood
            llks_array[a] = log_likelihood_alleles_cached(
                reads=reads,
                read_counts=read_counts,
                haplotypes=haplotypes,
                genotype_alleles=genotype_alleles,
                cache=llk_cache,
            )
            # calculate proposal ratio = g(G|G') / g(G'|G), the ratio of the
            # changed allele's copy number in the proposed vs current genotype
            allele_copies_i = count_allele(
                genotype_alleles, genotype_alleles[variable_allele]
            )
            lproposals_array[a] = np.log(allele_copies_i / allele_copies)
    # calculate transition ratios: per-option MH acceptance probability
    # min(1, likelihood-ratio * prior-ratio * proposal-ratio), in log space
    mh_ratio = (llks_array - llk) + (lpriors_array - lprior) + lproposals_array
    probabilities_array[:] = np.exp(np.minimum(0.0, mh_ratio))
    # calculate probability of no step: each alternative allele is proposed
    # with probability 1/(n_alleles - 1); the remaining mass is the
    # probability of keeping the current allele.
    # NOTE(review): assumes n_alleles > 1 — confirm callers guarantee this.
    probabilities_array[current_allele] = 0
    probabilities_array /= n_alleles - 1
    probabilities_array[current_allele] = 1 - probabilities_array.sum()
    # reset current allele
    genotype_alleles[variable_allele] = current_allele
    return None
@njit(cache=True)
def gibbs_options(
    genotype_alleles,
    variable_allele,
    haplotypes,
    reads,
    read_counts,
    inbreeding,
    llks_array,
    lpriors_array,
    probabilities_array,
    llk_cache=None,
):
    """Compute the transition probabilities of a single Gibbs step.

    Parameters
    ----------
    genotype_alleles : ndarray, int, shape (ploidy, )
        Index of each haplotype in the genotype.
    variable_allele : int
        Index of the allele that may change in this step.
    haplotypes : ndarray, int, shape (n_haplotypes, n_pos)
        Integer encoded haplotypes.
    reads : ndarray, float, shape (n_reads, n_pos, n_nucl)
        Probabilistic reads.
    read_counts : ndarray, int, shape (n_reads, )
        Count of each read.
    inbreeding : float
        Expected inbreeding coefficient of sample.
    llks_array : ndarray, float, shape (n_haplotypes, )
        Output array for the log-likelihood of each step option.
    lpriors_array : ndarray, float, shape (n_haplotypes, )
        Output array for the log-prior of each step option.
    probabilities_array : ndarray, float, shape (n_haplotypes, )
        Output array for the transition probability of each step option.
    llk_cache : dict
        Cache of log-likelihoods mapping genotype index (int) to llk (float).

    Returns
    -------
    None

    Notes
    -----
    The `llks_array`, `lpriors_array` and `probabilities_array` arrays are
    overwritten in place; `genotype_alleles` is restored before returning.
    """
    # remember the allele we started from so the genotype can be restored
    initial_allele = genotype_alleles[variable_allele]
    n_options = len(haplotypes)
    for option in range(n_options):
        # temporarily substitute the candidate allele
        genotype_alleles[variable_allele] = option
        # prior of this allele given the rest of the genotype
        lpriors_array[option] = log_genotype_allele_prior(
            genotype=genotype_alleles,
            variable_allele=variable_allele,
            unique_haplotypes=n_options,
            inbreeding=inbreeding,
        )
        # likelihood of the reads given the candidate genotype
        llks_array[option] = log_likelihood_alleles_cached(
            reads=reads,
            read_counts=read_counts,
            haplotypes=haplotypes,
            genotype_alleles=genotype_alleles,
            cache=llk_cache,
        )
    # normalised posterior over all allele options
    probabilities_array[:] = normalise_log_probs(llks_array + lpriors_array)
    # restore the genotype to its initial state
    genotype_alleles[variable_allele] = initial_allele
    return None
@njit(cache=True)
def compound_step(
    genotype_alleles,
    haplotypes,
    reads,
    read_counts,
    inbreeding,
    llk_cache=None,
    step_type=0,
):
    """MCMC sampler compound step for calling sample alleles from known genotypes.

    Each allele slot of the genotype is resampled once, in random order.

    Parameters
    ----------
    genotype_alleles : ndarray, int, shape (ploidy, )
        Index of each haplotype in the genotype; updated in place.
    haplotypes : ndarray, int, shape (n_haplotypes, n_pos)
        Integer encoded haplotypes.
    reads : ndarray, float, shape (n_reads, n_pos, n_nucl)
        Probabilistic reads.
    read_counts : ndarray, int, shape (n_reads, )
        Count of each read.
    inbreeding : float
        Expected inbreeding coefficient of sample.
    llk_cache : dict
        Cache of log-likelihoods mapping genotype index (int) to llk (float).
    step_type : int
        0 for a Gibbs step, 1 for a Metropolis-Hastings step.

    Returns
    -------
    llk : float
        Log-likelihood of the final genotype_alleles.

    Raises
    ------
    ValueError
        If `step_type` is neither 0 nor 1.
    """
    ploidy = len(genotype_alleles)
    n_options = len(haplotypes)
    # working arrays, reused for each allele slot
    option_lpriors = np.full(n_options, np.nan)
    option_llks = np.full(n_options, np.nan)
    option_probs = np.full(n_options, np.nan)
    # visit the allele slots of the genotype in a random order
    slot_order = np.arange(ploidy)
    np.random.shuffle(slot_order)
    for idx in range(ploidy):
        slot = slot_order[idx]
        # compute the transition probabilities for this slot
        if step_type == 0:
            gibbs_options(
                genotype_alleles=genotype_alleles,
                variable_allele=slot,
                haplotypes=haplotypes,
                reads=reads,
                read_counts=read_counts,
                inbreeding=inbreeding,
                llks_array=option_llks,
                lpriors_array=option_lpriors,
                probabilities_array=option_probs,
                llk_cache=llk_cache,
            )
        elif step_type == 1:
            mh_options(
                genotype_alleles=genotype_alleles,
                variable_allele=slot,
                haplotypes=haplotypes,
                reads=reads,
                read_counts=read_counts,
                inbreeding=inbreeding,
                llks_array=option_llks,
                lpriors_array=option_lpriors,
                probabilities_array=option_probs,
                llk_cache=llk_cache,
            )
        else:
            raise ValueError("Unknown MCMC step type.")
        # sample the new allele for this slot and apply it
        choice = random_choice(option_probs)
        genotype_alleles[slot] = choice
    # canonicalise the genotype and report the llk of the final choice
    genotype_alleles.sort()
    return option_llks[choice]
@njit(cache=True)
def mcmc_sampler(
    genotype_alleles,
    haplotypes,
    reads,
    read_counts,
    inbreeding,
    n_steps=1000,
    cache=False,
    step_type=0,
):
    """MCMC simulation for calling sample alleles from a set of known genotypes.

    Parameters
    ----------
    genotype_alleles : ndarray, int, shape (ploidy, )
        Initial genotype; the caller's array is not modified.
    haplotypes : ndarray, int, shape (n_haplotypes, n_pos)
        Integer encoded haplotypes.
    reads : ndarray, float, shape (n_reads, n_pos, n_nucl)
        Probabilistic reads.
    read_counts : ndarray, int, shape (n_reads, )
        Count of each read.
    inbreeding : float
        Expected inbreeding coefficient of sample.
    n_steps : int
        Number of (compound) steps to simulate.
    cache : bool
        If True, cache log-likelihoods keyed by genotype index.
    step_type : int
        0 for a Gibbs step, 1 for a Metropolis-Hastings step.

    Returns
    -------
    genotype_alleles_trace : ndarray, int, shape (n_steps, ploidy)
        Genotype alleles trace.
    llk_trace : ndarray, float, shape (n_steps, )
        Log-likelihood at each step.
    """
    # work on a copy so the caller's genotype is left untouched
    current_genotype = genotype_alleles.copy()
    ploidy = len(current_genotype)
    trace_genotypes = np.empty((n_steps, ploidy), current_genotype.dtype)
    trace_llks = np.empty(n_steps, np.float64)
    if cache:
        # seed the dict with a dummy entry so its value type can be inferred
        llk_cache = {}
        llk_cache[-1] = np.nan
    else:
        llk_cache = None
    for step in range(n_steps):
        step_llk = compound_step(
            genotype_alleles=current_genotype,
            haplotypes=haplotypes,
            reads=reads,
            read_counts=read_counts,
            inbreeding=inbreeding,
            llk_cache=llk_cache,
            step_type=step_type,
        )
        trace_llks[step] = step_llk
        trace_genotypes[step] = current_genotype.copy()
    return trace_genotypes, trace_llks
@njit(cache=True)
def greedy_caller(haplotypes, ploidy, reads, read_counts, inbreeding=0.0):
    """Greedy method for calling a genotype from known haplotypes.

    The genotype is grown one allele slot at a time; each slot is filled with
    the allele maximising the joint log prior + log likelihood given the
    alleles chosen so far.

    Parameters
    ----------
    haplotypes : ndarray, int, shape (n_haplotypes, n_pos)
        Integer encoded haplotypes.
    ploidy : int
        Ploidy of organism locus.
    reads : ndarray, float, shape (n_reads, n_pos, n_nucl)
        Probabilistic reads.
    read_counts : ndarray, int, shape (n_reads, )
        Count of each read.
    inbreeding : float
        Expected inbreeding coefficient of sample.

    Returns
    -------
    genotype_alleles : ndarray, int, shape (ploidy, )
        Index of each haplotype in the genotype, sorted ascending.
    """
    n_options = len(haplotypes)
    partial_genotype = np.zeros(0, np.int32)
    for slot in range(ploidy):
        # grow the genotype by one allele slot, keeping earlier choices
        genotype = np.zeros(slot + 1, np.int32)
        genotype[0:slot] = partial_genotype[0:slot]
        best_score = -np.inf
        best_option = -1
        for option in range(n_options):
            genotype[slot] = option
            option_llk = log_likelihood_alleles(
                reads=reads,
                read_counts=read_counts,
                haplotypes=haplotypes,
                genotype_alleles=genotype,
            )
            option_lprior = log_genotype_prior(
                dosage=allelic_dosage(genotype),
                unique_haplotypes=len(haplotypes),
                inbreeding=inbreeding,
            )
            score = option_llk + option_lprior
            if score > best_score:
                # remember the best-scoring allele for this slot
                best_score = score
                best_option = option
    # commit the greedy choice for this slot
        genotype[slot] = best_option
        partial_genotype = genotype
    genotype.sort()
    return genotype
| [
11748,
299,
32152,
355,
45941,
198,
6738,
997,
7012,
1330,
299,
45051,
198,
198,
6738,
285,
354,
499,
13,
292,
15140,
13,
3448,
273,
1330,
2604,
62,
5235,
8690,
62,
3448,
273,
198,
6738,
285,
354,
499,
13,
45051,
26791,
1330,
4738,
... | 2.222241 | 6,016 |
from .draw import new_drawer
from .format import new_formatter
from .meta import DESCRIPTION as __doc__, VERSION as __version__
__all__ = ["new_drawer", "new_formatter"]
| [
6738,
764,
19334,
1330,
649,
62,
19334,
263,
198,
6738,
764,
18982,
1330,
649,
62,
687,
1436,
198,
6738,
764,
28961,
1330,
22196,
40165,
355,
11593,
15390,
834,
11,
44156,
2849,
355,
11593,
9641,
834,
198,
198,
834,
439,
834,
796,
146... | 3.166667 | 54 |
from numbers import Number
import numpy as onp
from numpy.testing import assert_allclose
import pytest
import scipy.special as osp_special
import scipy.stats as osp_stats
from jax import grad, jacobian, jit, lax, random, vmap
import jax.numpy as np
from jax.scipy.special import expit
from jax.util import partial
from numpyro.distributions.util import (
binary_cross_entropy_with_logits,
categorical,
cumprod,
cumsum,
multinomial,
standard_gamma,
vec_to_tril_matrix,
xlog1py,
xlogy
)
_zeros = partial(lax.full_like, fill_value=0)
@pytest.mark.parametrize('x, y', [
(np.array([1]), np.array([1, 2, 3])),
(np.array([0]), np.array([0, 0])),
(np.array([[0.], [0.]]), np.array([1., 2.])),
])
@pytest.mark.parametrize('jit_fn', [False, True])
@pytest.mark.parametrize('x, y, grad1, grad2', [
(np.array([1., 1., 1.]), np.array([1., 2., 3.]),
np.log(np.array([1, 2, 3])), np.array([1., 0.5, 1./3])),
(np.array([1.]), np.array([1., 2., 3.]),
np.sum(np.log(np.array([1, 2, 3]))), np.array([1., 0.5, 1./3])),
(np.array([1., 2., 3.]), np.array([2.]),
np.log(np.array([2., 2., 2.])), np.array([3.])),
(np.array([0.]), np.array([0, 0]),
np.array([-float('inf')]), np.array([0, 0])),
(np.array([[0.], [0.]]), np.array([1., 2.]),
np.array([[np.log(2.)], [np.log(2.)]]), np.array([0, 0])),
])
@pytest.mark.parametrize('x, y', [
(np.array([1]), np.array([0, 1, 2])),
(np.array([0]), np.array([-1, -1])),
(np.array([[0.], [0.]]), np.array([1., 2.])),
])
@pytest.mark.parametrize('jit_fn', [False, True])
@pytest.mark.parametrize('x, y, grad1, grad2', [
(np.array([1., 1., 1.]), np.array([0., 1., 2.]),
np.log(np.array([1, 2, 3])), np.array([1., 0.5, 1./3])),
(np.array([1., 1., 1.]), np.array([-1., 0., 1.]),
np.log(np.array([0, 1, 2])), np.array([float('inf'), 1., 0.5])),
(np.array([1.]), np.array([0., 1., 2.]),
np.sum(np.log(np.array([1, 2, 3]))), np.array([1., 0.5, 1./3])),
(np.array([1., 2., 3.]), np.array([1.]),
np.log(np.array([2., 2., 2.])), np.array([3.])),
(np.array([0.]), np.array([-1, -1]),
np.array([-float('inf')]), np.array([0, 0])),
(np.array([[0.], [0.]]), np.array([1., 2.]),
np.array([[np.log(6.)], [np.log(6.)]]), np.array([0, 0])),
])
@pytest.mark.parametrize('x, y', [
(0.2, 10.),
(0.6, -10.),
])
@pytest.mark.parametrize('shape', [
(3,),
(5, 3),
])
@pytest.mark.parametrize('shape', [
(3,),
(5, 3),
])
@pytest.mark.parametrize('alpha, shape', [
(1., ()),
(1., (2,)),
(np.array([1., 2.]), ()),
(np.array([1., 2.]), (3, 2)),
])
@pytest.mark.parametrize("alpha", [0.6, 2., 10.])
@pytest.mark.parametrize("alpha", [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])
@pytest.mark.parametrize('prim', [
xlogy,
xlog1py,
])
@pytest.mark.parametrize('prim', [
cumsum,
cumprod,
])
@pytest.mark.parametrize('p, shape', [
(np.array([0.1, 0.9]), ()),
(np.array([0.2, 0.8]), (2,)),
(np.array([[0.1, 0.9], [0.2, 0.8]]), ()),
(np.array([[0.1, 0.9], [0.2, 0.8]]), (3, 2)),
])
@pytest.mark.parametrize("p", [
np.array([0.2, 0.3, 0.5]),
np.array([0.8, 0.1, 0.1]),
])
@pytest.mark.parametrize('p, shape', [
(np.array([0.1, 0.9]), ()),
(np.array([0.2, 0.8]), (2,)),
(np.array([[0.1, 0.9], [0.2, 0.8]]), ()),
(np.array([[0.1, 0.9], [0.2, 0.8]]), (3, 2)),
])
@pytest.mark.parametrize("p", [
np.array([0.2, 0.3, 0.5]),
np.array([0.8, 0.1, 0.1]),
])
@pytest.mark.parametrize("n", [
10000,
np.array([10000, 20000]),
])
@pytest.mark.parametrize("shape", [
(6,),
(5, 10),
(3, 4, 3),
])
@pytest.mark.parametrize("diagonal", [
0,
-1,
-2,
])
| [
6738,
3146,
1330,
7913,
198,
198,
11748,
299,
32152,
355,
319,
79,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
11748,
12972,
9288,
198,
11748,
629,
541,
88,
13,
20887,
355,
267,
2777,
62,
20887,
198,
11748,
629,... | 1.929563 | 1,945 |
from setuptools import setup

# Use the Markdown README as the long description shown on PyPI.
with open('README.md', 'r', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='s3_uri_to_url',
    description='s3_uri_to_url - Convert S3 URI to URL',
    long_description=readme,
    long_description_content_type='text/markdown',
    version='0.1.2',
    author='Daniel Ron',
    author_email='dron@alum.mit.edu',
    url='https://github.com/its-dron/s3-uri-to-url',
    packages=['s3_uri_to_url'],
    install_requires=[
        'click'
    ],
    include_package_data=True,
    # PEP 440 only allows the ".*" wildcard with the == and != operators, so
    # the previous ">=3.6.*" was an invalid specifier; ">=3.6" is equivalent
    # and valid.
    python_requires=">=3.6",
    license='Apache 2.0',
    zip_safe=False,
    entry_points={
        'console_scripts': ['s3-uri-to-url=s3_uri_to_url.main:uri2url'],
    }
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9132,
3256,
705,
81,
11537,
355,
277,
25,
198,
220,
220,
220,
1100,
1326,
796,
277,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
116... | 2.179487 | 312 |
#
# compile_usx_test_py.py [version 1.0]
#
# This is script which compiles Python interface to
# test of usleep calls (usx_test_py.cpp).
#
# read more on: www.orange-engineer.com
#
# (c) Jacek Pierzchlewski, 2017 jacek@pierzchlewski.com
# license: BSD-2-Clause.
#
from distutils.core import setup, Extension
# Build the C++ extension that exposes the usleep() test to Python and
# register it with distutils.
setup(
    name='usx_test',
    version='1.0',
    description='Python interface to usleep() test',
    ext_modules=[Extension('usxTest', sources=['usx_test_py.cpp'])],
)
| [
2,
198,
2,
17632,
62,
385,
87,
62,
9288,
62,
9078,
13,
9078,
685,
9641,
352,
13,
15,
60,
198,
2,
198,
2,
770,
318,
4226,
543,
552,
2915,
11361,
7071,
284,
198,
2,
1332,
286,
514,
8892,
3848,
357,
385,
87,
62,
9288,
62,
9078,
... | 2.546392 | 194 |
# Copyright (C) 2017-2020 Trent Houliston <trent@houliston.me>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import hashlib
import io
import os
import warnings
import cv2
import matplotlib as mpl
import numpy as np
import tensorflow as tf
mpl.use("Agg")
import matplotlib.pyplot as plt # isort:skip
| [
2,
15069,
357,
34,
8,
2177,
12,
42334,
24269,
367,
2852,
36363,
1279,
83,
1156,
31,
71,
2852,
36363,
13,
1326,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
... | 3.699717 | 353 |
# Generated by Django 3.1.4 on 2020-12-14 17:44
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
12131,
12,
1065,
12,
1415,
1596,
25,
2598,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
"""
Initialization file for search library module.
Note that searches are on the local DB. To call the Twitter Search API,
see lib/twitter/search.py script.
"""
| [
37811,
198,
24243,
1634,
2393,
329,
2989,
5888,
8265,
13,
198,
198,
6425,
326,
15455,
389,
319,
262,
1957,
20137,
13,
1675,
869,
262,
3009,
11140,
7824,
11,
198,
3826,
9195,
14,
6956,
14,
12947,
13,
9078,
4226,
13,
198,
37811,
198
] | 3.857143 | 42 |
import os
import subprocess
from termcolor import cprint as print
from ...tools.hooksmanager import hooksManager
from ...tools.configuration import ConfigurationTool
from ...tools.spinner import Spinner
from .ideploymentaction import IDeploymentAction
from ...enums import EnvironmentType, HookTypes
class ZappaDeployment(IDeploymentAction):
    """ Deployment action that deploys or updates AWS Lambda stages through Zappa.
    """

    @classmethod
    def shouldExecute(cls):
        """ Check if the Action should be deployed

            Returns True when a `zappa_settings.json` file exists in the
            current working directory.
        """
        # Check if there is a zappa settings file
        return os.path.exists("zappa_settings.json")

    def start(self, environType, **kwargs):
        """ Start the deployment

            :param environType: EnvironmentType of the target environment
            :raises Exception: when no `zappa_settings.json` template exists
        """
        # Get the configs
        ConfigurationTool.readConfig()

        # Check if there is a template
        if os.path.exists("zappa_settings.json") is False:
            raise Exception("Can't find Zappa template: 'zappa_settings.json'")

        # Remember the environment type for later
        self.environment = environType

        # Check if it is the production environment
        if self.environment == EnvironmentType.PRODUCTION:
            # Check whether a stage-name override is configured
            if ConfigurationTool.getExtraConfig("prod-zappa-stage") is not None:
                stageName = ConfigurationTool.getExtraConfig("prod-zappa-stage")
            else:
                stageName = "production"
        else:
            # Check whether a stage-name override is configured
            if ConfigurationTool.getExtraConfig("dev-zappa-stage") is not None:
                stageName = ConfigurationTool.getExtraConfig("dev-zappa-stage")
            else:
                stageName = "dev"

        # The override may be a list of stages; deploy each of them in order
        if isinstance(stageName, list) is True:
            for sName in stageName:
                # Deploy the stage
                self._deployStage(sName)
        else:
            # Deploy the stage
            self._deployStage(stageName)

    def _deployStage(self, stageName):
        """ Run the zappa command

            Runs `zappa deploy <stage>` and falls back to `zappa update <stage>`
            when the stage was already deployed. The compile, install and
            deployment hooks are executed around the individual phases.

            :param stageName: name of the Zappa stage to deploy
            :returns: False on failure, None otherwise
        """
        # Execute the PRE_COMPILE hooks
        hooksManager.executeHooks(self.environment, HookTypes.PRE_COMPILE)

        # Create a temporary directory
        with Spinner(text="Creating environment") as spinner:
            self._createTempDir()
            spinner.succeed()

        # Execute the POST_COMPILE hooks
        hooksManager.executeHooks(self.environment, HookTypes.POST_COMPILE)

        # Execute the PRE_INSTALL hooks
        hooksManager.executeHooks(self.environment, HookTypes.PRE_INSTALL)

        # Check the dependencies
        with Spinner(text="Installing dependencies") as spinner:
            self._checkDependencies()
            spinner.succeed()

        # Execute the POST_INSTALL hooks
        hooksManager.executeHooks(self.environment, HookTypes.POST_INSTALL)

        # Execute the PRE_DEPLOYMENT hooks
        hooksManager.executeHooks(self.environment, HookTypes.PRE_DEPLOYMENT)

        # Zapping it
        with Spinner(text="Zappa-ing: %s" % stageName) as spinner:
            # Find how to run zappa
            zappaCommand = self._findZappa()

            # Check if we could find a zappa
            if zappaCommand is None:
                raise Exception("Could not find Zappa. Make sure it is installed.")

            # Run the deployment command
            try:
                subprocess.check_output("cd %s && %s deploy %s" % (self.location, zappaCommand, stageName), stderr=subprocess.STDOUT, shell=True)
            except subprocess.CalledProcessError as e:
                # Check if it was already deployed
                if b"did you mean to call update" in e.output:
                    # Run the update command
                    try:
                        subprocess.check_output("cd %s && %s update %s" % (self.location, zappaCommand, stageName), stderr=subprocess.STDOUT, shell=True)
                    except subprocess.CalledProcessError as e:
                        print(e.output, "red")
                        return False
                else:
                    print("An error occurred", "red")
                    return False
            finally:
                # Execute the POST_DEPLOYMENT hooks, even when deployment failed
                hooksManager.executeHooks(self.environment, HookTypes.POST_DEPLOYMENT)

    def _findZappa(self):
        """ Search for Zappa

            Looks for a globally available `zappa` command first and falls
            back to `poetry run zappa` when a poetry project is detected.

            :returns: the command used to invoke Zappa, or None when not found
        """
        # Check if zappa is available at the root level
        try:
            subprocess.check_output("zappa", shell=True)
        except subprocess.CalledProcessError:
            pass
        else:
            return "zappa"

        # Check if poetry is available
        if os.path.exists("poetry.lock") is True:
            # Try zappa in poetry
            try:
                subprocess.check_output("poetry run zappa", shell=True)
            except subprocess.CalledProcessError:
                pass
            else:
                return "poetry run zappa"

        return None
| [
11748,
28686,
198,
11748,
850,
14681,
198,
198,
6738,
3381,
8043,
1330,
269,
4798,
355,
3601,
198,
198,
6738,
2644,
31391,
13,
25480,
82,
37153,
1330,
26569,
13511,
198,
6738,
2644,
31391,
13,
11250,
3924,
1330,
28373,
25391,
198,
6738,
... | 2.829553 | 1,455 |
# -*- coding: utf-8 -*-
from setuptools import setup
from io import open
from os import path

# Directory containing this setup.py; used to locate the README.
here = path.abspath(path.dirname(__file__))

# Use the reStructuredText README as the long description shown on PyPI.
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="migrate-anything",
    # Expose the CLI as the `migrate-anything` console command.
    entry_points={"console_scripts": ["migrate-anything = migrate_anything.main:main"]},
    version="0.1.6",
    description="Helps manage migrations for databases and anything else",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url="https://github.com/Lieturd/migrate-anything",
    author="Lieturd OÜ",
    author_email="janne@lieturd.com",
    packages=["migrate_anything", "migrate_anything.storage"],
    keywords="migrate database db release",
    # Supports Python 2.7 and 3.4+ (3.0-3.3 excluded).
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    project_urls={
        "Bug Reports": "https://github.com/Lieturd/migrate-anything/issues",
        "Source": "https://github.com/Lieturd/migrate-anything/",
    },
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
33245,
1330,
1280,
198,
6738,
28686,
1330,
3108,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,... | 2.587891 | 512 |
from MinkowskiEngine import SparseTensor
import numpy as np
import torch
from configs.common import DEBUG_CFG, DIM_PARSE
#OBJ_REP = DIM_PARSE.OBJ_REP
from tools.debug_utils import _show_3d_points_bboxes_ls, _show_lines_ls_points_ls
from tools.visual_utils import _show_3d_points_objs_ls, _show_objs_ls_points_ls
def update_img_shape_for_pcl(x, img_meta, point_strides):
    """Update image-shape metadata in place from multi-scale feature maps.

    Called in single_stage.py.

    Parameters
    ----------
    x : sequence of tensors
        Feature maps, each shaped (N, C, H_i, W_i); only the spatial sizes
        are read.
    img_meta : dict
        Updated in place with 'feat_sizes', 'pad_shape' and 'img_shape'.
    point_strides : sequence of int
        Stride of each feature level; only the first entry is used.

    Returns
    -------
    None
    """
    # Spatial size (H_i, W_i) of each feature level.
    img_meta['feat_sizes'] = [np.array([*xi.size()[2:]]) for xi in x]
    # The padded input size is the finest feature size scaled by its stride.
    img_meta['pad_shape'] = img_meta['feat_sizes'][0] * point_strides[0]
    img_meta['img_shape'] = img_meta['pad_shape']
    # (A dead `if 0:` debug-print branch was removed here.)
def get_pcl_topview(sinput, gt_bboxes):
    '''
    Project a sparse 3D point-cloud tensor onto a sparse 2D birds-eye view.

    The input features hold 9 channels: [color, normal, coords]
    (presumably 3 channels each -- TODO confirm against the caller).
    Returns a SparseTensor whose features are [density, normal] per occupied
    BEV pixel.
    '''
    # Overwrite the trailing (coordinate) channels with ones so the z-mean
    # below yields per-pixel point density in the last channel.
    sinput.F[:,6:] = 1
    # Densify; assumes the last dense axis is the vertical (z) dimension --
    # TODO confirm against MinkowskiEngine's dense() layout.
    dense_t , _, _ = sinput.dense()
    zdim = dense_t.shape[-1]  # NOTE(review): unused
    # Collapse the z axis; channel 6 now holds occupancy density.
    bev_d = dense_t.mean(-1)
    # Keep the first 7 channels (6 feature channels + density).
    bev_d = bev_d[:, :7, ...]
    batch_size = bev_d.shape[0]
    #bev_d = bev_d.permute(0,1,3,2)

    # Build a (batch, y, x, 0) sparse coordinate for every BEV pixel.
    h, w = bev_d.shape[2:]
    grid_y, grid_x = torch.meshgrid( torch.arange(h), torch.arange(w) )
    bev_coords_base = torch.cat([grid_y[:,:,None], grid_x[:,:,None]], dim=2).view(-1, 2).int()
    bev_coords = []
    for i in range(batch_size):
        batch_inds = (torch.ones(h*w,1)*i).int()
        third_inds = (torch.ones(h*w,1)*0).int()
        bev_coords_i = torch.cat([ batch_inds, bev_coords_base, third_inds ], dim=1)
        bev_coords.append(bev_coords_i)
    bev_coords = torch.cat(bev_coords, dim=0)
    # Flatten features to one row per BEV pixel: (batch*h*w, channels).
    bev_sfeat = bev_d.permute(0,2,3, 1).reshape(-1, bev_d.shape[1])

    # Keep only occupied pixels (non-zero density in the last channel).
    mask = bev_sfeat[:,-1] > 1e-5
    bev_coords = bev_coords[mask]
    bev_sfeat = bev_sfeat[mask]
    # Drop the first 3 (color) channels and move density to the front:
    # [normal(3), density] -> [density, normal(3)].
    bev_sfeat = bev_sfeat[:, 3:][:, [3,0,1,2]]

    bev_sparse = SparseTensor(bev_sfeat, bev_coords)

    # Dead debug-visualisation branch; never executed.
    if 0:
        for i in range(batch_size):
            bev_i = bev_d[i]
            bev_i = bev_i.permute(2,1,0)
            lines2d = gt_bboxes[i].cpu().data.numpy()
            density = bev_i[..., -1].cpu().data.numpy()
            color = bev_i[..., :3].cpu().data.numpy()
            normal = bev_i[..., 3:6].cpu().data.numpy()
            _show_lines_ls_points_ls( density, [lines2d] )
            _show_lines_ls_points_ls( color, [lines2d] )
            _show_lines_ls_points_ls( normal, [lines2d] )
        pass
    return bev_sparse
| [
6738,
337,
676,
12079,
13798,
1330,
1338,
17208,
51,
22854,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
4566,
82,
13,
11321,
1330,
16959,
62,
22495,
38,
11,
360,
3955,
62,
27082,
5188,
198,
2,
9864,
41,
62,
... | 2.061096 | 1,113 |
import sqlite3

# Open (creating it if necessary) the SQLite database file and obtain a
# cursor for executing SQL statements.
conn = sqlite3.connect('databasesql.db')
cursor = conn.cursor()
| [
11748,
44161,
578,
18,
198,
198,
37043,
796,
44161,
578,
18,
13,
8443,
10786,
19608,
18826,
13976,
13,
9945,
11537,
198,
66,
21471,
796,
48260,
13,
66,
21471,
3419,
628
] | 2.7 | 30 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv_bridge
from jsk_topic_tools import ConnectionBasedTransport
import rospy
from sensor_msgs.msg import Image
if __name__ == '__main__':
    # Register the ROS node, instantiate the converter (ImageToLabel is
    # defined elsewhere in this file), and block until shutdown.
    rospy.init_node('image_to_label')
    img2label = ImageToLabel()
    rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
269,
85,
62,
9458,
198,
6738,
474,
8135,
62,
26652,
62,
31391,
1... | 2.542373 | 118 |
'''
lec 6

Demonstrates ``for`` loops over lists, strings, words and ranges, and a
manual maximum search over a list of numbers.
'''

# iterate over the elements of a list literal
for letter in ['a', 'b', 'c']:
    print(letter)

demo_str = 'this is my string'

# iterate over the individual characters of a string
for c in demo_str:
    print(c)

# iterate over whitespace-separated words
for word in demo_str.split():
    print(word)

# range with a single argument: 0 .. 4
for i in range(5):
    print(i)

# range with start and stop: 1 .. 4
for i in range(1,5):
    print(i)

# range with start, stop and step: 1, 3
for i in range(1,5,2):
    print(i)

num_list = [1,12,3,1]

# manual maximum search (equivalent to max(num_list))
max_item = num_list[0]
for num in num_list:
    if max_item <= num:
        max_item = num
print(max_item)
7061,
6,
198,
293,
66,
718,
198,
7061,
6,
198,
198,
1640,
3850,
287,
37250,
64,
3256,
705,
65,
3256,
705,
66,
6,
5974,
198,
220,
220,
220,
198,
220,
220,
220,
3601,
7,
9291,
8,
198,
220,
220,
220,
220,
198,
9536,
78,
62,
2536,... | 1.814961 | 254 |
from hdbscan import HDBSCAN
import sklearn.datasets as datasets

from plot import bokeh_plot_2d, plot_parallel_coordinates

# Load the 8x8 handwritten-digits dataset bundled with scikit-learn.
digits = datasets.load_digits()
data = digits.data

# Cluster the raw pixel features; groups smaller than 15 points are treated
# as noise by HDBSCAN.
hdb = HDBSCAN(min_cluster_size=15).fit(data)

# Visualise the clustering in 2D (PCA projection), coloured by cluster label
# and weighted by membership probability.
bokeh_plot_2d(data, labels=hdb.labels_, probabilities=hdb.probabilities_, algorithm='pca')
#plot_parallel_coordinates(data, labels=hdb.labels_, n_components=10)
| [
6738,
289,
9945,
35836,
1330,
5572,
4462,
44565,
198,
11748,
1341,
35720,
13,
19608,
292,
1039,
355,
40522,
198,
198,
6738,
7110,
1330,
1489,
365,
71,
62,
29487,
62,
17,
67,
11,
7110,
62,
1845,
29363,
62,
37652,
17540,
198,
198,
12894... | 2.652778 | 144 |
from submission import Submission | [
6738,
14498,
1330,
42641
] | 8.25 | 4 |
# `execfile` existed only in Python 2; reading the source and passing it to
# `exec` is the portable equivalent that also runs on Python 3, and the
# `with` block ensures the file handle is closed.
with open('izhiGUI.py') as script:
    exec(script.read())
| [
18558,
7753,
10786,
528,
5303,
40156,
13,
9078,
11537,
198
] | 2.3 | 10 |
from flask import Flask, request
from werkzeug import Response
from examples import logsetup
from flaat.flask import Flaat
from flaat.requirements import HasAARCEntitlement, HasGroup, ValidLogin
logger = logsetup.setup_logging()
##########
## Basic config
# FLASK
app = Flask(__name__)
# FLAAT
flaat = Flaat()
flaat.set_cache_lifetime(120) # seconds; default is 300
flaat.set_trusted_OP_list(
[
"https://aai-demo.egi.eu/oidc",
"https://aai-dev.egi.eu/oidc",
"https://aai.egi.eu/oidc/",
"https://accounts.google.com/",
"https://b2access-integration.fz-juelich.de/oauth2",
"https://b2access.eudat.eu/oauth2/",
"https://iam-test.indigo-datacloud.eu/",
"https://iam.deep-hybrid-datacloud.eu/",
"https://iam.extreme-datacloud.eu/",
"https://login-dev.helmholtz.de/oauth2/",
"https://login.elixir-czech.org/oidc/",
"https://login.helmholtz-data-federation.de/oauth2/",
"https://login.helmholtz.de/oauth2/",
"https://oidc.scc.kit.edu/auth/realms/kit/",
"https://orcid.org/",
"https://proxy.demo.eduteams.org",
"https://services.humanbrainproject.eu/oidc/",
"https://unity.eudat-aai.fz-juelich.de/oauth2/",
"https://unity.helmholtz-data-federation.de/oauth2/",
"https://wlcg.cloud.cnaf.infn.it/",
]
)
# flaat.set_trusted_OP_file('/etc/oidc-agent/issuer.config')
# flaat.set_OP_hint("helmholtz")
# flaat.set_OP_hint("google")
flaat.set_timeout(3)
# verbosity:
# 0: No output
# 1: Errors
# 2: More info, including token info
# 3: Max
# flaat.set_verbosity(0)
# flaat.set_verify_tls(True)
# # Required for using token introspection endpoint:
# flaat.set_client_id('')
# flaat.set_client_secret('')
@app.route("/")
@app.route("/info")
@flaat.inject_user_infos
@app.route("/valid_user/<int:id>", methods=["POST", "GET"])
@flaat.requires(ValidLogin())
@app.route("/valid_user")
@flaat.requires(ValidLogin())
@app.route("/valid_user_2")
@flaat.requires(ValidLogin(), on_failure=my_failure_callback)
@app.route("/group_test_kit")
@flaat.requires(
HasGroup(
required=["admins@kit.edu", "employee@kit.edu", "member@kit.edu"],
claim="eduperson_scoped_affiliation",
match=2,
),
on_failure=my_failure_callback,
)
@app.route("/group_test_iam")
@flaat.requires(HasGroup("KIT-Cloud", "groups"))
@app.route("/group_test_hdf")
@flaat.requires(
HasAARCEntitlement(
required=[
"urn:geant:h-df.de:group:m-team:feudal-developers",
"urn:geant:h-df.de:group:MyExampleColab#unity.helmholtz.de",
],
claim="eduperson_entitlement",
match="all",
)
)
@app.route("/group_test_hdf2")
@flaat.requires(
HasAARCEntitlement(
"urn:geant:h-df.de:group:MyExampleColab", "eduperson_entitlement"
)
)
@app.route("/group_test_hdf3")
@flaat.requires(
HasAARCEntitlement(
[
"urn:geant:h-df.de:group:MyExampleColab",
"urn:geant:h-df.de:group:m-team:feudal-developers",
],
"eduperson_entitlement",
)
)
##########
# Main
if __name__ == "__main__":
# app.run(host="127.0.0.1", port=8081, debug=True)
app.run(host="0.0.0.0", port=8081, debug=True)
| [
6738,
42903,
1330,
46947,
11,
2581,
198,
6738,
266,
9587,
2736,
1018,
1330,
18261,
198,
198,
6738,
6096,
1330,
2604,
40406,
198,
6738,
781,
64,
265,
13,
2704,
2093,
1330,
22026,
265,
198,
6738,
781,
64,
265,
13,
8897,
18883,
1330,
787... | 2.074495 | 1,584 |
import argparse

from cefevent import CEFSender, CEFEvent

if __name__ == '__main__':
    # Command-line interface for building CEF events and replaying them to a
    # syslog destination.
    parser = argparse.ArgumentParser(description='CEF builder and replayer')
    parser.add_argument('files', metavar='DEFINITION_FILE', type=str,
                        nargs='+', help='an file containing event definitions')
    parser.add_argument('--host', type=str, help='Syslog destination address')
    parser.add_argument('--port', type=int, help='Syslog destination port')
    parser.add_argument('--auto_send', action='store_true',
                        help='Auto send logs')
    parser.add_argument('--eps', type=int, default=100, help='Max EPS')
    args = parser.parse_args()

    # Load the event definitions; with --auto_send, replay them continuously
    # capped at --eps events per second.
    cs = CEFSender(host=args.host, port=args.port, files=args.files)
    if args.auto_send:
        cs.auto_send_log(args.eps)
| [
11748,
1822,
29572,
198,
6738,
269,
891,
15596,
1330,
18671,
10652,
2194,
11,
18671,
37,
9237,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
... | 2.603226 | 310 |
# Copyright (c) 2019, Moritz E. Beber.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Expect that namespaces function as designed."""
import re
import pytest
from sqlalchemy.exc import IntegrityError
from cobra_component_models.orm import Namespace
@pytest.mark.parametrize(
"attributes",
[
{
"miriam_id": "MIR:00000258",
"prefix": "combine.specifications",
"pattern": r"^\w+(\-|\.|\w)*$",
},
{
"id": 22,
"miriam_id": "MIR:00000258",
"prefix": "combine.specifications",
"pattern": r"^\w+(\-|\.|\w)*$",
},
{
"miriam_id": "MIR:00000258",
"prefix": "combine.specifications",
"pattern": r"^\w+(\-|\.|\w)*$",
"embedded_prefix": True,
},
{
"miriam_id": "MIR:00000258",
"prefix": "combine.specifications",
"pattern": r"^\w+(\-|\.|\w)*$",
"name": "COMBINE specifications",
},
{
"miriam_id": "MIR:00000258",
"prefix": "combine.specifications",
"pattern": r"^\w+(\-|\.|\w)*$",
"description": "The 'COmputational Modeling in BIology' NEtwork (COMBINE) is an initiative to coordinate the development of the various community standards and formats for computational models, initially in Systems Biology and related fields. This collection pertains to specifications of the standard formats developed by the Computational Modeling in Biology Network.", # noqa: E501
},
],
)
def test_init(attributes):
    """Expect that an object can be instantiated with the right attributes."""
    namespace = Namespace(**attributes)
    # every constructor keyword must be reflected by the instance
    for name, expected in attributes.items():
        assert getattr(namespace, name) == expected
@pytest.mark.parametrize(
    "identifier",
    [
        pytest.param(None, marks=pytest.mark.raises(exception=TypeError)),
        pytest.param("MIR:1234567", marks=pytest.mark.raises(exception=ValueError)),
        pytest.param("MIR:123456789", marks=pytest.mark.raises(exception=ValueError)),
    ],
)
def test_miriam_constraints(session, identifier):
    """Verify that malformed MIRIAM identifiers are rejected at construction."""
    Namespace(
        prefix="combine.specifications",
        pattern=r"^\w+(\-|\.|\w)*$",
        miriam_id=identifier,
    )
def test_unique_miriam_id(session):
    """Verify the unique constraint on the MIRIAM identifier column."""
    session.add(
        Namespace(miriam_id="MIR:00000258", prefix="combine.spec", pattern="pattern")
    )
    session.commit()
    duplicate = Namespace(
        miriam_id="MIR:00000258", prefix="combine.specifications", pattern="pattern"
    )
    session.add(duplicate)
    # Committing a second row with the same MIRIAM ID must fail.
    with pytest.raises(IntegrityError):
        session.commit()
def test_prefix_not_null(session):
    """Verify that a namespace without a prefix cannot be committed."""
    session.add(Namespace(miriam_id="MIR:00000258", pattern=r"^\w+(\-|\.|\w)*$"))
    with pytest.raises(IntegrityError):
        session.commit()
@pytest.mark.xfail(
    reason="Length restriction is not implemented for in-memory SQLite.", strict=True
)
def test_prefix_cut_off(session):
    """Verify that an over-long prefix is truncated to 22 characters."""
    long_prefix = "What a crazy long prefix. This is not allowed."
    session.add(
        Namespace(
            miriam_id="MIR:00000258", prefix=long_prefix, pattern=r"^\w+(\-|\.|\w)*$"
        )
    )
    # FIXME: Maybe this is an integrity error instead.
    session.commit()
    stored = session.query(Namespace).first()
    assert stored.prefix == long_prefix[:22]
def test_unique_prefix(session):
    """Verify the unique constraint on the prefix column."""
    session.add(
        Namespace(
            miriam_id="MIR:00000258", prefix="combine.specifications", pattern="pattern"
        )
    )
    session.commit()
    duplicate = Namespace(
        miriam_id="MIR:00000259", prefix="combine.specifications", pattern="pattern"
    )
    session.add(duplicate)
    # A different MIRIAM ID with the same prefix must still be rejected.
    with pytest.raises(IntegrityError):
        session.commit()
def test_pattern_not_null():
    """Verify that omitting the pattern argument raises a TypeError."""
    with pytest.raises(TypeError):
        Namespace(prefix="combine.specifications", miriam_id="MIR:00000258")
def test_pattern_is_compiled():
    """Verify that the identifier pattern is compiled to a regex object."""
    namespace = Namespace(
        prefix="combine.specifications",
        pattern=r"^(foo|bar)$",
        miriam_id="MIR:00000258",
    )
    compiled = namespace.compiled_pattern
    assert isinstance(compiled, re.Pattern)
    assert compiled.match("foo") is not None
def test_pattern_is_compiled_on_load(session):
    """Verify that a namespace loaded from the database has a compiled pattern."""
    session.add(
        Namespace(
            prefix="combine.specifications",
            pattern=r"^(foo|bar)$",
            miriam_id="MIR:00000258",
        )
    )
    session.commit()
    loaded = session.query(Namespace).first()
    assert isinstance(loaded.compiled_pattern, re.Pattern)
    assert loaded.compiled_pattern.match("foo") is not None
def test_embedded_prefix_default(session):
    """Verify that embedded_prefix defaults to False after a round trip."""
    namespace = Namespace(
        prefix="combine.specifications",
        pattern=r"^\w+(\-|\.|\w)*$",
        miriam_id="MIR:00000258",
    )
    session.add(namespace)
    session.commit()
    assert namespace.embedded_prefix is False
def test_get_map(session, namespaces):
    """Verify that the full namespace map equals the fixture mapping."""
    assert Namespace.get_map(session) == namespaces
def test_get_partial_map(session, namespaces):
    """Verify that a restricted namespace map contains only the given prefixes."""
    wanted = ["go", "chebi"]
    result = Namespace.get_map(session, wanted)
    assert len(result) == len(wanted)
    assert all(result[prefix] == namespaces[prefix] for prefix in wanted)
| [
2,
15069,
357,
66,
8,
13130,
11,
3461,
4224,
412,
13,
1355,
527,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
3... | 2.581912 | 2,521 |
"""Check if Stieltjes method, both analytical and discretized works as expected."""
import numpy
import numpoly
import chaospy
def test_analytical_stieltjes(analytical_distribution):
    """Verify that the analytical Stieltjes recursion yields orthogonal monic polynomials."""
    coeffs, [orth], norms = chaospy.analytical_stieltjes(
        order=4, dist=analytical_distribution)
    # Zeroth polynomial is the constant 1; higher orders have zero mean.
    assert orth[0] == 1
    expectations = chaospy.E(orth[1:], analytical_distribution)
    assert numpy.allclose(expectations, 0)
    # Cross-expectations vanish: the covariance matrix must be diagonal.
    covariance = chaospy.E(
        numpoly.outer(orth[1:], orth[1:]), analytical_distribution)
    diagonal_only = numpy.diag(numpy.diag(covariance))
    assert numpy.allclose(diagonal_only, covariance)
    # Polynomials are monic: leading coefficients are all 1.
    assert numpy.allclose(numpoly.lead_coefficient(orth), 1)
def test_stieltjes_compared(analytical_distribution):
    """Verify that the discretized Stieltjes results match the analytical ones."""
    (alpha_a, beta_a), [orth_a], norms_a = chaospy.analytical_stieltjes(
        order=3, dist=analytical_distribution)
    (alpha_d, beta_d), [orth_d], norms_d = chaospy.discretized_stieltjes(
        order=3, dist=analytical_distribution)
    pairs = (
        (alpha_a, alpha_d),
        (beta_a, beta_d),
        (orth_a.coefficients, orth_d.coefficients),
        (norms_a, norms_d),
    )
    for analytical, discretized in pairs:
        assert numpy.allclose(analytical, discretized)
| [
37811,
9787,
611,
520,
72,
2120,
73,
274,
2446,
11,
1111,
30063,
290,
1221,
1186,
1143,
2499,
355,
2938,
526,
15931,
198,
11748,
299,
32152,
198,
11748,
299,
931,
3366,
198,
11748,
17792,
2117,
88,
628,
198,
4299,
1332,
62,
38200,
228... | 2.599581 | 477 |
import numpy as np
import prob as pr
import bhvs as bv
import analysis as alys
# P = bv.ThreePDstrb()
P = bv.FourPDstrb()
P = pr.marginal(P, 3)

print('Initial distr:')
pr.PrintThreePDstrb(P)
print()  # new line

# Number of steps to take towards the uniform distribution.
# Renamed from `iter`, which shadowed the `iter` builtin.
n_steps = 100
uniform = bv.ThreePNoise2_()
# The callee's keyword is still named `iter`; only the local was renamed.
alys.testInfoAlongPath(P, uniform, iter=n_steps)
11748,
299,
32152,
355,
45941,
198,
11748,
1861,
355,
778,
198,
11748,
275,
71,
14259,
355,
275,
85,
198,
11748,
3781,
355,
257,
27385,
198,
198,
2,
350,
796,
275,
85,
13,
12510,
5760,
2536,
65,
3419,
198,
47,
796,
275,
85,
13,
15... | 2.681818 | 132 |
import tensorflow as tf
from active_learning_ts.query_selection.query_sampler import QuerySampler
| [
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
4075,
62,
40684,
62,
912,
13,
22766,
62,
49283,
13,
22766,
62,
37687,
20053,
1330,
43301,
16305,
20053,
628
] | 3.571429 | 28 |
#!/usr/bin/env python
"""
<Program Name>
test_util.py
<Author>
Konstantin Andrianov.
<Started>
February 1, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'util.py'
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import gzip
import shutil
import logging
import tempfile
import unittest
import tuf
import tuf.log
import tuf.hash
import tuf.util
import tuf.unittest_toolbox as unittest_toolbox
import tuf._vendor.six as six
# Module-level logger, named so records can be filtered per test module.
logger = logging.getLogger('tuf.test_util')

# Run unit test.
if __name__ == '__main__':
  unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
27,
15167,
6530,
29,
198,
220,
1332,
62,
22602,
13,
9078,
198,
198,
27,
13838,
29,
198,
220,
17431,
18797,
259,
843,
4484,
709,
13,
198,
198,
27,
10434,
276,
29,
1... | 3.157718 | 298 |
# ------------------------------------------------------------------------------
# test_database.py - Database modul tests
#
# November 2015, Phil Connell
# ------------------------------------------------------------------------------
"""Database tests."""
__all__ = ()
import unittest
from .. import database
class DatabaseTest(unittest.TestCase):
    """Tests for the Database class."""
    # NOTE(review): every test reads self._db, but no setUp() creating it
    # appears in this copy of the file — presumably it was lost when blank
    # lines/sections were stripped; confirm against the original source.
    def test_get_target(self):
        """Test the get_target method."""
        # get_target is create-on-first-use: asking twice for the same name
        # must return the very same Target object.
        foo_a = self._db.get_target("foo")
        self.assertEqual(foo_a.name, "foo")
        foo_b = self._db.get_target("foo")
        self.assertIs(foo_a, foo_b)
    def test_repr(self):
        """Test that databases repr correctly."""
        self._db.get_target("foo")
        self._db.get_target("bar")
        self.assertEqual(repr(self._db), "Database(2 targets)")
    def test_find_targets(self):
        """Test the find_targets method."""
        self._db.get_target("foo")
        self._db.get_target("foo1")
        self._db.get_target("foo2")
        self._db.get_target("foo-bar")
        self._db.get_target("<f>bar")
        # NOTE(review): check_find is not defined anywhere in this view —
        # presumably a module-level helper wrapping find_targets; verify.
        check_find("foo", ["foo", "foo1", "foo2", "foo-bar"])
        # NOTE(review): "foo\d" is a non-raw string with a \d escape
        # (DeprecationWarning on Python 3.6+); should likely be r"foo\d".
        check_find("foo\d", ["foo1", "foo2"])
        check_find("f.*bar", ["foo-bar", "<f>bar"])
class TargetTest(unittest.TestCase):
    """Tests for the Target class."""
    # NOTE(review): the tests use self.first/self.second/self.third, but no
    # setUp() defining them is present in this copy — presumably stripped;
    # confirm against the original source.
    def test_repr(self):
        """Test that targets repr correctly."""
        self.assertEqual(repr(self.first), "Target(abc)")
    def test_add_dependency(self):
        """Test the add_dependency method."""
        self.first.add_dependency(self.third)
        self.first.add_dependency(self.second)
        self.third.add_dependency(self.second)
        # Ordering is maintained!
        self._check_deps(self.first, [self.third, self.second])
        self._check_deps(self.second, [])
        self._check_deps(self.third, [self.second])
        # Reverse edges are tracked as (unordered) sets.
        self._check_deps_rev(self.first, set())
        self._check_deps_rev(self.second, {self.first, self.third})
        self._check_deps_rev(self.third, {self.first})
    def test_add_inclusion(self):
        """Test the add_inclusion method."""
        self.first.add_inclusion(self.third)
        self.first.add_inclusion(self.second)
        self.third.add_inclusion(self.second)
        # Ordering is maintained!
        self._check_incs(self.first, [self.third, self.second])
        self._check_incs(self.second, [])
        self._check_incs(self.third, [self.second])
        self._check_incs_rev(self.first, set())
        self._check_incs_rev(self.second, {self.first, self.third})
        self._check_incs_rev(self.third, {self.first})
    def test_eq_hash(self):
        """Test target equality and hashing."""
        # Equality is by name (value semantics), not identity.
        x = database.Target("foo")
        y = database.Target("foo")
        self.assertIsNot(x, y)
        self.assertEqual(x, y)
        self.assertEqual(hash(x), hash(y))
        z = database.Target("foo!")
        self.assertNotEqual(x, z)
        # NOTE(review): unequal hashes are not guaranteed by the hash
        # contract in general; this assumes Target's hash never collides
        # for these two names.
        self.assertNotEqual(hash(x), hash(z))
    def test_filename(self):
        """Test the filename method."""
        # filename() strips the leading <grist ...> section.
        tgt = database.Target("<grist nonsense>this_is-the_filename.abc")
        self.assertEqual(tgt.filename(), "this_is-the_filename.abc")
    def test_grist(self):
        """Test the grist method."""
        # grist() returns only the <...> section, brackets included.
        tgt = database.Target("<grist!nonsense>this_is-the_filename.abc")
        self.assertEqual(tgt.grist(), "<grist!nonsense>")
    def test_brief_name(self):
        """Test the brief name method."""
        # brief_name() elides all but the first two grist components.
        tgt = database.Target("<blah!grist!ablah!bblah>some_filename xyz.foo")
        self.assertEqual(tgt.brief_name(), "<blah!grist!...>some_filename xyz.foo")
    # --------------------------------------------------------------------------
    # Helpers
    #
    def _check_deps(self, target, expected):
        """Check that the deps attribute of target is as expected."""
        self.assertEqual(list(target.deps), expected)
    def _check_deps_rev(self, target, expected):
        """Check that the deps_rev attribute of target is as expected."""
        self.assertEqual(set(target.deps_rev), expected)
    def _check_incs(self, target, expected):
        """Check that the incs attribute of target is as expected."""
        self.assertEqual(list(target.incs), expected)
    def _check_incs_rev(self, target, expected):
        """Check that the incs_rev attribute of target is as expected."""
        self.assertEqual(set(target.incs_rev), expected)
| [
2,
16529,
26171,
198,
2,
1332,
62,
48806,
13,
9078,
532,
24047,
953,
377,
5254,
198,
2,
198,
2,
3389,
1853,
11,
4543,
1482,
10076,
198,
2,
16529,
26171,
198,
198,
37811,
38105,
5254,
526,
15931,
198,
198,
834,
439,
834,
796,
7499,
... | 2.419285 | 1,846 |
"""Classes for writing ISC configuration files."""
def _write_indent(fh, indent):
  """Write leading whitespace to file.

  Args:
    fh: (file) file object
    indent: (int) number of tabs ('\t') to be written
  """
  # One write of the repeated character replaces the original
  # one-write-per-tab loop; output is byte-identical.
  fh.write('\t' * indent)
class _Conf(object):
  """Base class for configuration objects.

  Keeps an ordered list of child configuration items in ``elements``.
  """

  def __init__(self):
    # Bug fix: ``elements`` was never initialized anywhere, so every method
    # below raised AttributeError.  Clause.__init__ already calls
    # _Conf.__init__(self) expecting this attribute to be created here.
    self.elements = []

  def add_element(self, element):
    """Add element to elements.

    Raises:
      TypeError: if element is not an _Element.
    """
    if not isinstance(element, _Element):
      raise TypeError('%s is not an _Element' % element)
    self.elements.append(element)

  def get_elements(self, label):
    """Return list of all items with label from elements."""
    return [e for e in self.elements if e.label == label]

  def remove_elements(self, label):
    """Remove all items with label from elements."""
    # modify list in situ
    self.elements[:] = [e for e in self.elements if e.label != label]
class ISCConf(_Conf):
  """Class for ISC software configuration.

  Intended for named, but effort has been made (but not tested) to
  accommodate dhcpd.
  """

  def write_file(self, filename):
    """Write configuration to file.

    Args:
      filename: (str) path of file name to be written
    """
    # The context manager closes the file on exit; the explicit fh.close()
    # that used to follow self.write(fh) was redundant and was removed.
    with open(filename, 'w') as fh:
      self.write(fh)

  def write(self, fh):
    """Write config to file.

    Args:
      fh: (file) file object
    """
    # Top-level elements are written unindented, in insertion order.
    for element in self.elements:
      element.write(fh)
class _Element(object):
  """Base class for entries stored in _Conf.elements."""

  def __init__(self, label, comment=None):
    """Return an _Element object.

    Args:
      label: (str) first word of element
      comment: (str) comment to precede element
    """
    self.label = label
    self.comment = comment

  def write(self, fh, indent=0):
    """Write element to file.

    Args:
      fh: (file) file object
      indent: (int) number of tabs ('\t') for leading whitespace

    Each comment line is emitted first, prefixed with '# ' at the same
    indentation, then the label (with no trailing newline).
    """
    if self.comment:
      for comment_line in self.comment.split('\n'):
        _write_indent(fh, indent)
        fh.write('# %s\n' % comment_line)
    _write_indent(fh, indent)
    fh.write('%s' % self.label)
class Statement(_Element):
  """An ISC configuration statement, single-line or with a braced stanza."""

  def __init__(self, label, value=None, stanza=None, comment=None):
    """Return a Statement object.

    Args:
      label: (str) type of statement, e.g. 'allow-transfer'
      value: (tuple) argument(s) printed after the label on the same line
      stanza: (tuple) argument(s) printed one-per-line inside braces,
        e.g. ('10.1.1.1', '10.1.1.2')
      comment: (str) comment to precede the statement
    """
    _Element.__init__(self, label, comment)
    # Normalize exactly as before: empty tuple when no value, and a fresh
    # list copy of the stanza (or an empty list).
    if value:
      self.value = value
    else:
      self.value = ()
    self.stanza = list(stanza) if stanza else []

  def write(self, fh, indent=0):
    """Write statement to file.

    Args:
      fh: (file) file object
      indent: (int) number of tabs ('\t') for leading whitespace

    Output format:

        # comment
        label value {
                stanza;
        };

    Value items are space-separated on the label line; stanza items are
    written on separate, further-indented lines terminated by semi-colons.
    Without a stanza the statement ends with ';' on the label line.
    """
    _Element.write(self, fh, indent)
    # Iterating an empty tuple is a no-op, so no guard is needed here.
    for item in self.value:
      fh.write(' %s' % item)
    if not self.stanza:
      fh.write(';\n')
    else:
      fh.write(' {\n')
      for entry in self.stanza:
        _write_indent(fh, indent + 1)
        fh.write('%s;\n' % entry)
      _write_indent(fh, indent)
      fh.write('};\n')
class Clause(_Conf, _Element):
  """An ISC configuration clause: a labelled, braced group of elements."""

  def __init__(self, label, additional, comment=None):
    """Return a Clause object.

    Args:
      label: (str) type of clause, e.g. 'view'
      additional: (tuple) extra tokens between the label and the opening
        brace, e.g. ('example_view',)
      comment: (str) comment to precede the clause
    """
    _Conf.__init__(self)
    _Element.__init__(self, label, comment)
    if additional:
      self.additional = additional
    else:
      self.additional = []

  def write(self, fh, indent=0):
    """Write clause to file.

    Args:
      fh: (file) file object
      indent: (int) number of tabs ('\t') for leading whitespace

    Output format:

        # comment
        label additional {
                elements;
        };

    Additional tokens are space-separated after the label; each child
    element writes itself one indent level deeper.
    """
    _Element.write(self, fh, indent)
    for extra in self.additional:
      fh.write(' %s' % extra)
    fh.write(' {\n')
    # Delegate to each child element at the next indent level.
    for child in self.elements:
      child.write(fh, indent + 1)
    # close the clause
    _write_indent(fh, indent)
    fh.write('};\n')
if __name__ == '__main__':
  # Bug fix: this module defines no run_tests(), so executing it as a
  # script previously raised NameError.  There is nothing to run directly;
  # importing the module is unchanged.
  pass
| [
37811,
9487,
274,
329,
3597,
3180,
34,
8398,
3696,
526,
15931,
198,
198,
4299,
4808,
13564,
62,
521,
298,
7,
69,
71,
11,
33793,
2599,
198,
220,
220,
220,
37227,
16594,
13216,
10223,
284,
2393,
13,
628,
220,
220,
220,
943,
14542,
25,... | 2.266514 | 2,619 |
data = (
'Di ', # 0x00
'Zhuang ', # 0x01
'Le ', # 0x02
'Lang ', # 0x03
'Chen ', # 0x04
'Cong ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Qing ', # 0x08
'Shuang ', # 0x09
'Fan ', # 0x0a
'Tong ', # 0x0b
'Guan ', # 0x0c
'Ji ', # 0x0d
'Suo ', # 0x0e
'Lei ', # 0x0f
'Lu ', # 0x10
'Liang ', # 0x11
'Mi ', # 0x12
'Lou ', # 0x13
'Chao ', # 0x14
'Su ', # 0x15
'Ke ', # 0x16
'Shu ', # 0x17
'Tang ', # 0x18
'Biao ', # 0x19
'Lu ', # 0x1a
'Jiu ', # 0x1b
'Shu ', # 0x1c
'Zha ', # 0x1d
'Shu ', # 0x1e
'Zhang ', # 0x1f
'Men ', # 0x20
'Mo ', # 0x21
'Niao ', # 0x22
'Yang ', # 0x23
'Tiao ', # 0x24
'Peng ', # 0x25
'Zhu ', # 0x26
'Sha ', # 0x27
'Xi ', # 0x28
'Quan ', # 0x29
'Heng ', # 0x2a
'Jian ', # 0x2b
'Cong ', # 0x2c
'[?] ', # 0x2d
'Hokuso ', # 0x2e
'Qiang ', # 0x2f
'Tara ', # 0x30
'Ying ', # 0x31
'Er ', # 0x32
'Xin ', # 0x33
'Zhi ', # 0x34
'Qiao ', # 0x35
'Zui ', # 0x36
'Cong ', # 0x37
'Pu ', # 0x38
'Shu ', # 0x39
'Hua ', # 0x3a
'Kui ', # 0x3b
'Zhen ', # 0x3c
'Zun ', # 0x3d
'Yue ', # 0x3e
'Zhan ', # 0x3f
'Xi ', # 0x40
'Xun ', # 0x41
'Dian ', # 0x42
'Fa ', # 0x43
'Gan ', # 0x44
'Mo ', # 0x45
'Wu ', # 0x46
'Qiao ', # 0x47
'Nao ', # 0x48
'Lin ', # 0x49
'Liu ', # 0x4a
'Qiao ', # 0x4b
'Xian ', # 0x4c
'Run ', # 0x4d
'Fan ', # 0x4e
'Zhan ', # 0x4f
'Tuo ', # 0x50
'Lao ', # 0x51
'Yun ', # 0x52
'Shun ', # 0x53
'Tui ', # 0x54
'Cheng ', # 0x55
'Tang ', # 0x56
'Meng ', # 0x57
'Ju ', # 0x58
'Cheng ', # 0x59
'Su ', # 0x5a
'Jue ', # 0x5b
'Jue ', # 0x5c
'Tan ', # 0x5d
'Hui ', # 0x5e
'Ji ', # 0x5f
'Nuo ', # 0x60
'Xiang ', # 0x61
'Tuo ', # 0x62
'Ning ', # 0x63
'Rui ', # 0x64
'Zhu ', # 0x65
'Chuang ', # 0x66
'Zeng ', # 0x67
'Fen ', # 0x68
'Qiong ', # 0x69
'Ran ', # 0x6a
'Heng ', # 0x6b
'Cen ', # 0x6c
'Gu ', # 0x6d
'Liu ', # 0x6e
'Lao ', # 0x6f
'Gao ', # 0x70
'Chu ', # 0x71
'Zusa ', # 0x72
'Nude ', # 0x73
'Ca ', # 0x74
'San ', # 0x75
'Ji ', # 0x76
'Dou ', # 0x77
'Shou ', # 0x78
'Lu ', # 0x79
'[?] ', # 0x7a
'[?] ', # 0x7b
'Yuan ', # 0x7c
'Ta ', # 0x7d
'Shu ', # 0x7e
'Jiang ', # 0x7f
'Tan ', # 0x80
'Lin ', # 0x81
'Nong ', # 0x82
'Yin ', # 0x83
'Xi ', # 0x84
'Sui ', # 0x85
'Shan ', # 0x86
'Zui ', # 0x87
'Xuan ', # 0x88
'Cheng ', # 0x89
'Gan ', # 0x8a
'Ju ', # 0x8b
'Zui ', # 0x8c
'Yi ', # 0x8d
'Qin ', # 0x8e
'Pu ', # 0x8f
'Yan ', # 0x90
'Lei ', # 0x91
'Feng ', # 0x92
'Hui ', # 0x93
'Dang ', # 0x94
'Ji ', # 0x95
'Sui ', # 0x96
'Bo ', # 0x97
'Bi ', # 0x98
'Ding ', # 0x99
'Chu ', # 0x9a
'Zhua ', # 0x9b
'Kuai ', # 0x9c
'Ji ', # 0x9d
'Jie ', # 0x9e
'Jia ', # 0x9f
'Qing ', # 0xa0
'Zhe ', # 0xa1
'Jian ', # 0xa2
'Qiang ', # 0xa3
'Dao ', # 0xa4
'Yi ', # 0xa5
'Biao ', # 0xa6
'Song ', # 0xa7
'She ', # 0xa8
'Lin ', # 0xa9
'Kunugi ', # 0xaa
'Cha ', # 0xab
'Meng ', # 0xac
'Yin ', # 0xad
'Tao ', # 0xae
'Tai ', # 0xaf
'Mian ', # 0xb0
'Qi ', # 0xb1
'Toan ', # 0xb2
'Bin ', # 0xb3
'Huo ', # 0xb4
'Ji ', # 0xb5
'Qian ', # 0xb6
'Mi ', # 0xb7
'Ning ', # 0xb8
'Yi ', # 0xb9
'Gao ', # 0xba
'Jian ', # 0xbb
'Yin ', # 0xbc
'Er ', # 0xbd
'Qing ', # 0xbe
'Yan ', # 0xbf
'Qi ', # 0xc0
'Mi ', # 0xc1
'Zhao ', # 0xc2
'Gui ', # 0xc3
'Chun ', # 0xc4
'Ji ', # 0xc5
'Kui ', # 0xc6
'Po ', # 0xc7
'Deng ', # 0xc8
'Chu ', # 0xc9
'[?] ', # 0xca
'Mian ', # 0xcb
'You ', # 0xcc
'Zhi ', # 0xcd
'Guang ', # 0xce
'Qian ', # 0xcf
'Lei ', # 0xd0
'Lei ', # 0xd1
'Sa ', # 0xd2
'Lu ', # 0xd3
'Li ', # 0xd4
'Cuan ', # 0xd5
'Lu ', # 0xd6
'Mie ', # 0xd7
'Hui ', # 0xd8
'Ou ', # 0xd9
'Lu ', # 0xda
'Jie ', # 0xdb
'Gao ', # 0xdc
'Du ', # 0xdd
'Yuan ', # 0xde
'Li ', # 0xdf
'Fei ', # 0xe0
'Zhuo ', # 0xe1
'Sou ', # 0xe2
'Lian ', # 0xe3
'Tamo ', # 0xe4
'Chu ', # 0xe5
'[?] ', # 0xe6
'Zhu ', # 0xe7
'Lu ', # 0xe8
'Yan ', # 0xe9
'Li ', # 0xea
'Zhu ', # 0xeb
'Chen ', # 0xec
'Jie ', # 0xed
'E ', # 0xee
'Su ', # 0xef
'Huai ', # 0xf0
'Nie ', # 0xf1
'Yu ', # 0xf2
'Long ', # 0xf3
'Lai ', # 0xf4
'[?] ', # 0xf5
'Xian ', # 0xf6
'Kwi ', # 0xf7
'Ju ', # 0xf8
'Xiao ', # 0xf9
'Ling ', # 0xfa
'Ying ', # 0xfb
'Jian ', # 0xfc
'Yin ', # 0xfd
'You ', # 0xfe
'Ying ', # 0xff
)
| [
7890,
796,
357,
198,
6,
18683,
46083,
220,
220,
220,
1303,
657,
87,
405,
198,
6,
57,
13415,
648,
46083,
220,
220,
220,
1303,
657,
87,
486,
198,
6,
3123,
46083,
220,
220,
220,
1303,
657,
87,
2999,
198,
6,
43,
648,
46083,
220,
220... | 1.519012 | 3,077 |
#!/usr/bin/python
# pylint: disable-msg=E1101
# a script that receives xmpp messages for an app engine app
# and forwards them to the app, which must have a route
# exposed at /_ah/xmpp/message/chat/ to receive them
# usage is ./xmpp_receiver.py appname login_ip app-password
# General-purpose Python libraries
import httplib
import logging
import os
import re
import select
import sys
import urllib
# Third-party libraries
# On AppScale VMs, we use Python 2.7 to run the XMPPReceiver, but because we
# install the xmpp library for the default Python (Python 2.6), we have to add
# it to our path.
try:
import xmpp
except ImportError:
PYTHON_PACKAGES = '/usr/local/lib/python2.7/dist-packages/'
sys.path.append(PYTHON_PACKAGES + 'xmpppy-0.5.0rc1-py2.7.egg')
import xmpp
class XMPPReceiver():
  """XMPPReceiver provides callers with a way to receive XMPP messages on
  behalf of Google App Engine applications. The receiver will POST any
  received message to an App Server that runs the app, and will respond
  to presence notifications that users may send to it.
  """

  # The headers necessary for posting XMPP messages to App Engine apps.
  HEADERS = {
    'Content-Type' : 'application/x-www-form-urlencoded'
  }

  def __init__(self, appid, login_ip, app_password):
    """Creates a new XMPPReceiver, which will listen for XMPP messages for
    an App Engine app.

    Args:
      appid: A str representing the application ID that this XMPPReceiver
        should poll on behalf of.
      login_ip: A str representing the IP address or FQDN that runs the
        full proxy nginx service, sitting in front of the app we'll be
        posting messages to.
      app_password: A str representing the password associated with the
        XMPP user account for the Google App Engine app that the receiver
        will log in on behalf of.
    """
    self.appid = appid
    self.login_ip = login_ip
    self.app_password = app_password
    # AppScale publishes the app's serving port in a well-known file.
    with open("/etc/appscale/port-{0}.txt".format(self.appid)) as file_handle:
      self.app_port = int(file_handle.read().strip())
    # The JID this receiver logs in as, e.g. "myapp@login-host".
    self.my_jid = self.appid + "@" + self.login_ip
    log_file = "/var/log/appscale/xmppreceiver-{0}.log".format(self.my_jid)
    # Redirect stderr into the log so uncaught tracebacks are captured
    # alongside the regular log records.
    sys.stderr = open(log_file, 'a')
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename=log_file,
                        filemode='a')
    logging.info("Started receiver script for {0}".format(self.my_jid))

  def xmpp_message(self, _, event):
    """Responds to the receipt of an XMPP message, by finding an App Server that
    hosts the given application and POSTing the message's payload to it.

    Args:
      _: The connection that the message was received on (not used).
      event: The actual message that was received.
    """
    logging.info("received a message from {0}, with body {1}" \
      .format(event.getFrom().getStripped(), event.getBody()))
    # NOTE(review): event.getType is not called here, so this logs the bound
    # method object rather than the message type — likely missing "()".
    logging.info("message type is {0}".format(event.getType))
    from_jid = event.getFrom().getStripped()
    params = {}
    params['from'] = from_jid
    params['to'] = self.my_jid
    params['body'] = event.getBody()
    encoded_params = urllib.urlencode(params)
    try:
      # Forward the message to the app's /_ah/xmpp/message/chat/ route;
      # failures are logged but never propagated, so one bad POST cannot
      # kill the receiver loop.
      logging.debug("Attempting to open connection to {0}:{1}".format(
        self.login_ip, self.app_port))
      connection = httplib.HTTPConnection(self.login_ip, self.app_port)
      connection.request('POST', '/_ah/xmpp/message/chat/', encoded_params,
        self.HEADERS)
      response = connection.getresponse()
      logging.info("POST XMPP message returned status of {0}".format(
        response.status))
      connection.close()
    except Exception as e:
      logging.exception(e)

  def xmpp_presence(self, conn, event):
    """Responds to the receipt of a presence message, by telling the sender
    that we are subscribing to their presence and that they should do the same.

    Args:
      conn: The connection that the message was received on.
      event: The actual message that was received.
    """
    logging.info("received a presence from {0}, with payload {1}" \
      .format(event.getFrom().getStripped(), event.getPayload()))
    prs_type = event.getType()
    logging.info("presence type is {0}".format(prs_type))
    who = event.getFrom()
    # Mutual subscription: confirm theirs and request ours in return.
    if prs_type == "subscribe":
      conn.send(xmpp.Presence(to=who, typ='subscribed'))
      conn.send(xmpp.Presence(to=who, typ='subscribe'))

  def listen_for_messages(self, messages_to_listen_for=-1):
    """Polls the XMPP server for messages, responding to any that are seen.

    Args:
      messages_to_listen_for: An int that represents how many messages we
        should listen for. If set to the default value (-1), then we listen
        for an infinite number of messages.
    Returns:
      An int that indicates how many messages were processed.
    Raises:
      SystemExit: If the XMPP server cannot be reached or rejects the
        receiver's credentials.
    """
    jid = xmpp.protocol.JID(self.my_jid)
    client = xmpp.Client(jid.getDomain(), debug=[])
    if not client.connect():
      logging.info("Could not connect")
      raise SystemExit("Could not connect to XMPP server at {0}" \
        .format(self.login_ip))
    if not client.auth(jid.getNode(), self.app_password,
      resource=jid.getResource()):
      logging.info("Could not authenticate with username {0}, password {1}" \
        .format(jid.getNode(), self.app_password))
      raise SystemExit("Could not authenticate to XMPP server at {0}" \
        .format(self.login_ip))
    client.RegisterHandler('message', self.xmpp_message)
    client.RegisterHandler('presence', self.xmpp_presence)
    client.sendInitPresence(requestRoster=0)
    # Watch the client's raw socket so we only call Process() when data is
    # actually pending (1-second select timeout).
    socketlist = {client.Connection._sock:'xmpp'}
    logging.info("About to begin processing requests")
    messages_processed = 0
    # With the default of -1 this condition never becomes false, so the
    # loop runs until the connection is lost.
    while messages_processed != messages_to_listen_for:
      (input_data, _, __) = select.select(socketlist.keys(), [], [], 1)
      for _ in input_data:
        try:
          client.Process(1)
          messages_processed += 1
        except xmpp.protocol.Conflict:
          # Another resource logged in with the same JID; give up and
          # report how far we got.
          logging.info("Lost connection after processing {0} messages" \
            .format(messages_processed))
          return messages_processed
    return messages_processed
if __name__ == "__main__":
  # argv: appname login_ip app-password (see the module header comment).
  RECEIVER = XMPPReceiver(sys.argv[1], sys.argv[2], sys.argv[3])
  # listen_for_messages() returns when the XMPP connection is lost; loop
  # forever so the receiver reconnects with a fresh client each time.
  while True:
    RECEIVER.listen_for_messages()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
279,
2645,
600,
25,
15560,
12,
19662,
28,
36,
1157,
486,
198,
198,
2,
257,
4226,
326,
11583,
2124,
76,
381,
6218,
329,
281,
598,
3113,
598,
198,
2,
290,
22052,
606,
284,
262,
598,
11... | 2.714408 | 2,332 |
#__all__ = ['py_smalloc']
| [
2,
834,
439,
834,
796,
37250,
9078,
62,
17470,
420,
20520,
198
] | 2.166667 | 12 |
# Influenced majorly by Codewouter and CI Boutique Ado Tutorial#
from django.shortcuts import render, get_object_or_404, redirect
from menu.models import Menu
from .models import Feedback
from .forms import FeedbackForm
# Create your views here.
def view_feedback(request, menu_id):
    """Render the feedback (comments) page for a single menu.

    Looks up the menu by primary key (404 if absent) and lists every
    Feedback row attached to it.
    """
    menu = get_object_or_404(Menu, pk=menu_id)
    feedback_list = Feedback.objects.all().filter(menu=menu)
    template = 'feedback/comments.html'
    context = {
        'feedback_list': feedback_list,
        'menu': menu
    }
    return render(request, template, context)
| [
2,
46631,
5864,
1688,
306,
416,
18720,
413,
39605,
290,
14514,
40808,
2350,
1215,
78,
36361,
2,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
11,
18941,
198,
6738,
6859,
13,
27530,... | 2.948357 | 213 |
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import cv2
import cv_bridge
import message_filters
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSDurabilityPolicy
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
from sensor_msgs.msg import Image
from vision_msgs.msg import Detection2DArray
| [
2,
15069,
13130,
4946,
8090,
47061,
5693,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.509294 | 269 |
# Import standard library
import random
from math import sqrt
from typing import Tuple
# Import modules
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import numpy as np
import seagull as sg
import seagull.lifeforms as lf
import streamlit as st
from loguru import logger
from scipy.signal import convolve2d
def make_sprite(
    n_sprites: int,
    n_iters: int,
    repro_rate: int,
    stasis_rate: int,
):
    """Main function for creating sprites

    Parameters
    ----------
    n_sprites : int
        Number of sprites to generate
    n_iters : int
        Number of iterations to run the simulator
    repro_rate : int
        Inverse reproduction rate
    stasis_rate : int
        Stasis rate

    Returns
    -------
    matplotlib.figure.Figure
        A square grid of rendered sprites (one axes per sprite).
    """
    logger.info("Initializing board")
    board = sg.Board(size=(8, 4))

    logger.info("Running simulation")
    sprator_list = []
    for sprite in range(n_sprites):
        # Seed the 8x4 half-sprite with random noise, evolve it, then
        # mirror it for left/right symmetry.
        noise = np.random.choice([0, 1], size=(8, 4))
        custom_lf = lf.Custom(noise)
        board.add(custom_lf, loc=(0, 0))
        sim = sg.Simulator(board)
        sim.run(
            custom_rule,
            iters=n_iters,
            repro_rate=repro_rate,
            stasis_rate=stasis_rate,
        )
        # Only the final generation of the simulation is rendered.
        fstate = sim.get_history()[-1]
        logger.info(f"Generating sprite/s: {sprite}")
        sprator = np.hstack([fstate, np.fliplr(fstate)])
        sprator = np.pad(
            sprator, mode="constant", pad_width=1, constant_values=1
        )
        sprator_with_outline = add_outline(sprator)
        sprator_gradient = get_gradient(sprator_with_outline)
        sprator_final = combine(sprator_with_outline, sprator_gradient)
        sprator_list.append(sprator_final)

    # Generate plot based on the grid size
    # NOTE(review): assumes n_sprites is a perfect square; for other values
    # int(sqrt(...)) silently drops sprites — confirm intended behavior.
    n_grid = int(sqrt(n_sprites))
    # Generate random colors as cmap
    r = lambda: "#%02X%02X%02X" % (
        random.randint(0, 255),
        random.randint(0, 255),
        random.randint(0, 255),
    )
    colors = ["black", "#f2f2f2", r(), r(), r()]
    # NOTE(review): the name "custom" is re-registered once per sprite in
    # the else-branch below (see the TODO); recent matplotlib versions may
    # raise on duplicate registration — confirm the pinned version.
    cm.register_cmap(
        cmap=mpl.colors.LinearSegmentedColormap.from_list(
            "custom", colors
        ).reversed()
    )
    if n_grid == 1:
        fig, axs = plt.subplots(n_grid, n_grid, figsize=(5, 5))
        axs = fig.add_axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
        axs.imshow(sprator_list[0], cmap="custom_r", interpolation="nearest")
        fig.text(0, -0.05, "bit.ly/CellularSprites", ha="left", color="black")
    else:
        fig, axs = plt.subplots(n_grid, n_grid, figsize=(5, 5))
        for ax, sprator in zip(axs.flat, sprator_list):
            # TODO: Remove duplicates
            # Generate random colors as cmap
            r = lambda: "#%02X%02X%02X" % (
                random.randint(0, 255),
                random.randint(0, 255),
                random.randint(0, 255),
            )
            colors = ["black", "#f2f2f2", r(), r(), r()]
            cm.register_cmap(
                cmap=mpl.colors.LinearSegmentedColormap.from_list(
                    "custom", colors
                ).reversed()
            )
            ax.imshow(sprator, cmap="custom_r", interpolation="nearest")
            ax.set_axis_off()
        fig.text(0.125, 0.05, "bit.ly/CellularSprites", ha="left")
    return fig
def custom_rule(X, repro_rate=3, stasis_rate=3) -> np.ndarray:
    """Custom Sprator cellular-automaton update rule.

    A dead cell (0) comes alive when it has at most ``repro_rate`` live
    neighbors; a live cell (1) survives with exactly 2 or exactly
    ``stasis_rate`` live neighbors.  Returns the next generation as a
    boolean array of the same shape as ``X``.
    """
    kernel = np.ones((3, 3))
    # Moore-neighborhood live count (the convolution includes the cell
    # itself, so subtract X to exclude it); zero-padded borders.
    neighbors = convolve2d(X, kernel, mode="same", boundary="fill") - X
    born = (X == 0) & (neighbors <= repro_rate)
    survives = (X == 1) & ((neighbors == 2) | (neighbors == stasis_rate))
    return born | survives
def add_outline(mat: np.ndarray) -> np.ndarray:
"""Pad the matrix"""
m = np.ones(mat.shape)
for idx, orig_val in np.ndenumerate(mat):
x, y = idx
neighbors = [(x, y + 1), (x + 1, y), (x, y - 1), (x - 1, y)]
if orig_val == 0:
m[idx] = 0 # Set the coordinate in the new matrix as 0
for n_coord in neighbors:
try:
m[n_coord] = 0.5 if mat[n_coord] == 1 else 0
except IndexError:
pass
m = np.pad(m, mode="constant", pad_width=1, constant_values=1)
# Let's do a switcheroo, I know this isn't elegant but please feel free to
# do a PR to make this more efficient!
m[m == 1] = np.inf
m[m == 0.5] = 1
m[m == np.inf] = 0.5
return m
def get_gradient(mat: np.ndarray) -> np.ndarray:
    """Get gradient of an outline sprator"""
    # np.gradient returns one array per axis; index 0 is the derivative
    # along axis 0 (rows).
    grad = np.gradient(mat)[0]
    # NOTE(review): _remap is not defined in this module as shown here —
    # presumably it rescales the gradient into the (0.2, 0.25) band;
    # confirm where it is defined before relying on this function.
    return _remap((0.2, 0.25), grad)
def combine(mat_outline: np.ndarray, mat_gradient: np.ndarray):
    """Merge an outline matrix with a gradient matrix.

    Wherever the outline matrix is 0 (the sprite body) the value is taken
    from the gradient matrix; every other cell keeps its outline value.
    Neither input is modified.
    """
    merged = np.copy(mat_outline)
    body = merged == 0
    merged[body] = mat_gradient[body]
    return merged
main()
| [
2,
17267,
3210,
5888,
198,
11748,
4738,
198,
6738,
10688,
1330,
19862,
17034,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
2,
17267,
13103,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13... | 2.117724 | 2,285 |
from urllib import urlencode
from urllib2 import urlopen
from geopy import util
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
from geopy.geocoders.base import Geocoder
| [
6738,
2956,
297,
571,
1330,
2956,
11925,
8189,
198,
6738,
2956,
297,
571,
17,
1330,
19016,
9654,
198,
6738,
4903,
11081,
1330,
7736,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
33918,
198,
16341,
17267,
12331,
25,
198,
220,
220,
22... | 2.84 | 100 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Produce shapefile for block groups that are contained in urban areas.

Data can be downloaded here:
https://www.census.gov/cgi-bin/geo/shapefiles/index.php?year=2015&layergroup=Block+Groups
"""
# Import modules
import geopandas as gpd
import os
import glob
# Define global filepath (hard-coded to the author's machine)
path = '/home/johnny/Documents/Teaching/490_Geospatial_Data_Science_Applications/Applications/OSM_Parks_and_Golf/data/'
# Import urban areas shapefile (2020 urbanized areas)
urban = gpd.read_file(path + 'urban_areas/tl_2020_us_uac10.shp')
# Import state codes (FIPS -> postal abbreviation lookup table)
codes = gpd.read_file(path + 'state_codes.csv')
# Define list of block group shapefiles, one per state
bg_list = sorted(glob.glob(path + 'census_block_groups_2015/*/*.shp'))
for i in range(len(bg_list)):
    # Get path and filename separately
    infilepath, infilename = os.path.split(bg_list[i])
    # Get file name without extension
    # NOTE(review): `infilehortname` looks like a typo for `infileshortname`;
    # it is used consistently below so behavior is unaffected.
    infilehortname, extension = os.path.splitext(infilename)
    print('Processing number %.0f out of %.0f' %(i+1, len(bg_list)))
    # Read file
    bg = gpd.read_file(bg_list[i])
    # Keep only block groups that fall entirely within an urban area.
    # NOTE(review): newer geopandas versions rename `op=` to `predicate=`;
    # confirm the installed version still accepts `op`.
    urban_bg = gpd.sjoin(bg, urban, how='inner', op='within')
    # Drop the urban-area attribute columns brought in by the join
    urban_bg.drop(columns=['UACE10', 'GEOID10', 'NAME10', 'NAMELSAD10', 'LSAD10', 'MTFCC10',
                           'UATYP10', 'FUNCSTAT10', 'ALAND10', 'AWATER10', 'INTPTLAT10',
                           'INTPTLON10', 'index_right'], inplace=True)
    # Get state abbreviation; chars 8:10 of the file name hold the state FIPS
    # code -- TODO confirm this matches the census file-naming convention
    abv = codes['Postal Code'][codes['FIPS'].astype(int) == int(infilehortname[8:10])].values[0]
    # Define new id column: state abbreviation + row index
    urban_bg['code'] = abv
    urban_bg['idx'] = urban_bg.index.values.astype(str)
    urban_bg['id'] = urban_bg['code'] + urban_bg['idx']
    # Drop the temporary helper columns
    urban_bg.drop(columns=['code', 'idx'], inplace=True)
    # Save to file
    urban_bg.to_file(path + 'urban_block_groups/' + infilehortname + '_urban.shp')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
11547,
344,
5485,
7753,
329,
2512,
2628,
326,
389,
7763,
287,
7876,
3006,
13,
198,
198,
6601,
46... | 2.229189 | 925 |
#!/usr/bin/env pyhton
# NOTE(review): the shebang above misspells "python".  This file uses
# Python 2 syntax (print statements, `raise ValueError, msg`) and cannot
# run under Python 3 -- confirm the intended interpreter.
'''Script that runs EBV in parallel using STAN.

Usage: EBV_stan config-file [stan-file]

Reads the configuration, builds a B-spline basis over the data, draws random
initial values for every varying parameter (one set per chain), generates and
samples the STAN model, and pickles the posterior samples (plus the data) to
the configured output file.
'''
import sys,os,string
from numpy import *
import pickle
import time
import priors
import pystan
import config
#import LMCMCMC
import EBV
import make_stan_model
import bspline2
from scipy.interpolate import splrep,splev
if len(sys.argv) < 2:
    print "Usage: EBV_stan config-file [stan-file]"
    sys.exit(1)
# Optional pre-generated STAN model file; fall back to generating one.
stanfile = None
if len(sys.argv) > 2:
    stanfile = sys.argv[2]
    if not os.path.isfile(stanfile): stanfile = None
if not os.path.isfile(sys.argv[1]):
    print "Error, can't find config-file ",sys.argv[1]
    sys.exit(1)
#nchains=int(sys.argv[2])
# poly coefficients from CCM+O'Donnel
f = open('Ia_A_poly.pickle')
d = pickle.load(f)
f.close()
t0 = time.time()
# Load configuration, photometry data and the variable bookkeeping.
cf = EBV.get_config(sys.argv[1])
data = EBV.get_data(cf)
vinfo = EBV.setup_varinfo(cf, data)
nchains = cf.Sampler.chains
# Extinction-law polynomial coefficients, one row per filter.
coefs = d[cf.Model.redlaw]
Amat = array([coefs[f] for f in data.filters])
Aorder = array([coefs['order'][f] for f in data.filters])
Ncoef = Amat.shape[1]
Nf = len(data.filters)
data['knots'] = array(cf.Model.knots)
#data['knots'] = [data.st.min()]*4 + [median(data.st)] + [data.st.max()*1.001]*4
#bs = bspline.Bspline(knot_vector=data.knots, order=3)
# Cubic B-spline basis evaluated at the stretch values.
bs = bspline2.bspline_basis(data.knots, data.st, 3, gradient=cf.Model.gradient)
#data['Bs'] = array([bs(st) for st in data.st])
data['Bs'] = bs
print data['Bs'].shape
# first derivative of splines (for computing errors)
sts = linspace(data.st.min(),data.st.max(),100)
bss = bspline2.bspline_basis(data['knots'], sts, 3, gradient=cf.Model.gradient)
tcks = [splrep(sts, bss[:,i], s=0, k=3) for i in range(bss.shape[1])]
data['dBs'] = array([splev(data.st, tck, der=1) for tck in tcks]).T
print data['dBs'].shape
#data['dBs'] = array([bs.d(st) for st in data.st])
# Data dictionary handed to STAN; indices are shifted +1 because STAN
# arrays are 1-based.
data_in = dict(
        Nf = Nf,
        NSNe = data.Nobj,
        Nobs = len(data.ms),
        Ncoef = Ncoef,
        Nknots = data.Bs.shape[1],
        Bs = data.Bs,
        dBs = data.dBs,
        m = data.ms,
        vm = data.vms,
        st = data.st,
        vst = data.vst,# + 0.06**2,  # RMS of dm15-s_BV relation
        findex = data.fids+1,
        sindex = data.oids+1,
        bindex = data.bids+1,
        f0 = data.f0 + 1,
        findex0 = data.findex0 + 1,
        Amat = Amat,
        Al_order = Aorder)
if 'extra_data' in cf.sections.keys():
    for key in cf.extra_data.options:
        val = getattr(cf.extra_data, key)
        if shape(val) > 0:
            val = array(val)
        data_in[key] = val
# Initial guesses: one dict of random starting values per chain.
# `random` here is numpy.random (via `from numpy import *` above).
init = []
for i in range(nchains):
    init.append({})
    for var in vinfo.varnames:
        if vinfo[var].vary:
            #if var == 'a':
            #   val = random.uniform(-2,2, size=Nf-1)
            #elif var == 'b':
            #   val = random.uniform(-1,1, size=Nf-1)
            #elif var == 'c':
            #   val = random.uniform(-1,1, size=Nf-1)
            if var == 'a':
                val = random.uniform(-1,1, size=(Nf-1,data.Bs.shape[1]))
            elif var == 'R_V':
                if cf.Priors.Rv_global:
                    val = random.uniform(1.0,4.0)
                else:
                    val = random.uniform(1.0,4.0, size=data.Nobj)
            elif var == 'evar':
                val = random.uniform(0.0001, 0.25, size=Nf-1)
            elif var == 'EBV':
                val = random.exponential(0.2, size=data.Nobj)
            elif var == 'tau':
                val = random.uniform(0.01, 1)
            elif var == 'R0':
                if cf.Priors.Rv_binned:
                    val = random.uniform(1.5,5.0, size=len(cf.Priors.Rv_bins))
                else:
                    val = random.uniform(1.5,5.0, size=cf.Priors.NGauss)
            elif var == 'eR0':
                if cf.Priors.Rv_binned:
                    val = random.uniform(0, 2.0, size=len(cf.Priors.Rv_bins))
                else:
                    val = random.uniform(0, 2.0, size=cf.Priors.NGauss)
            elif var == 'theta':
                val = random.dirichlet(ones(cf.Priors.NGauss,))
                print val
            elif var == 'muR':
                val = random.uniform(1.0,4.0)
            elif var == 'sigR':
                val = random.uniform(0.0,2.0)
            else:
                raise ValueError, "Unknown var %s" % var
            init[-1][var] = val
# Generate (or reuse) the STAN model and run the sampler.
fit = make_stan_model.generate_stan(vinfo, data_in, outfile="test.stan",
      iter=cf.Sampler.N_final, warmup=cf.Sampler.burn_final, chains=nchains,
      init=init, stanfile=stanfile)
print "Finished sampling"
samples = fit.extract()
#d = dict(samples=samples,
#         data=data)
samples['data'] = data
fout = open(cf.Sampler.outfile, 'w')
pickle.dump(samples, fout)
fout.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
12972,
71,
1122,
198,
198,
7061,
6,
7391,
326,
4539,
43374,
53,
287,
10730,
1262,
3563,
1565,
2637,
7061,
198,
198,
11748,
25064,
11,
418,
11,
8841,
198,
6738,
299,
32152,
1330,
1635,
198,
11748,
... | 1.958136 | 2,317 |
# Prints the string 'Bruno'.  `escreva` ("write" in Portuguese) is not
# defined here -- presumably provided by a teaching runtime (e.g. a
# Portugol-style environment); TODO confirm.
escreva('Bruno')
| [
198,
3798,
260,
6862,
10786,
33,
5143,
78,
11537,
198
] | 1.8 | 10 |
import binaryninja as bn
from .CryptoScan import CryptoScan
| [
11748,
13934,
35073,
6592,
355,
275,
77,
198,
6738,
764,
23919,
78,
33351,
1330,
36579,
33351,
628
] | 3.588235 | 17 |
#!/usr/bin/env python
# Filename: plot_accuracies
"""
introduction: plot accuracies of the results, including Receiver Operating Characteristic (ROC),
and Precision-Recall
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 31 Dec, 2018
"""
import os, sys
from optparse import OptionParser
import matplotlib.pyplot as plt
import numpy as np
# import vector_features
import vector_features
from vector_features import shape_opeation
import parameters
import basic_src.io_function as io_function
import basic_src.basic as basic
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support
# plt.rc('xtick',labelsize=20)
# plt.rc('ytick',labelsize=20)
def get_iou_scores(result_shp, ground_truth_shp):
    """Compute the IoU score of every polygon in ``result_shp``.

    Args:
        result_shp: path of the shapefile with mapped polygons.
        ground_truth_shp: path of the ground-truth shapefile.

    Returns:
        IoU values, one per polygon in ``result_shp``.
    """
    return vector_features.calculate_IoU_scores(result_shp, ground_truth_shp)
def get_y_true_prediction(input_shp, groud_truth_shp, iou_threshold):
    """Build ground-truth and prediction label arrays from IoU values.

    Every mapped polygon is a predicted positive (1).  A mapped polygon
    whose IoU with the ground truth exceeds ``iou_threshold`` is a true
    positive; ground-truth polygons whose IoU stays below the threshold are
    appended as false negatives (true label 1, predicted label 0).

    Args:
        input_shp: shapefile of mapped polygons.
        groud_truth_shp: shapefile of ground-truth polygons.
        iou_threshold: IoU cut-off separating hits from misses.

    Returns:
        Tuple ``(y_true, y_pred)`` of 1-D numpy arrays.
    """
    # IoU of each mapped polygon (true/false positives) and of each
    # ground-truth polygon (false negatives).
    iou_pre = np.array(get_iou_scores(input_shp, groud_truth_shp))
    iou_GT = np.array(get_iou_scores(groud_truth_shp, input_shp))
    n_mapped = len(iou_pre)
    # Every mapped polygon is predicted as a target (1).
    y_pred = np.ones(n_mapped)
    y_true = np.zeros(n_mapped)
    y_true[iou_pre > iou_threshold] = 1
    # Ground-truth polygons that no mapped polygon covers well enough.
    count_false_neg = len(iou_GT[iou_GT < iou_threshold])
    print(count_false_neg)
    # Append the false negatives: true label 1, predicted label 0.
    y_true = np.append(y_true, np.ones(count_false_neg))
    y_pred = np.append(y_pred, np.zeros(count_false_neg))
    # Report the confusion-matrix counts.
    tp = int(np.sum((y_pred == 1) & (y_true == 1)))
    fp = int(np.sum((y_pred == 1) & (y_true == 0)))
    fn = int(np.sum((y_pred == 0) & (y_true == 1)))
    print('tp=%d, fp=%d, fn=%d' % (tp, fp, fn))
    return y_true, y_pred
def get_y_true_and_scores(input_shp,groud_truth_shp):
    """
    get ground truth and the scores (IoU values) array of polygons

    NOTE(review): abandoned stub -- it computes the IoU values but never
    builds or returns (y_true, y_scores); callers currently receive None.
    Confirm before use.

    Args:
        input_shp: shape file of mapped polygons
        groud_truth_shp: shape file of ground truth polygons
    Returns: y_true,y_scores ( numpy array)
    """
    # calculate the IoU of each predicted polygons (result is never used)
    iou_pre = np.array(get_iou_scores(input_shp, groud_truth_shp))
    # it seems unable to get it 1 Jan 2019
    # return y_true,y_pred
def calculate_precision_recall_iou(IoU_prediction, IoU_ground_truth, iou_threshold):
    """Compute precision, recall and F1 score at a given IoU threshold.

    Args:
        IoU_prediction: 1-D numpy array, IoU of each mapped polygon.
        IoU_ground_truth: 1-D numpy array, IoU of each ground-truth polygon
            (used to count false negatives).
        iou_threshold: IoU cut-off.

    Returns:
        Tuple ``(precision, recall, F1score)``.
    """
    # Mapped polygons above the threshold are true positives, the rest are
    # false positives.
    scores = np.asarray(IoU_prediction)
    true_pos_count = int(np.count_nonzero(scores > iou_threshold))
    false_pos_count = len(scores) - true_pos_count
    # Count false negatives from the ground-truth side: a mapped polygon
    # can cover two or more thaw slumps, so TP + FN need not equal the
    # ground-truth polygon count.
    if iou_threshold <= 0:
        false_neg_count = len(IoU_ground_truth[np.where(IoU_ground_truth == 0)])
    else:
        false_neg_count = len(IoU_ground_truth[np.where(IoU_ground_truth < iou_threshold)])
    if false_neg_count < 0:
        basic.outputlogMessage('warning, false negative count is smaller than 0, recall can not be trusted')
    precision = float(true_pos_count) / (float(true_pos_count) + float(false_pos_count))
    recall = float(true_pos_count) / (float(true_pos_count) + float(false_neg_count))
    # F1 is undefined without true positives; report 0 in that case.
    F1score = 2.0 * precision * recall / (precision + recall) if true_pos_count > 0 else 0
    basic.outputlogMessage("iou_thr: %.3f,TP:%3d, FP:%3d, FN:%3d, TP+FP:%3d, TP+FN:%3d"%(iou_threshold,true_pos_count,false_pos_count,false_neg_count,
                                                                                         true_pos_count+false_pos_count,true_pos_count+false_neg_count))
    return precision, recall, F1score
def calculate_average_precision(precision_list, recall_list):
    """Compute the average precision (AP) of a precision-recall curve.

    AP is the sum of precision values weighted by the increase in recall
    between consecutive points: ``sum_i p[i] * (r[i] - r[i-1])``.

    Args:
        precision_list: list of precision values.
        recall_list: list of recall values, same length as precision_list.

    Returns:
        The average precision.

    Raises:
        ValueError: if the two lists differ in length.
    """
    if len(recall_list) != len(precision_list):
        raise ValueError("the number in precision_list and recall_list is inconsistent")
    ap = 0
    # Pair each point (from index 1 on) with the previous recall value.
    for prev_recall, recall, precision in zip(recall_list, recall_list[1:], precision_list[1:]):
        ap += precision * (recall - prev_recall)
    return ap
def precision_recall_curve_iou(input_shp, groud_truth_shp):
    """Compute a precision-recall curve by sweeping the IoU threshold.

    Unlike ``sklearn.metrics.precision_recall_curve``, precision and recall
    are derived from polygon IoU values, sweeping the threshold from 1.0
    down to 0.0 in steps of 0.04.

    Args:
        input_shp: shapefile of mapped polygons.
        groud_truth_shp: shapefile of ground-truth polygons.

    Returns:
        Tuple ``(precision_list, recall_list, iou_thr_list)``.
    """
    basic.outputlogMessage('calculate precision recall curve for %s'%input_shp)
    # IoU of each mapped polygon, and of each ground-truth polygon (the
    # latter is needed to count false negatives).
    iou_pre = np.array(get_iou_scores(input_shp, groud_truth_shp))
    iou_GT = np.array(get_iou_scores(groud_truth_shp, input_shp))
    precision_list = []
    recall_list = []
    iou_thr_list = []
    # Sweep high -> low; clamp any tiny negative arange value to exactly 0.
    for raw_thr in np.arange(1, -0.01, -0.04):
        iou_thr = max(raw_thr, 0)
        precision, recall, f1score = calculate_precision_recall_iou(iou_pre, iou_GT, iou_thr)
        basic.outputlogMessage("iou_thr: %.3f, precision: %.4f, recall: %.4f, f1score: %.4f"%(iou_thr,precision, recall, f1score))
        precision_list.append(precision)
        recall_list.append(recall)
        iou_thr_list.append(iou_thr)
    return precision_list, recall_list, iou_thr_list
def plot_precision_recall_curve_multi(input_shp_list,groud_truth_shp,save_path,legend_loc='best'):
    """
    plot precision_recall of multi shapefiles to a figure
    Args:
        input_shp_list: a list of shapefiles
        groud_truth_shp: the ground truth file, or a list with one entry per input shapefile
        save_path: output figure path
        legend_loc: matplotlib legend location ('best' anchors the legend outside the axes)
    Returns: True on success; also writes '<save_path stem>_ap.txt' with the AP values
    """
    precision_list = []
    recall_list = []
    average_precision_list = []
    line_labels = []
    # label_set = ['2017','2018','2019']
    # Compute one P-R curve and its average precision per input shapefile.
    for idx,input_shp in enumerate(input_shp_list):
        if isinstance(groud_truth_shp, list):
            precision, recall, _ = precision_recall_curve_iou(input_shp, groud_truth_shp[idx])
        else:
            precision, recall, _ = precision_recall_curve_iou(input_shp, groud_truth_shp)
        precision_list.append(precision)
        recall_list.append(recall)
        average_precision = calculate_average_precision(precision, recall)
        average_precision_list.append(average_precision)
        # Derive a short legend label from the file-name conventions of the
        # k-fold / image-augmentation experiments; fall back to the index.
        file_name = os.path.splitext(os.path.basename(input_shp))[0]
        if 'fold' in file_name: # k-fold cross-validation
            tmp = file_name.split('_')
            if 'rmTimeiou' in file_name:
                label = '_'.join(tmp[-4:-1])
            else:
                label = '_'.join(tmp[-3:])
        elif 'imgAug' in file_name: # image augmentation test
            tmp = file_name.split('_')
            label = tmp[-1]
        else:
            label = str(idx)
        # label = label_set[idx]
        line_labels.append('%s: AP=%.2f'%(label,average_precision))
    # save average_precision to txt file
    txt_path = os.path.splitext(save_path)[0]+'_ap.txt'
    with open(txt_path,'w') as f_obj:
        f_obj.writelines('shape_file average_precision\n')
        for shp_file,average_pre in zip(input_shp_list,average_precision_list):
            f_obj.writelines('%s %.4lf\n'%(shp_file,average_pre))
    # matplotlib build-in color
    # b: blue
    # g: green
    # r: red
    # c: cyan
    # m: magenta
    # y: yellow
    # k: black
    # w: white
    line_color = ['b', 'g', 'r', 'c', 'y', 'k','m'] #
    linestyle = ['-','--','-.',":",'+-','x-']
    # linestyle = [ '+','x' ,'*','s', 'h', 'd', 'p', 'H', 'D'] #,
    color_used_count = len(line_color)
    line_used_count = len(linestyle)
    # Cycle through colors first, then line styles, so each curve looks
    # distinct.  NOTE(review): '+-' and 'x-' are marker+line format combos,
    # not plain line styles -- confirm they render as intended with plt.step.
    for x in range(0,len(input_shp_list)):
        recall = recall_list[x]
        precision = precision_list[x]
        outlook = line_color[x % color_used_count] + linestyle[x // color_used_count]
        step_pos = 'mid'
        plt.step(recall, precision, outlook, where=step_pos,label=line_labels[x])
    # plt.plot(recall, precision, 'r--')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([-0.01, 1.05])
    plt.xlim([-0.01, 1.01])
    plt.title('Precision-Recall curve')
    print('********legend_loc*************', legend_loc)
    if legend_loc=='best':
        plt.legend(loc='best', bbox_to_anchor=(1, 0.5), title="Average Precision", fontsize=9)
    else:
        plt.legend(loc=legend_loc, title="Average Precision", fontsize=9)
    # plt.show()
    plt.savefig(save_path, dpi=300)
    basic.outputlogMessage("Output figures to %s" % os.path.abspath(save_path))
    return True
if __name__ == '__main__':
    usage = "usage: %prog [options] shapefile or shapefiles"
    parser = OptionParser(usage=usage, version="1.0 2017-10-28")
    parser.description = 'Introduction: plot accuracies of the results '
    parser.add_option("-p", "--para",
                      action="store", dest="para_file",default='para.ini',
                      help="the parameters file")
    # NOTE(review): the help text below duplicates the one for "--para";
    # it presumably should describe the output figure path -- confirm.
    parser.add_option("-o", "--output",
                      action="store", dest="output",default='P_R.jpg',
                      help="the parameters file")
    (options, args) = parser.parse_args()
    if len(sys.argv) < 2 or len(args) < 1:
        parser.print_help()
        sys.exit(2)
    # set parameters files, mandatory for the path of ground truth polygons
    if options.para_file is None:
        print('error, no parameters file')
        parser.print_help()
        sys.exit(2)
    else:
        parameters.set_saved_parafile_path(options.para_file)
    basic.setlogfile('accuracies_log.txt')
    # NOTE(review): no function named `main` is defined in this module as
    # shown -- this call raises NameError at runtime; confirm the intended
    # entry point (e.g. plot_precision_recall_curve_multi).
    main(options, args)
    pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
7066,
12453,
25,
7110,
62,
4134,
333,
13433,
198,
37811,
198,
27427,
596,
25,
7110,
4431,
13433,
286,
262,
2482,
11,
1390,
39106,
24850,
15684,
2569,
357,
49,
4503,
828,
198,
220,
... | 2.259332 | 5,117 |
from flask import Blueprint, redirect, render_template
# Flask blueprint for the "hello" feature; its templates live in ./templates.
mod_hello = Blueprint("mod_hello", __name__, template_folder="templates")
@mod_hello.route("/")
| [
6738,
42903,
1330,
39932,
11,
18941,
11,
8543,
62,
28243,
198,
198,
4666,
62,
31373,
796,
39932,
7203,
4666,
62,
31373,
1600,
11593,
3672,
834,
11,
11055,
62,
43551,
2625,
11498,
17041,
4943,
628,
198,
31,
4666,
62,
31373,
13,
38629,
... | 3.422222 | 45 |
#!/usr/bin/env python
import unittest
import os
from osdf import OSDF
# Run the tests discovered in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
555,
715,
395,
198,
11748,
28686,
198,
6738,
28686,
7568,
1330,
7294,
8068,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
555,
715,... | 2.553191 | 47 |
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow import keras
if __name__ == "__main__":
    # NOTE(review): `save_model` is not defined in this snippet --
    # presumably defined earlier in the file; confirm before running.
    save_model("/home/liuyekuan/workspace/mine/AlphaZero-Renju/cpp/py/1")
    # load_model("/home/liuyekuan/workspace/mine/AlphaZero-Renju/cpp/py/1")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
2... | 2.354545 | 110 |
#!/usr/bin/python3
import csv
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
201,
198,
201,
198,
11748,
269,
21370,
201,
198
] | 2 | 17 |
import socket
import os
from nmeaserver import formatter
import time
from datetime import datetime, date
import threading
import logging
import signal
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
# Restore the default SIGINT behaviour (terminate on Ctrl-C), in case a
# library has replaced the handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
# global buoy object 'buoy' holds light buoy state
11748,
17802,
198,
11748,
28686,
198,
6738,
299,
1326,
6005,
332,
1330,
1296,
1436,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
198,
11748,
4704,
278,
198,
11748,
18931,
198,
11748,
6737,
198,
198,
6404,
1362,
79... | 3.275862 | 87 |
import numpy as np | [
11748,
299,
32152,
355,
45941
] | 3.6 | 5 |
from lark import Lark, UnexpectedToken
from opus.lang.parser import parser
import pytest
from pathlib import Path
| [
6738,
300,
668,
1330,
406,
668,
11,
471,
42072,
30642,
198,
6738,
1034,
385,
13,
17204,
13,
48610,
1330,
30751,
198,
11748,
12972,
9288,
198,
6738,
3108,
8019,
1330,
10644,
628,
628,
628,
198
] | 3.529412 | 34 |
#
# locality.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# FoundationDB Python API
"""Documentation for this API can be found at
https://www.foundationdb.org/documentation/api-python.html"""
from fdb import impl as _impl
@_impl.transactional
| [
2,
198,
2,
48036,
13,
9078,
198,
2,
198,
2,
770,
2723,
2393,
318,
636,
286,
262,
5693,
11012,
1280,
2723,
1628,
198,
2,
198,
2,
15069,
2211,
12,
7908,
4196,
3457,
13,
290,
262,
5693,
11012,
1628,
7035,
198,
2,
198,
2,
49962,
739... | 3.706612 | 242 |
# Demo graph for the Bellman-Ford shortest-path algorithm: seven vertices
# A..G.  Node/Edge/BellmanFordAlgorithm are assumed to be defined earlier
# in this file -- TODO confirm.
node1 = Node('A')
node2 = Node('B')
node3 = Node('C')
node4 = Node('D')
node5 = Node('E')
node6 = Node('F')
node7 = Node('G')
# Directed, weighted edges: Edge(weight, source, target).
edge1 = Edge(5, node1, node2)
edge2 = Edge(9, node1, node5)
edge3 = Edge(4, node2, node5)
edge4 = Edge(12, node2, node3)
edge5 = Edge(7, node2, node4)
edge6 = Edge(3, node3, node4)
edge7 = Edge(1, node3, node6)
edge8 = Edge(9, node4, node7)
edge9 = Edge(6, node5, node3)
edge10 = Edge(4, node5, node6)
edge11 = Edge(2, node6, node7)
# edge12 = Edge(6, node7, node3)
# Negative edge weight: Bellman-Ford (unlike Dijkstra) handles these.
edge12 = Edge(-6, node7, node3)
# Register each edge on its source node's adjacency list.
node1.adjacency_list.append(edge1)
node1.adjacency_list.append(edge2)
node2.adjacency_list.append(edge3)
node2.adjacency_list.append(edge4)
node2.adjacency_list.append(edge5)
node3.adjacency_list.append(edge6)
node3.adjacency_list.append(edge7)
node4.adjacency_list.append(edge8)
node5.adjacency_list.append(edge9)
node5.adjacency_list.append(edge10)
node6.adjacency_list.append(edge11)
node7.adjacency_list.append(edge12)
vertices = (node1, node2, node3, node4, node5, node6, node7)
edges = (edge1, edge2, edge3, edge4, edge5, edge6, edge7, edge8, edge9, edge10, edge11, edge12)
# Run single-source shortest paths from A, then print the path to G.
alg = BellmanFordAlgorithm(vertices, edges, node1)
alg.find_shortest_path()
alg.get_shortest_path(node7)
| [
628,
198,
198,
17440,
16,
796,
19081,
10786,
32,
11537,
198,
17440,
17,
796,
19081,
10786,
33,
11537,
198,
17440,
18,
796,
19081,
10786,
34,
11537,
198,
17440,
19,
796,
19081,
10786,
35,
11537,
198,
17440,
20,
796,
19081,
10786,
36,
1... | 2.283302 | 533 |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
    # Pin definitions for the board: (name, index, Pins, IOStandard).
    # Clk / Rst
    ("clk125", 0, Pins("K17"), IOStandard("LVCMOS33")),
    # Leds
    ("user_led", 0, Pins("M14"), IOStandard("LVCMOS33")),
    ("user_led", 1, Pins("M15"), IOStandard("LVCMOS33")),
    ("user_led", 2, Pins("G14"), IOStandard("LVCMOS33")),
    ("user_led", 3, Pins("D18"), IOStandard("LVCMOS33")),
    ("rgb_led", 0,
        Subsignal("r", Pins("Y11")),
        Subsignal("g", Pins("T5")),
        Subsignal("b", Pins("Y12")),
        IOStandard("LVCMOS33")
    ),
    # NOTE(review): pin "V16" is assigned both to rgb_led 1 "r" here and to
    # user_btn 2 below -- confirm against the board schematic.
    ("rgb_led", 1,
        Subsignal("r", Pins("V16")),
        Subsignal("g", Pins("F17")),
        Subsignal("b", Pins("M17")),
        IOStandard("LVCMOS33")
    ),
    # Switches
    ("user_sw", 0, Pins("G15"), IOStandard("LVCMOS33")),
    ("user_sw", 1, Pins("P15"), IOStandard("LVCMOS33")),
    ("user_sw", 2, Pins("W13"), IOStandard("LVCMOS33")),
    ("user_sw", 3, Pins("T16"), IOStandard("LVCMOS33")),
    # Buttons
    ("user_btn", 0, Pins("R18"), IOStandard("LVCMOS33")),
    ("user_btn", 1, Pins("P16"), IOStandard("LVCMOS33")),
    ("user_btn", 2, Pins("V16"), IOStandard("LVCMOS33")),
    ("user_btn", 3, Pins("Y16"), IOStandard("LVCMOS33")),
    # Serial
    ("serial", 0,
        Subsignal("tx", Pins("T17")),
        Subsignal("rx", Pins("Y17")),
        IOStandard("LVCMOS33")
    ),
    ("usb_uart", 0,
        Subsignal("tx", Pins("H15")),
        Subsignal("rx", Pins("J15")),
        IOStandard("LVCMOS33")
    ),
]
_ps7_io = [
    # Zynq PS7 hard-core signals; Pins(n) gives the signal width, the
    # actual pins are fixed by the PS7 block.
    # PS7
    ("ps7_clk", 0, Pins(1)),
    ("ps7_porb", 0, Pins(1)),
    ("ps7_srstb", 0, Pins(1)),
    ("ps7_mio", 0, Pins(54)),
    ("ps7_ddram", 0,
        Subsignal("addr", Pins(15)),
        Subsignal("ba", Pins(3)),
        Subsignal("cas_n", Pins(1)),
        Subsignal("ck_n", Pins(1)),
        Subsignal("ck_p", Pins(1)),
        Subsignal("cke", Pins(1)),
        Subsignal("cs_n", Pins(1)),
        Subsignal("dm", Pins(4)),
        Subsignal("dq", Pins(32)),
        Subsignal("dqs_n", Pins(4)),
        Subsignal("dqs_p", Pins(4)),
        Subsignal("odt", Pins(1)),
        Subsignal("ras_n", Pins(1)),
        Subsignal("reset_n", Pins(1)),
        Subsignal("we_n", Pins(1)),
        Subsignal("vrn", Pins(1)),
        Subsignal("vrp", Pins(1)),
    ),
]
_usb_uart_pmod_io = [
    # Alternative UART wiring through a PMOD adapter on connector JB.
    # USB-UART PMOD on JB:
    # - https://store.digilentinc.com/pmod-usbuart-usb-to-uart-interface/
    ("usb_uart", 0,
        Subsignal("tx", Pins("pmodb:1")),
        Subsignal("rx", Pins("pmodb:2")),
        IOStandard("LVCMOS33")
    ),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
    # PMOD connectors: name -> space-separated FPGA pins, in PMOD pin order.
    ("pmoda", "N15 L14 K16 K14 N16 L15 J16 J14"), # XADC
    ("pmodb", "T20 U20 V20 W20 Y18 Y19 W18 W19"),
    ("pmodc", "V15 W15 T11 T10 W14 Y14 T12 U12"),
    ("pmodd", "T14 T15 P14 R14 U14 U15 V17 V18"),
    ("pmode", "V12 W16 J15 H15 V13 U17 T17 Y17"),
]
# Platform -----------------------------------------------------------------------------------------
| [
2,
198,
2,
770,
2393,
318,
636,
286,
27395,
55,
12,
16635,
1371,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
13130,
12,
42334,
23347,
429,
17337,
3876,
8344,
1279,
2704,
382,
429,
31,
268,
2633,
12,
34725,
13,
8310,
29,
198,
2,
30628... | 2.037193 | 1,667 |
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
6738,
42625,
14208,
13,
9945,
13,
26791,
1330,
6564,
864,
12331,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208... | 3.636364 | 44 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from mock import Mock
from regparser.notice.amdparser import Amendment
from regparser.notice.amendments import fetch, section, subpart
from regparser.test_utils.xml_builder import XMLBuilder
from regparser.tree.xml_parser.preprocessors import preprocess_amdpars
@pytest.fixture
@pytest.mark.usefixtures('content_plugin_installed')
def test_multiple_changes(monkeypatch):
    """ A notice can have two modifications to a paragraph.

    Builds a notice XML with two AMDPAR instructions touching section 106.2
    and checks that two separate amendments, each with its own change set,
    are extracted.  NOTE(review): the `monkeypatch` fixture argument appears
    unused here -- confirm whether it is needed.
    """
    amdpar1 = ("2. Designate §§ 106.1 through 106.3 as subpart A under "
               "the heading.")
    amdpar2 = "3. In § 106.2, revise the introductory text to read:"
    # Assemble the notice XML: two REGTEXT blocks for the same CFR part.
    with XMLBuilder("ROOT") as ctx:
        with ctx.REGTEXT(PART="106", TITLE="12"):
            ctx.AMDPAR(amdpar1)
        with ctx.REGTEXT(PART="106", TITLE="12"):
            ctx.AMDPAR(amdpar2)
            with ctx.SECTION():
                ctx.SECTNO("§ 106.2")
                ctx.SUBJECT(" Definitions ")
                ctx.P(" Except as otherwise provided, the following "
                      "apply. ")
    preprocess_amdpars(ctx.xml)
    # Each AMDPAR yields one amendment carrying its own change set.
    amd1, amd2 = fetch.fetch_amendments(ctx.xml)
    changes1, changes2 = dict(amd1['changes']), dict(amd2['changes'])
    assert amd1['instruction'] == amdpar1
    assert amd1['cfr_part'] == '106'
    assert amd2['instruction'] == amdpar2
    assert amd2['cfr_part'] == '106'
    assert len(changes1['106-2']) == 1
    assert len(changes2['106-2']) == 1
@pytest.mark.usefixtures('content_plugin_installed')
@pytest.mark.usefixtures('content_plugin_installed')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
12972,
9288,
198,
6738,
15290,
1330,
44123,
198,
198,
6738,
842,
48610,
13,
42138,
13,
321,
... | 2.419549 | 665 |
import cv
import sys
import math
import curses
import signal
# ASCII-art webcam viewer: grabs frames with the legacy OpenCV 1.x `cv` API
# and renders them as colored characters in a curses terminal.
# NOTE(review): Python 2 code (xrange, old cv module); `signal_handler` is
# not defined in this snippet -- presumably defined elsewhere; confirm.
stdscr = curses.initscr()
signal.signal(signal.SIGINT, signal_handler)
# Output width in characters; first CLI argument, default 50.
width = int(sys.argv[1]) if len(sys.argv) > 1 else 50
# cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
# Initialize colors and palette (characters ordered dark -> bright)
palette = [' ', '.', '.', '/', 'c', '(', '@', '#', '8']
curses.start_color()
pair = 1
depth = 6
# splitby = 5/1000 = 0.005, so int(R/splitby) scales 0..5 to the curses
# 0..1000 color-intensity range.
splitby = (depth - 1) / 1000.0
# Build a 6x6x6 RGB color cube as curses color pairs 1..216.
for R in xrange(depth):
    for G in xrange(depth):
        for B in xrange(depth):
            curses.init_color(pair, int(R/splitby), int(G/splitby), int(B/splitby))
            curses.init_pair(pair, pair, 0)
            pair = pair + 1
# Main loop: capture, downscale, render -- runs until interrupted.
while True:
    # Capture the image
    img = cv.QueryFrame(capture)
    # Resize the image, preserving the aspect ratio at the chosen width
    size = cv.GetSize(img)
    height = size[0] * width / size[1]
    thumbnail = cv.CreateImage(
        (height, width),
        img.depth,
        img.nChannels
    )
    cv.Resize(img, thumbnail)
    img = thumbnail
    # Print the output
    for x in xrange(img.height):
        for y in xrange(img.width):
            b, g, r = img[x, y]
            # Luma-style brightness picks the palette character.
            value = 0.1145 * b + g * 0.5866 + r * 0.2989
            index = int(math.floor( value / (256.0 / (len(palette)))))
            try:
                stdscr.move(x,y)
                # Quantize RGB to the 6-level cube and select its color pair.
                r = int( r / 256.0 * 6)
                g = int( g / 256.0 * 6)
                b = int( b / 256.0 * 6)
                pair = r * depth * depth + g * depth + b + 1
                stdscr.attrset(curses.color_pair(pair))
                stdscr.addch(palette[index])
            except:
                pass
    stdscr.refresh()
11748,
269,
85,
198,
11748,
25064,
198,
11748,
10688,
198,
11748,
43878,
198,
11748,
6737,
198,
198,
19282,
1416,
81,
796,
43878,
13,
259,
896,
6098,
3419,
198,
198,
12683,
282,
13,
12683,
282,
7,
12683,
282,
13,
50,
3528,
12394,
11,
... | 1.951249 | 841 |
import os
# Map environment names to their config classes.
# NOTE(review): DevelopmentConfig/TestingConfig/ProductionConfig are defined
# elsewhere in this module -- confirm they are in scope here.
configs = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig
}
| [
11748,
28686,
628,
628,
628,
198,
11250,
82,
796,
1391,
198,
220,
220,
220,
705,
31267,
10354,
7712,
16934,
11,
198,
220,
220,
220,
705,
33407,
10354,
23983,
16934,
11,
198,
220,
220,
220,
705,
25493,
10354,
19174,
16934,
198,
92,
198... | 3.166667 | 42 |
from gi.repository import Gtk
import BuilderObject, PillData, PillEdit
| [
6738,
308,
72,
13,
260,
1930,
37765,
1330,
402,
30488,
198,
11748,
35869,
10267,
11,
19770,
6601,
11,
19770,
18378,
198
] | 3.380952 | 21 |
import requests
import typing as t
from datetime import datetime
from .auth import auth, refresh
class StravaAuth(requests.auth.AuthBase):
"""AuthBase class adding the `access_token` Bearer Auth to a request."""
| [
11748,
7007,
198,
11748,
19720,
355,
256,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
764,
18439,
1330,
6284,
11,
14976,
628,
198,
4871,
520,
4108,
64,
30515,
7,
8897,
3558,
13,
18439,
13,
30515,
14881,
2599,
198,
22... | 3.453125 | 64 |
# -*- coding: utf-8 -*-
"""AutoML_AutoSklearn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ykYX4EDuWeMKS91YVIxHNmBfGUYRclDa
# Auto Sklearn
## Installing
- NOTE: -
WINDOWS
- auto-sklearn relies heavily on the Python module resource. resource is part of Python’s Unix Specific Services and not available on a Windows machine. Therefore, it is not possible to run auto-sklearn on a Windows machine.
MAC OS X
- We currently do not know if auto-sklearn works on OSX. There are at least two issues holding us back from actively supporting OSX
LINUX
- use the below way
"""
!sudo apt-get install build-essential swig
!curl https://raw.githubusercontent.com/automl/auto-sklearn/master/requirements.txt | xargs -n 1 -L 1 pip install
!pip install auto-sklearn
"""## Time to Use"""
import sys,tempfile, urllib, os
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
BASE_DIR = '/tmp'
OUTPUT_FILE = os.path.join(BASE_DIR, 'churn_data.csv')
churn_data=urllib.request.urlretrieve('https://raw.githubusercontent.com/srivatsan88/YouTubeLI/master/dataset/WA_Fn-UseC_-Telco-Customer-Churn.csv', OUTPUT_FILE)
churn_df = pd.read_csv(OUTPUT_FILE)
churn_df.head()
churn_df['Churn'].value_counts()
"""# Data Cleaning
- Ahem, Ahem. Auto Sklearn will not clean the data for you.
- You need to clean it yourself.
- You need to convert string data to numerical values.
- It will not accept string values. Nor will it handle it.
"""
churn_df = churn_df.replace(r'^\s*$', np.nan, regex=True)
churn_df.head()
churn_df.iloc[: ,19] = pd.to_numeric(churn_df.iloc[:, 19], errors='coerce')
churn_df.head()
from sklearn.impute import SimpleImputer
imp_median = SimpleImputer(missing_values=np.nan, strategy='median')
churn_df.iloc[:, 19] = imp_median.fit_transform(churn_df.iloc[:, 19].values.reshape(-1, 1))
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OrdinalEncoder

# Ordinal-encode every text column (the target included) so the whole
# frame becomes numeric; numeric columns are left untouched.
categorical_columns = ['gender', 'Partner', 'Dependents', 'PhoneService',
                       'MultipleLines', 'InternetService', 'OnlineSecurity',
                       'OnlineBackup', 'DeviceProtection', 'TechSupport',
                       'StreamingTV', 'StreamingMovies', 'Contract',
                       'PaperlessBilling', 'PaymentMethod', 'Churn']
encoder = make_column_transformer((OrdinalEncoder(), categorical_columns))
encoded = pd.DataFrame(encoder.fit_transform(churn_df),
                       columns=categorical_columns).astype(int)
# Overwrite the original string columns in place (aligned on the index).
churn_df.update(encoded)
churn_df.head()

# Separate target and features; drop the ID column, which carries no signal.
churn_df_y = pd.DataFrame(churn_df['Churn'])
churn_df_y['Churn'] = churn_df_y['Churn'].astype(np.int32)
churn_df_x = churn_df.drop(['Churn'], axis=1, inplace=False)
churn_df_x.drop(['customerID'], axis=1, inplace=True)
for column in churn_df_x.columns:
    churn_df_x[column] = churn_df_x[column].astype(np.float32)

# 75/25 stratified split keeps the churn ratio identical in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    churn_df_x, churn_df_y,
    train_size=0.75, test_size=0.25,
    stratify=churn_df_y, random_state=31)
"""# Using the autosklearn module"""
import autosklearn.classification

"""Parameters to give

- time_left_for_this_task: overall budget (seconds) for the whole search
- per_run_time_limit: budget (seconds) for each individual algorithm run
- include_estimators / exclude_estimators: whitelist / blacklist of algorithms
- include_preprocessors / exclude_preprocessors: feature preprocessing steps
  such as PCA -- this does NOT cover data cleaning like string encoding
- n_jobs: number of parallel worker processes
"""

# Collect the search configuration in one place, then unpack it.
automl_config = {
    'time_left_for_this_task': 120,
    'per_run_time_limit': 30,
    'n_jobs': 2,
    'include_estimators': ['random_forest', 'sgd'],
    'exclude_estimators': None,
    'include_preprocessors': ['no_preprocessing'],
    'exclude_preprocessors': None,
}
clf = autosklearn.classification.AutoSklearnClassifier(**automl_config)
clf.fit(X_train, y_train)

# Summary of the models trained during the search.
print(clf.show_models())
print(clf.sprint_statistics())
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, recall_score
import seaborn as sns
import matplotlib.pyplot as plt

# Hyper-parameters of the best-scoring configuration found by the search
# (notebook cell: the bare expression is displayed, not stored).
clf.cv_results_['params'][np.argmax(clf.cv_results_['mean_test_score'])]

predictions = clf.predict(X_test)
print("Accuracy score : %s" %(accuracy_score(y_test, predictions)))

"""# Validating"""
cm = confusion_matrix(y_test, predictions)
print(cm)

# Heatmap of the confusion matrix for a quick visual check.
sns.set()
sns.heatmap(pd.DataFrame(cm), annot=True, annot_kws={"size": 16}, fmt='')
plt.show()

print(precision_score(y_test, predictions))
print(recall_score(y_test, predictions))
"""# Dumping using Pickle"""
import pickle
x = clf.show_models()
# Dumps the ensemble of models
my_model = {"ensemble" : x}
pickle.dump(my_model, open("model.pickle", 'wb'))
!ls *.pickle
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
27722,
5805,
62,
27722,
15739,
35720,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
... | 2.754356 | 1,779 |