content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import vtkAll as vtk
import math
import time
import numpy as np
from ddapp import callbacks
from ddapp import transformUtils
from ddapp import lcmUtils
from ddapp import objectmodel as om
from ddapp.utime import getUtime
from ddapp import robotstate
import drc as lcmdrc
| [
11748,
28686,
198,
11748,
410,
30488,
3237,
355,
410,
30488,
198,
11748,
10688,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
49427,
1324,
1330,
869,
10146,
198,
6738,
49427,
1324,
1330,
6121,
18274,
4487,
198,
6738... | 3.337209 | 86 |
import SocketServer
from SimpleTCPDispatcher import SimpleTCPDispatcher
from SimpleTCPRequestHandler import SimpleTCPRequestHandler
__author__ = 'umairghani'
class SimpleTCPServer(SocketServer.TCPServer, SimpleTCPDispatcher):
"""
Simple TCP Server
"""
allow_reuse_address = True
def __init__(self, address, request_handler=SimpleTCPRequestHandler,
logging=True, bind_and_activate=True):
"""
:constructor:
:param address:
:param request_handler (optional):
:param bind_and_activate (optional):
"""
self.logging = logging
SimpleTCPDispatcher.__init__(self)
SocketServer.TCPServer.__init__(self, address, request_handler, bind_and_activate)
| [
11748,
47068,
10697,
198,
198,
6738,
17427,
4825,
5760,
271,
8071,
2044,
1330,
17427,
4825,
5760,
271,
8071,
2044,
198,
6738,
17427,
4825,
47,
18453,
25060,
1330,
17427,
4825,
47,
18453,
25060,
198,
198,
834,
9800,
834,
796,
705,
388,
9... | 2.578767 | 292 |
import re
import json
import collections
import traceback
from pathlib import Path
import unicodedata as ucd
from tf.client.make.build import makeSearchClients
from .nena_parser import NenaLexerParser
from .build_tf import NenaTfBuilder
from .build_docs import DocsBuilder
| [
11748,
302,
198,
11748,
33918,
198,
11748,
17268,
198,
11748,
12854,
1891,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
28000,
9043,
1045,
355,
334,
10210,
198,
6738,
48700,
13,
16366,
13,
15883,
13,
11249,
1330,
787,
18243,
2601,
2334... | 3.592105 | 76 |
import tensorflow as tf
import numpy as np
x_inp = tf.placeholder(tf.float32, [5, 5])
w_inp = tf.placeholder(tf.float32, [3, 3])
x = tf.reshape(x_inp, [1, 5, 5, 1])
w = tf.reshape(w_inp, [3, 3, 1, 1])
x_valid = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='VALID')
x_same = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
x_valid_half = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='VALID')
x_same_half = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='SAME')
x = np.array([[0, 1, 2, 1, 0],
[4, 1, 0, 1, 0],
[2, 0, 1, 1, 1],
[1, 2, 3, 1, 0],
[0, 4, 3, 2, 0]])
w = np.array([[0, 1, 0],
[1, 0, 1],
[2, 1, 0]])
with tf.Session() as sess:
y_valid, y_same, y_valid_half, y_same_half = sess.run(
[x_valid, x_same, x_valid_half, x_same_half], feed_dict={x_inp: x, w_inp: w})
print("Padding=VALID:\n", y_valid[0, :, :, 0])
print("Padding=SAME:\n", y_same[0, :, :, 0])
print("Padding=VALID_half:\n", y_valid_half[0, :, :, 0])
print("Padding=SAME_half:\n", y_same_half[0, :, :, 0])
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
628,
198,
87,
62,
259,
79,
796,
48700,
13,
5372,
13829,
7,
27110,
13,
22468,
2624,
11,
685,
20,
11,
642,
12962,
198,
86,
62,
259,
79,
796,
48700,
13,
5372,
... | 1.819536 | 604 |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit, prange
@jit(nopython=True)
@jit(nopython=True)
def mas_width1(attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_log = log_p[i-1, j]
prev_j = j
if j-1 >= 0 and log_p[i-1, j-1] >= log_p[i-1, j]:
prev_log = log_p[i-1, j-1]
prev_j = j-1
log_p[i, j] = attn_map[i, j] + prev_log
prev_ind[i, j] = prev_j
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True, parallel=True)
| [
2,
15069,
357,
66,
8,
33448,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393... | 2.232713 | 752 |
from aioftx.session import FTXClientSession
from .schemas import (
AcceptQuoteRequest,
AcceptQuoteResponse,
CancelQuoteRequest,
CancelQuoteResponse,
CreateQuoteRequest,
CreateQuoteResponse,
GetMyQuotesReponse,
GetMyQuotesRequest,
GetQuotesRequest,
GetQuotesResponse,
Quote,
)
async def get_quotes_for_request(
session: FTXClientSession,
*,
quote_request_id: str,
) -> Quote:
"""
Get the quotes for a quote request from the FTX API
"""
request = GetQuotesRequest(request_id=quote_request_id)
async with session.get(request.url) as resp:
data = await resp.json()
return GetQuotesResponse(**data).data()
async def get_my_quotes(
session: FTXClientSession,
) -> list[Quote]:
"""
Get my quotes from the FTX API
"""
request = GetMyQuotesRequest()
async with session.get(request.url) as resp:
data = await resp.json()
return GetMyQuotesReponse(**data).data()
async def create_quote(
session: FTXClientSession,
*,
quote_request_id: str,
price: float,
) -> Quote:
"""
Create a quote for a quote request from the FTX API
"""
request = CreateQuoteRequest(request_id=quote_request_id, price=price)
async with session.post(request.url, data=request.json()) as resp:
data = await resp.json()
return CreateQuoteResponse(**data).data()
async def cancel_quote(
session: FTXClientSession,
*,
quote_id: str,
) -> Quote:
"""
Cancel a quote from the FTX API
"""
request = CancelQuoteRequest(quote_id=quote_id)
async with session.delete(request.url, data=request.json()) as resp:
data = await resp.json()
return CancelQuoteResponse(**data).data()
async def accept_quote(
session: FTXClientSession,
*,
quote_id: str,
) -> Quote:
"""
Accept a quote from the FTX API
"""
request = AcceptQuoteRequest(quote_id=quote_id)
async with session.post(request.url, data=request.json()) as resp:
data = await resp.json()
return AcceptQuoteResponse(**data).data()
| [
6738,
257,
952,
701,
87,
13,
29891,
1330,
19446,
55,
11792,
36044,
198,
198,
6738,
764,
1416,
4411,
292,
1330,
357,
198,
220,
220,
220,
21699,
25178,
18453,
11,
198,
220,
220,
220,
21699,
25178,
31077,
11,
198,
220,
220,
220,
27910,
... | 2.638854 | 803 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hapi/rudder/rudder.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from hapi.release import info_pb2 as hapi_dot_release_dot_info__pb2
from hapi.release import release_pb2 as hapi_dot_release_dot_release__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hapi/rudder/rudder.proto',
package='hapi.services.rudder',
syntax='proto3',
serialized_pb=_b('\n\x18hapi/rudder/rudder.proto\x12\x14hapi.services.rudder\x1a\x17hapi/release/info.proto\x1a\x1ahapi/release/release.proto\"a\n\x06Result\x12\x0c\n\x04info\x18\x01 \x01(\t\x12\x0b\n\x03log\x18\x02 \x03(\t\"<\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\r\n\tUNCHANGED\x10\x02\x12\t\n\x05\x45RROR\x10\x03\"\x17\n\x15VersionReleaseRequest\"7\n\x16VersionReleaseResponse\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\"?\n\x15InstallReleaseRequest\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\"n\n\x16InstallReleaseResponse\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.hapi.services.rudder.Result\">\n\x14\x44\x65leteReleaseRequest\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\"m\n\x15\x44\x65leteReleaseResponse\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.hapi.services.rudder.Result\"\xa6\x01\n\x15UpgradeReleaseRequest\x12&\n\x07\x63urrent\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12%\n\x06target\x18\x02 \x01(\x0b\x32\x15.hapi.release.Release\x12\x0f\n\x07Timeout\x18\x03 \x01(\x03\x12\x0c\n\x04Wait\x18\x04 \x01(\x08\x12\x10\n\x08Recreate\x18\x05 \x01(\x08\x12\r\n\x05\x46orce\x18\x06 \x01(\x08\"n\n\x16UpgradeReleaseResponse\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12,\n\x06result\x18\x02 \x01(\x0b\x32\x1c.hapi.services.rudder.Result\"\xa7\x01\n\x16RollbackReleaseRequest\x12&\n\x07\x63urrent\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12%\n\x06target\x18\x02 \x01(\x0b\x32\x15.hapi.release.Release\x12\x0f\n\x07Timeout\x18\x03 \x01(\x03\x12\x0c\n\x04Wait\x18\x04 \x01(\x08\x12\x10\n\x08Recreate\x18\x05 \x01(\x08\x12\r\n\x05\x46orce\x18\x06 \x01(\x08\"o\n\x17RollbackReleaseResponse\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12,\n\x06result\x18\x02 
\x01(\x0b\x32\x1c.hapi.services.rudder.Result\">\n\x14ReleaseStatusRequest\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\"a\n\x15ReleaseStatusResponse\x12&\n\x07release\x18\x01 \x01(\x0b\x32\x15.hapi.release.Release\x12 \n\x04info\x18\x02 \x01(\x0b\x32\x12.hapi.release.Info2\xa6\x05\n\x14ReleaseModuleService\x12\x66\n\x07Version\x12+.hapi.services.rudder.VersionReleaseRequest\x1a,.hapi.services.rudder.VersionReleaseResponse\"\x00\x12m\n\x0eInstallRelease\x12+.hapi.services.rudder.InstallReleaseRequest\x1a,.hapi.services.rudder.InstallReleaseResponse\"\x00\x12j\n\rDeleteRelease\x12*.hapi.services.rudder.DeleteReleaseRequest\x1a+.hapi.services.rudder.DeleteReleaseResponse\"\x00\x12p\n\x0fRollbackRelease\x12,.hapi.services.rudder.RollbackReleaseRequest\x1a-.hapi.services.rudder.RollbackReleaseResponse\"\x00\x12m\n\x0eUpgradeRelease\x12+.hapi.services.rudder.UpgradeReleaseRequest\x1a,.hapi.services.rudder.UpgradeReleaseResponse\"\x00\x12j\n\rReleaseStatus\x12*.hapi.services.rudder.ReleaseStatusRequest\x1a+.hapi.services.rudder.ReleaseStatusResponse\"\x00\x42\x08Z\x06rudderb\x06proto3')
,
dependencies=[hapi_dot_release_dot_info__pb2.DESCRIPTOR,hapi_dot_release_dot_release__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RESULT_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='hapi.services.rudder.Result.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNCHANGED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=140,
serialized_end=200,
)
_sym_db.RegisterEnumDescriptor(_RESULT_STATUS)
_RESULT = _descriptor.Descriptor(
name='Result',
full_name='hapi.services.rudder.Result',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='info', full_name='hapi.services.rudder.Result.info', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log', full_name='hapi.services.rudder.Result.log', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RESULT_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=103,
serialized_end=200,
)
_VERSIONRELEASEREQUEST = _descriptor.Descriptor(
name='VersionReleaseRequest',
full_name='hapi.services.rudder.VersionReleaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=202,
serialized_end=225,
)
_VERSIONRELEASERESPONSE = _descriptor.Descriptor(
name='VersionReleaseResponse',
full_name='hapi.services.rudder.VersionReleaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='hapi.services.rudder.VersionReleaseResponse.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='hapi.services.rudder.VersionReleaseResponse.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=227,
serialized_end=282,
)
_INSTALLRELEASEREQUEST = _descriptor.Descriptor(
name='InstallReleaseRequest',
full_name='hapi.services.rudder.InstallReleaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.InstallReleaseRequest.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=284,
serialized_end=347,
)
_INSTALLRELEASERESPONSE = _descriptor.Descriptor(
name='InstallReleaseResponse',
full_name='hapi.services.rudder.InstallReleaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.InstallReleaseResponse.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='hapi.services.rudder.InstallReleaseResponse.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=459,
)
_DELETERELEASEREQUEST = _descriptor.Descriptor(
name='DeleteReleaseRequest',
full_name='hapi.services.rudder.DeleteReleaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.DeleteReleaseRequest.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=461,
serialized_end=523,
)
_DELETERELEASERESPONSE = _descriptor.Descriptor(
name='DeleteReleaseResponse',
full_name='hapi.services.rudder.DeleteReleaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.DeleteReleaseResponse.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='hapi.services.rudder.DeleteReleaseResponse.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=525,
serialized_end=634,
)
_UPGRADERELEASEREQUEST = _descriptor.Descriptor(
name='UpgradeReleaseRequest',
full_name='hapi.services.rudder.UpgradeReleaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='current', full_name='hapi.services.rudder.UpgradeReleaseRequest.current', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='hapi.services.rudder.UpgradeReleaseRequest.target', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Timeout', full_name='hapi.services.rudder.UpgradeReleaseRequest.Timeout', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Wait', full_name='hapi.services.rudder.UpgradeReleaseRequest.Wait', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Recreate', full_name='hapi.services.rudder.UpgradeReleaseRequest.Recreate', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Force', full_name='hapi.services.rudder.UpgradeReleaseRequest.Force', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=637,
serialized_end=803,
)
_UPGRADERELEASERESPONSE = _descriptor.Descriptor(
name='UpgradeReleaseResponse',
full_name='hapi.services.rudder.UpgradeReleaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.UpgradeReleaseResponse.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='hapi.services.rudder.UpgradeReleaseResponse.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=805,
serialized_end=915,
)
_ROLLBACKRELEASEREQUEST = _descriptor.Descriptor(
name='RollbackReleaseRequest',
full_name='hapi.services.rudder.RollbackReleaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='current', full_name='hapi.services.rudder.RollbackReleaseRequest.current', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target', full_name='hapi.services.rudder.RollbackReleaseRequest.target', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Timeout', full_name='hapi.services.rudder.RollbackReleaseRequest.Timeout', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Wait', full_name='hapi.services.rudder.RollbackReleaseRequest.Wait', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Recreate', full_name='hapi.services.rudder.RollbackReleaseRequest.Recreate', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Force', full_name='hapi.services.rudder.RollbackReleaseRequest.Force', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=918,
serialized_end=1085,
)
_ROLLBACKRELEASERESPONSE = _descriptor.Descriptor(
name='RollbackReleaseResponse',
full_name='hapi.services.rudder.RollbackReleaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.RollbackReleaseResponse.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='hapi.services.rudder.RollbackReleaseResponse.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1087,
serialized_end=1198,
)
_RELEASESTATUSREQUEST = _descriptor.Descriptor(
name='ReleaseStatusRequest',
full_name='hapi.services.rudder.ReleaseStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.ReleaseStatusRequest.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1200,
serialized_end=1262,
)
_RELEASESTATUSRESPONSE = _descriptor.Descriptor(
name='ReleaseStatusResponse',
full_name='hapi.services.rudder.ReleaseStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='release', full_name='hapi.services.rudder.ReleaseStatusResponse.release', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='info', full_name='hapi.services.rudder.ReleaseStatusResponse.info', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1264,
serialized_end=1361,
)
_RESULT_STATUS.containing_type = _RESULT
_INSTALLRELEASEREQUEST.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_INSTALLRELEASERESPONSE.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_INSTALLRELEASERESPONSE.fields_by_name['result'].message_type = _RESULT
_DELETERELEASEREQUEST.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_DELETERELEASERESPONSE.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_DELETERELEASERESPONSE.fields_by_name['result'].message_type = _RESULT
_UPGRADERELEASEREQUEST.fields_by_name['current'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_UPGRADERELEASEREQUEST.fields_by_name['target'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_UPGRADERELEASERESPONSE.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_UPGRADERELEASERESPONSE.fields_by_name['result'].message_type = _RESULT
_ROLLBACKRELEASEREQUEST.fields_by_name['current'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_ROLLBACKRELEASEREQUEST.fields_by_name['target'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_ROLLBACKRELEASERESPONSE.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_ROLLBACKRELEASERESPONSE.fields_by_name['result'].message_type = _RESULT
_RELEASESTATUSREQUEST.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_RELEASESTATUSRESPONSE.fields_by_name['release'].message_type = hapi_dot_release_dot_release__pb2._RELEASE
_RELEASESTATUSRESPONSE.fields_by_name['info'].message_type = hapi_dot_release_dot_info__pb2._INFO
DESCRIPTOR.message_types_by_name['Result'] = _RESULT
DESCRIPTOR.message_types_by_name['VersionReleaseRequest'] = _VERSIONRELEASEREQUEST
DESCRIPTOR.message_types_by_name['VersionReleaseResponse'] = _VERSIONRELEASERESPONSE
DESCRIPTOR.message_types_by_name['InstallReleaseRequest'] = _INSTALLRELEASEREQUEST
DESCRIPTOR.message_types_by_name['InstallReleaseResponse'] = _INSTALLRELEASERESPONSE
DESCRIPTOR.message_types_by_name['DeleteReleaseRequest'] = _DELETERELEASEREQUEST
DESCRIPTOR.message_types_by_name['DeleteReleaseResponse'] = _DELETERELEASERESPONSE
DESCRIPTOR.message_types_by_name['UpgradeReleaseRequest'] = _UPGRADERELEASEREQUEST
DESCRIPTOR.message_types_by_name['UpgradeReleaseResponse'] = _UPGRADERELEASERESPONSE
DESCRIPTOR.message_types_by_name['RollbackReleaseRequest'] = _ROLLBACKRELEASEREQUEST
DESCRIPTOR.message_types_by_name['RollbackReleaseResponse'] = _ROLLBACKRELEASERESPONSE
DESCRIPTOR.message_types_by_name['ReleaseStatusRequest'] = _RELEASESTATUSREQUEST
DESCRIPTOR.message_types_by_name['ReleaseStatusResponse'] = _RELEASESTATUSRESPONSE
Result = _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), dict(
DESCRIPTOR = _RESULT,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.Result)
))
_sym_db.RegisterMessage(Result)
VersionReleaseRequest = _reflection.GeneratedProtocolMessageType('VersionReleaseRequest', (_message.Message,), dict(
DESCRIPTOR = _VERSIONRELEASEREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.VersionReleaseRequest)
))
_sym_db.RegisterMessage(VersionReleaseRequest)
VersionReleaseResponse = _reflection.GeneratedProtocolMessageType('VersionReleaseResponse', (_message.Message,), dict(
DESCRIPTOR = _VERSIONRELEASERESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.VersionReleaseResponse)
))
_sym_db.RegisterMessage(VersionReleaseResponse)
InstallReleaseRequest = _reflection.GeneratedProtocolMessageType('InstallReleaseRequest', (_message.Message,), dict(
DESCRIPTOR = _INSTALLRELEASEREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.InstallReleaseRequest)
))
_sym_db.RegisterMessage(InstallReleaseRequest)
InstallReleaseResponse = _reflection.GeneratedProtocolMessageType('InstallReleaseResponse', (_message.Message,), dict(
DESCRIPTOR = _INSTALLRELEASERESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.InstallReleaseResponse)
))
_sym_db.RegisterMessage(InstallReleaseResponse)
DeleteReleaseRequest = _reflection.GeneratedProtocolMessageType('DeleteReleaseRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETERELEASEREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.DeleteReleaseRequest)
))
_sym_db.RegisterMessage(DeleteReleaseRequest)
DeleteReleaseResponse = _reflection.GeneratedProtocolMessageType('DeleteReleaseResponse', (_message.Message,), dict(
DESCRIPTOR = _DELETERELEASERESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.DeleteReleaseResponse)
))
_sym_db.RegisterMessage(DeleteReleaseResponse)
UpgradeReleaseRequest = _reflection.GeneratedProtocolMessageType('UpgradeReleaseRequest', (_message.Message,), dict(
DESCRIPTOR = _UPGRADERELEASEREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.UpgradeReleaseRequest)
))
_sym_db.RegisterMessage(UpgradeReleaseRequest)
UpgradeReleaseResponse = _reflection.GeneratedProtocolMessageType('UpgradeReleaseResponse', (_message.Message,), dict(
DESCRIPTOR = _UPGRADERELEASERESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.UpgradeReleaseResponse)
))
_sym_db.RegisterMessage(UpgradeReleaseResponse)
RollbackReleaseRequest = _reflection.GeneratedProtocolMessageType('RollbackReleaseRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRELEASEREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.RollbackReleaseRequest)
))
_sym_db.RegisterMessage(RollbackReleaseRequest)
RollbackReleaseResponse = _reflection.GeneratedProtocolMessageType('RollbackReleaseResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKRELEASERESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.RollbackReleaseResponse)
))
_sym_db.RegisterMessage(RollbackReleaseResponse)
ReleaseStatusRequest = _reflection.GeneratedProtocolMessageType('ReleaseStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _RELEASESTATUSREQUEST,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.ReleaseStatusRequest)
))
_sym_db.RegisterMessage(ReleaseStatusRequest)
ReleaseStatusResponse = _reflection.GeneratedProtocolMessageType('ReleaseStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _RELEASESTATUSRESPONSE,
__module__ = 'hapi.rudder.rudder_pb2'
# @@protoc_insertion_point(class_scope:hapi.services.rudder.ReleaseStatusResponse)
))
_sym_db.RegisterMessage(ReleaseStatusResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\006rudder'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class BetaReleaseModuleServiceServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
def InstallRelease(self, request, context):
"""InstallRelease requests installation of a chart as a new release.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteRelease(self, request, context):
"""DeleteRelease requests deletion of a named release.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RollbackRelease(self, request, context):
"""RollbackRelease rolls back a release to a previous version.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def UpgradeRelease(self, request, context):
"""UpgradeRelease updates release content.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ReleaseStatus(self, request, context):
"""ReleaseStatus retrieves release status.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaReleaseModuleServiceStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
Version.future = None
def InstallRelease(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""InstallRelease requests installation of a chart as a new release.
"""
raise NotImplementedError()
InstallRelease.future = None
def DeleteRelease(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""DeleteRelease requests deletion of a named release.
"""
raise NotImplementedError()
DeleteRelease.future = None
def RollbackRelease(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""RollbackRelease rolls back a release to a previous version.
"""
raise NotImplementedError()
RollbackRelease.future = None
def UpgradeRelease(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""UpgradeRelease updates release content.
"""
raise NotImplementedError()
UpgradeRelease.future = None
def ReleaseStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""ReleaseStatus retrieves release status.
"""
raise NotImplementedError()
ReleaseStatus.future = None
def beta_create_ReleaseModuleService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('hapi.services.rudder.ReleaseModuleService', 'DeleteRelease'): DeleteReleaseRequest.FromString,
('hapi.services.rudder.ReleaseModuleService', 'InstallRelease'): InstallReleaseRequest.FromString,
('hapi.services.rudder.ReleaseModuleService', 'ReleaseStatus'): ReleaseStatusRequest.FromString,
('hapi.services.rudder.ReleaseModuleService', 'RollbackRelease'): RollbackReleaseRequest.FromString,
('hapi.services.rudder.ReleaseModuleService', 'UpgradeRelease'): UpgradeReleaseRequest.FromString,
('hapi.services.rudder.ReleaseModuleService', 'Version'): VersionReleaseRequest.FromString,
}
response_serializers = {
('hapi.services.rudder.ReleaseModuleService', 'DeleteRelease'): DeleteReleaseResponse.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'InstallRelease'): InstallReleaseResponse.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'ReleaseStatus'): ReleaseStatusResponse.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'RollbackRelease'): RollbackReleaseResponse.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'UpgradeRelease'): UpgradeReleaseResponse.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'Version'): VersionReleaseResponse.SerializeToString,
}
method_implementations = {
('hapi.services.rudder.ReleaseModuleService', 'DeleteRelease'): face_utilities.unary_unary_inline(servicer.DeleteRelease),
('hapi.services.rudder.ReleaseModuleService', 'InstallRelease'): face_utilities.unary_unary_inline(servicer.InstallRelease),
('hapi.services.rudder.ReleaseModuleService', 'ReleaseStatus'): face_utilities.unary_unary_inline(servicer.ReleaseStatus),
('hapi.services.rudder.ReleaseModuleService', 'RollbackRelease'): face_utilities.unary_unary_inline(servicer.RollbackRelease),
('hapi.services.rudder.ReleaseModuleService', 'UpgradeRelease'): face_utilities.unary_unary_inline(servicer.UpgradeRelease),
('hapi.services.rudder.ReleaseModuleService', 'Version'): face_utilities.unary_unary_inline(servicer.Version),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_ReleaseModuleService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('hapi.services.rudder.ReleaseModuleService', 'DeleteRelease'): DeleteReleaseRequest.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'InstallRelease'): InstallReleaseRequest.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'ReleaseStatus'): ReleaseStatusRequest.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'RollbackRelease'): RollbackReleaseRequest.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'UpgradeRelease'): UpgradeReleaseRequest.SerializeToString,
('hapi.services.rudder.ReleaseModuleService', 'Version'): VersionReleaseRequest.SerializeToString,
}
response_deserializers = {
('hapi.services.rudder.ReleaseModuleService', 'DeleteRelease'): DeleteReleaseResponse.FromString,
('hapi.services.rudder.ReleaseModuleService', 'InstallRelease'): InstallReleaseResponse.FromString,
('hapi.services.rudder.ReleaseModuleService', 'ReleaseStatus'): ReleaseStatusResponse.FromString,
('hapi.services.rudder.ReleaseModuleService', 'RollbackRelease'): RollbackReleaseResponse.FromString,
('hapi.services.rudder.ReleaseModuleService', 'UpgradeRelease'): UpgradeReleaseResponse.FromString,
('hapi.services.rudder.ReleaseModuleService', 'Version'): VersionReleaseResponse.FromString,
}
cardinalities = {
'DeleteRelease': cardinality.Cardinality.UNARY_UNARY,
'InstallRelease': cardinality.Cardinality.UNARY_UNARY,
'ReleaseStatus': cardinality.Cardinality.UNARY_UNARY,
'RollbackRelease': cardinality.Cardinality.UNARY_UNARY,
'UpgradeRelease': cardinality.Cardinality.UNARY_UNARY,
'Version': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'hapi.services.rudder.ReleaseModuleService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
387,
14415,
14,
81,
41686,
14,
81,
41686,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
... | 2.643583 | 13,776 |
from twisted.spread import pb
from twisted.internet import reactor
from twisted.python import util
factory = pb.PBClientFactory()
reactor.connectTCP("localhost", 8789, factory)
d = factory.getRootObject()
a = []
d.addCallback(lambda object: object.callRemote("nextQuote"))
d.addCallback(lambda echo: a.append(echo))
d.addErrback(lambda reason: 'error: '+str(reason.value))
d.addCallback(util.println)
reactor.run() | [
6738,
19074,
13,
43639,
1330,
279,
65,
198,
6738,
19074,
13,
37675,
1330,
21905,
198,
6738,
19074,
13,
29412,
1330,
7736,
198,
198,
69,
9548,
796,
279,
65,
13,
49079,
11792,
22810,
3419,
198,
260,
11218,
13,
8443,
4825,
47,
7203,
3675... | 3.167939 | 131 |
from subprocess import check_output
| [
6738,
850,
14681,
1330,
2198,
62,
22915,
628,
198
] | 4.222222 | 9 |
# -*- coding: utf-8 -*-
###################################################################
# Object detection - YOLOv5_6.0 - OpenCV dnn
# From : https://github.com/samsu2018/yolov5_6.0_opencvdnn_python
# Modify : Sam Su (1, 11, 2022)
##################################################################
import cv2
import argparse
import numpy as np
import time
# =============================================================================
# The following main functions are used for standalong testing
# =============================================================================
if __name__ == "__main__":
imgpath = 'bus.jpg'
net = '../../yolov5_6.0_opencvdnn_python/yolov5s'
confThreshold = '0.5'
nmsThreshold = '0.5'
inpWidth = 640
inpHeight = 640
tStart = time.time()
dets, frame = get_obj(imgpath, confThreshold, nmsThreshold, net, inpWidth, inpHeight)
print(dets)
cv2.imwrite('output.jpg', frame)
print('Story the result to output.jpg')
print('Spend time:{}'.format(time.time()-tStart))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
21017,
198,
2,
9515,
13326,
532,
575,
3535,
46,
85,
20,
62,
21,
13,
15,
532,
4946,
33538,
288,
20471,
198,
2,
3574,
1058,
3740,
1378,
12567,
13,
785,
1... | 3.163142 | 331 |
import os
import random
from PIL import Image
from argparse import ArgumentParser
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.utils as vutils
from model.networks import Generator
from utils.tools import get_config, is_image_file, get_model_list
from data.dataset import gen_random_mask, mask_img, normalize
parser = ArgumentParser()
parser.add_argument('--config', type=str, default='configs/config.yaml',
help="training configuration")
parser.add_argument('--seed', type=int, help='manual seed')
parser.add_argument('--image', type=str)
parser.add_argument('--edge', type=str, default='')
parser.add_argument('--mask', type=str, default='')
parser.add_argument('--output', type=str, default='output.png')
parser.add_argument('--flow', type=str, default='')
parser.add_argument('--checkpoint_path', type=str, default='')
parser.add_argument('--iter', type=int, default=0)
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
4738,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
1891,
2412,
13,
66,
463,
20471,
355... | 3.065672 | 335 |
from cassandra.cluster import Cluster
"""
Function to create a connection object
"""
| [
6738,
30606,
15918,
13,
565,
5819,
1330,
38279,
198,
198,
37811,
198,
220,
15553,
284,
2251,
257,
4637,
2134,
198,
37811,
628
] | 4.045455 | 22 |
""" This is a script to generate prime numbers from a range of numbers.
Or by checking to see if the input number is prime.
"""
#Calculate the prime numbers between two numbers
#Allow the user to enter in a range of numbers
#Secondly be able to have the user enter a number to check to see if it is prime
#thirdly have the user able to choose at the start which test they wish to be running.
#be able to restart the script or quit based on user input
#Calculating the prime numbers between two numbers
user_input1 = int(raw_input("Please enter value here: "))
user_input2 = int(raw_input("Please enter value here: "))
user_input3 = int(raw_input("> "))
user_input_intro= int(raw_input("> "))
print "What would you like to test."
print "1: Test a number to see if it is prime?"
print "2: Calculate the prime numbers between two numbers?"
choice = user_input_intro
if choice == 1:
return isprime
elif choice == 2:
return prime_r
else:
print "Sorry Please enter in a number between 1 and 2."
| [
37811,
770,
318,
257,
4226,
284,
7716,
6994,
3146,
422,
257,
2837,
286,
3146,
13,
198,
5574,
416,
10627,
284,
766,
611,
262,
5128,
1271,
318,
6994,
13,
198,
37811,
198,
198,
2,
9771,
3129,
378,
262,
6994,
3146,
1022,
734,
3146,
198,... | 3.462069 | 290 |
h, m = map(int, input().split(':'))
t = h*60+m
for i in range(120):
if (t >= 7*60 and t < 10*60) or (t >= 15*60 and t < 19*60):
t += 2
else:
t += 1
h = t//60%24
m = t%60
# print(f'{h:02d}:{m:02d}')
print('%02d:%02d' % (h, m)) | [
71,
11,
285,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
7,
10354,
6,
4008,
201,
198,
83,
796,
289,
9,
1899,
10,
76,
201,
198,
1640,
1312,
287,
2837,
7,
10232,
2599,
201,
198,
197,
361,
357,
83,
18189,
767,
9,
1899,
290,
256,
1... | 1.737589 | 141 |
#!/usr/bin/env python
# This code is part of qiskit-runtime.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The worker in charge of running the Qiskit Runtime programs."""
import os
import sys
from rq import Connection, Worker
# Preload libraries
# pylint: disable=unused-import
import qiskit
import qiskit_nature
# pylint: enable=unused-import
# Look for modules in the directory from which the worker is run
sys.path.insert(0, os.getcwd())
# Provide queue names to listen to as arguments to this script,
# similar to rq worker
with Connection():
qs = sys.argv[1:] or ["default"]
w = Worker(qs)
w.work()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
201,
198,
2,
770,
2438,
318,
636,
286,
10662,
1984,
270,
12,
43282,
13,
201,
198,
2,
201,
198,
2,
357,
34,
8,
15069,
19764,
33448,
13,
201,
198,
2,
201,
198,
2,
770,
2438,
... | 3.084058 | 345 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of the Wapiti project (http://wapiti.sourceforge.net)
# Copyright (C) 2008-2018 Nicolas Surribas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from urllib.parse import urlparse, quote
import posixpath
from copy import deepcopy
if __name__ == "__main__":
res1 = Request(
"http://httpbin.org/post?var1=a&var2=b",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res2 = Request(
"http://httpbin.org/post?var1=a&var2=z",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res3 = Request(
"http://httpbin.org/post?var1=a&var2=b",
post_params=[['post1', 'c'], ['post2', 'z']]
)
res4 = Request(
"http://httpbin.org/post?var1=a&var2=b",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res5 = Request(
"http://httpbin.org/post?var1=z&var2=b",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res6 = Request(
"http://httpbin.org/post?var3=z&var2=b",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res7 = Request(
"http://httpbin.org/post?var1=z&var2=b&var4=e",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res8 = Request(
"http://httpbin.org/post?var2=d&var1=z",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res10 = Request(
"http://httpbin.org/post?qs0",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res11 = Request(
"http://httpbin.org/post?qs1",
post_params=[['post1', 'c'], ['post2', 'd']]
)
res12 = Request(
"http://httpbin.org/post?qs1",
post_params=[['post1', 'c'], ['post2', 'd']],
file_params=[['file1', ['fname1', 'content']], ['file2', ['fname2', 'content']]]
)
res13 = Request("https://www.youtube.com/user/OneMinuteSilenceBand/videos")
res14 = Request("https://www.youtube.com/user/OneMinuteSilenceBand/")
res15 = Request("https://duckduckgo.com/")
res16 = Request("https://duckduckgo.com/", post_params=[['q', 'Kung Fury']])
res17 = Request("http://example.com:8080/dir/?x=3")
res18 = Request(
"http://httpbin.org/get?a=1",
get_params=[['get1', 'c'], ['get2', 'd']]
)
assert res1 < res2
assert res2 > res3
assert res1 < res3
assert res1 == res4
assert hash(res1) == hash(res4)
res4.link_depth = 5
assert hash(res1) == hash(res4)
assert res1 != res2
assert res2 >= res1
assert res1 <= res3
assert res13.file_name == "videos"
assert res10.path == "http://httpbin.org/post"
assert res10.file_name == "post"
assert res10.url == "http://httpbin.org/post?qs0"
assert res13.parent_dir == res14.url
assert res15.is_root
assert res15.parent_dir == res15.url
assert res13.dir_name == res14.url
assert res14.dir_name == res14.url
assert res15.dir_name == res15.url
assert res15 != res16
query_list = [res15]
assert res16 not in query_list
assert res17.dir_name == "http://example.com:8080/dir/"
assert res18.url == "http://httpbin.org/get?get1=c&get2=d"
assert res17.hostname == "example.com:8080"
assert res1.encoded_get_keys == res8.encoded_get_keys
assert res17.encoded_get_keys == "x"
assert res16.encoded_get_keys == ""
assert len(res12) == 5
assert res12.encoded_get_keys == "qs1"
assert res5.hash_params == res8.hash_params
assert res7.hash_params != res8.hash_params
print("Tests were successful, now launching representations")
print("=== Basic representation follows ===")
print(res1)
print("=== cURL representation follows ===")
print(res1.curl_repr)
print("=== HTTP representation follows ===")
print(res1.http_repr())
print("=== POST parameters as an array ===")
print(res1.post_params)
print("=== POST keys encoded as string ===")
print(res1.encoded_post_keys)
print("=== Upload HTTP representation ===")
print(res12.http_repr())
print("=== Upload basic representation ===")
print(res12)
print("=== Upload cURL representation ===")
print(res12.curl_repr)
print("=== HTTP GET keys as a tuple ===")
print(res1.get_keys)
print("=== HTTP POST keys as a tuple ===")
print(res1.post_keys)
print("=== HTTP files keys as a tuple ===")
print(res12.file_keys)
print('')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
770,
2393,
318,
636,
286,
262,
370,
499,
8846,
1628,
357,
4023,
1378,
86,
499,
8846,
13,
10459,
30293,
13,
3... | 2.41154 | 2,097 |
from learn import *
from server import *
| [
6738,
2193,
1330,
1635,
201,
198,
6738,
4382,
1330,
1635,
201,
198,
220,
220,
220,
220,
201,
198
] | 2.722222 | 18 |
from django import forms
| [
198,
6738,
42625,
14208,
1330,
5107,
628,
628
] | 3.625 | 8 |
#!/usr/bin/python3
import cv2
import datetime
import sys
import gflags
import glog
FLAGS = gflags.FLAGS
if __name__ == '__main__':
argv = FLAGS(sys.argv)
sys.exit(main(argv))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
269,
85,
17,
198,
11748,
4818,
8079,
198,
11748,
25064,
198,
198,
11748,
308,
33152,
198,
11748,
1278,
519,
198,
198,
38948,
50,
796,
308,
33152,
13,
38948,
50,
628,
198,
19... | 2.277108 | 83 |
from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponse
from .models import RestaurantReview, Restaurant
from random import randint
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
198,
6738,
764,
27530,
1330,
26078,
14832,
11,
... | 3.894737 | 57 |
# coding=utf-8
from typing import Union
| [
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
19720,
1330,
4479,
628
] | 3.230769 | 13 |
from __future__ import absolute_import
from .core import (ZmqSubscriber, ZmqPublisher)
from .core import load_config | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
764,
7295,
1330,
357,
57,
76,
80,
7004,
1416,
24735,
11,
1168,
76,
80,
46471,
8,
198,
6738,
764,
7295,
1330,
3440,
62,
11250
] | 3.342857 | 35 |
import subprocess
import textwrap
import pytest
import signal
import socket
import time
import os
PORT = 9000
HOST = 'localhost'
KEY = "testtesttest"
@pytest.fixture(scope="session")
#def test_ancillary_on_s3(minio, tmp_path):
# f = tmp_path / "test.yaml"
# g = tmp_path / "clip.geojson"
# f.write_text(textwrap.dedent("""
# clipshpfn: {g}
# models:
# - name: NoOp
# output: nbr.tif
# inputs:
# - filename: s3://test/s2be.tif
# """))
# write_gdalconfig_for_minio(f)
# subprocess.check_call(['./nrtpredict.py', '-c', f, 's3://test/S2A_OPER_MSI_ARD_TL_VGS1_20210205T055002_A029372_T50HMK_N02.09'])
# #assert os.path.exists(g)
| [
11748,
850,
14681,
198,
11748,
2420,
37150,
198,
11748,
12972,
9288,
198,
11748,
6737,
198,
11748,
17802,
198,
11748,
640,
198,
11748,
28686,
198,
198,
15490,
796,
50138,
198,
39,
10892,
796,
705,
36750,
6,
198,
20373,
796,
366,
9288,
9... | 2.088685 | 327 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
pretrained_model_path = 'pretrained/resnet_gray_weights.pth.tar'
| [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
28034,
10178,
13,
27530,
355,
4981,
201,
198,
201,
198,
5310,
13363,
62,
19849,
62,
6978,
796,
705... | 2.731343 | 67 |
"""Ellipsoid suspended in shear flow (2 hours).
An example to illustrate 3d pysph rigid_body framework
"""
from __future__ import print_function
import numpy as np
from scipy.integrate import odeint
from pysph.base.nnps import DomainManager
from pysph.base.utils import (get_particle_array_wcsph,
get_particle_array_rigid_body)
from pysph.solver.utils import load, remove_irrelevant_files
# PySPH base and carray imports
from pysph.base.kernels import CubicSpline
from pysph.solver.solver import Solver
from pysph.sph.integrator import EPECIntegrator
from pysph.sph.integrator_step import WCSPHStep
from pysph.sph.equation import Group
from pysph.sph.basic_equations import (XSPHCorrection, ContinuityEquation)
from pysph.sph.wc.basic import TaitEOSHGCorrection, MomentumEquation, TaitEOS
from pysph.sph.wc.viscosity import LaminarViscosity
from pysph.solver.application import Application
from pysph.sph.rigid_body import (NumberDensity, BodyForce, RigidBodyMoments,
ViscosityRigidBody, PressureRigidBody,
RigidBodyMotion, RK2StepRigidBody)
def jeffery_ode(phi, t, ar, G):
"""Jeffery's Equation for planar rotation of a rigid ellipsoid."""
lbd = (ar**2-1.0)/(ar**2+1.0)
return 0.5*G*(1.0+lbd*np.cos(2.0*phi))
class RigidFluidCoupling(Application):
"""Example of a rigid ellipsoid rotating in a shear flow."""
def initialize(self):
"""Set up of general variables."""
self.scale = 1000
self.L = 0.0012
self.dx = 0.000025
self.hdx = 1.2
self.rho = 1000*self.scale
self.alpha = 0.0
self.nu = 0.1/self.rho
self.co = 0.010
def create_particles(self):
"""Create particle arrays for fluis, ellipsiod and walls."""
# General box
_x = np.arange(-self.L/2+self.dx/2, self.L/2+self.dx/2, self.dx)
_y = np.arange(-self.L/2+self.dx/2, self.L/2+self.dx/2, self.dx)
_z = np.arange(-self.L/4+self.dx/2, self.L/4+self.dx/2, self.dx)
x, y, z = np.meshgrid(_x, _y, _z)
xf = x.ravel()
yf = y.ravel()
zf = z.ravel()
# Determine the size of dummy region
ghost_extend = 3*self.dx
# Create the wall particles at the top
_y = np.linspace(self.L/2+self.dx/2,
self.L/2-self.dx/2+ghost_extend, 3)
x, y, z = np.meshgrid(_x, _y, _z)
xt = x.ravel()
yt = y.ravel()
zt = z.ravel()
# Create the wall particles at the bottom
_y = np.linspace(-self.L/2+self.dx/2-ghost_extend,
-self.L/2-self.dx/2, 3)
x, y, z = np.meshgrid(_x, _y, _z)
xb = x.ravel()
yb = y.ravel()
zb = z.ravel()
# Concatenate the top and bottom arrays
xw = np.concatenate((xt, xb))
yw = np.concatenate((yt, yb))
zw = np.concatenate((zt, zb))
# Create particle array for fluid
m = self.rho * self.dx**3
h = self.hdx * self.dx
rad_s = self.dx/2
V = self.dx**3
cs = 0.0
fluid = get_particle_array_wcsph(x=xf, y=yf, z=zf, h=h, m=m,
rho=self.rho, name="fluid")
# Create particle array for walls
walls = get_particle_array_wcsph(x=xw, y=yw, z=zw, h=h, m=m,
rho=self.rho, rad_s=rad_s, V=V,
name="walls")
for name in ['fx', 'fy', 'fz']:
walls.add_property(name)
# Create particle array for ellipsoid
cond = (((xf/(self.L/12))**2 +
(yf/(self.L/4))**2 +
(zf/(self.L/12))**2) <= 1.0)
xe, ye, ze = xf[cond], yf[cond], zf[cond]
ellipsoid = get_particle_array_rigid_body(x=xe, y=ye, z=ze, h=h, m=m,
rho=self.rho, rad_s=rad_s,
V=V, cs=cs, body_id=0,
name="ellipsoid")
ellipsoid.total_mass[0] = np.sum(m)
ellipsoid.add_property('cs')
ellipsoid.add_property('arho')
ellipsoid.set_lb_props(list(ellipsoid.properties.keys()))
ellipsoid.set_output_arrays(
['x', 'y', 'z', 'u', 'v', 'w', 'fx', 'fy', 'fz',
'rho', 'm', 'h', 'p', 'tag', 'pid', 'gid'])
fluid.remove_particles([i for i, c in enumerate(cond) if c])
fluid.u[:] = fluid.y[:]
ellipsoid.u[:] = ellipsoid.y[:]
walls.u[:] = walls.y[:]
print(
fluid.get_number_of_particles(),
walls.get_number_of_particles(),
ellipsoid.get_number_of_particles(), )
return [fluid, walls, ellipsoid]
def create_domain(self):
"""Create the domain as periodic domain in x and z."""
return DomainManager(xmin=-self.L/2, xmax=self.L/2, zmin=-self.L/4,
zmax=self.L/4, periodic_in_x=True,
periodic_in_z=True)
def create_solver(self):
"""Create Solver with min. time step from CFL and viscous step."""
kernel = CubicSpline(dim=3)
integrator = EPECIntegrator(fluid=WCSPHStep(), walls=WCSPHStep(),
ellipsoid=RK2StepRigidBody())
h = self.hdx*self.dx
dt_cfl = 0.4 * h/(1.1*self.co)
dt_viscous = 0.125*h**2/self.nu
dt = min(dt_viscous, dt_cfl)
print("dt_cfl: %s" % dt_cfl)
print("dt_viscous: %s" % dt_viscous)
print("DT: %s" % dt)
tf = 12
solver = Solver(
kernel=kernel,
dim=3,
integrator=integrator,
dt=dt,
tf=tf,
adaptive_timestep=False, )
return solver
def create_equations(self):
"""Set up equations.
Body force is necessary to reset fx,fy,fz, although
not body force is applied.
"""
equations = [
Group(equations=[
BodyForce(dest='ellipsoid', sources=None),
NumberDensity(dest='ellipsoid', sources=['ellipsoid']),
NumberDensity(dest='walls', sources=['walls'])
]),
# Tait equation of state
Group(equations=[
TaitEOS(
dest='fluid', sources=None, rho0=self.rho, c0=self.co,
gamma=7.0),
TaitEOSHGCorrection(
dest='ellipsoid', sources=None, rho0=self.rho, c0=self.co,
gamma=7.0),
TaitEOSHGCorrection(
dest='walls', sources=None, rho0=self.rho, c0=self.co,
gamma=7.0),
], real=False),
Group(equations=[
ContinuityEquation(dest='fluid',
sources=['fluid', 'walls', 'ellipsoid']),
ContinuityEquation(dest='ellipsoid', sources=['fluid']),
ContinuityEquation(dest='walls', sources=['fluid']),
LaminarViscosity(dest='fluid', sources=['fluid', 'walls'],
nu=self.nu),
MomentumEquation(dest='fluid', sources=['fluid', 'walls'],
alpha=self.alpha, beta=0.0, c0=self.co),
ViscosityRigidBody(dest='fluid', sources=['ellipsoid'],
nu=self.nu, rho0=self.rho),
PressureRigidBody(dest='fluid', sources=['ellipsoid'],
rho0=self.rho),
XSPHCorrection(dest='fluid', sources=['fluid']),
]),
Group(equations=[RigidBodyMoments(dest='ellipsoid',
sources=None)]),
Group(equations=[RigidBodyMotion(dest='ellipsoid',
sources=None)]),
]
return equations
def post_process(self, info_fname):
"""Plot ellispoid angle and compare it to Jeffery's ODE."""
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif',
'serif': ['Computer Modern'],
'size': 18})
rc('text', usetex=True)
except ImportError:
print("Post processing requires matplotlib.")
return
t = []
phi = []
output_files = remove_irrelevant_files(self.output_files)
# Going through output files
for i, fname in enumerate(output_files):
data = load(fname)
# Extract time
t.append(data['solver_data']['t'])
# extract relative positions of ellipsoid particles
ellipsoid = data['arrays']['ellipsoid']
x = ellipsoid.x-np.mean(ellipsoid.x)
y = ellipsoid.y-np.mean(ellipsoid.y)
# compute orienation as covariance matrix
coords = np.vstack([x, y])
cov = np.cov(coords)
evals, evecs = np.linalg.eig(cov)
sort_indices = np.argsort(evals)[::-1]
dx, dy = evecs[:, sort_indices[0]]
if abs(dx) < 1E-15:
phi.append(0.0)
else:
phi.append(np.pi/2.0-np.arctan(dy/dx))
# reference solution
t = np.array(t)
phi0 = 0.0
angle_jeffery = odeint(jeffery_ode, phi0, t, atol=1E-15,
args=(3.0, 1.0))
# open new plot
plt.figure()
# plot computed angle and Jeffery's solution
plt.plot(t, phi, '-k')
plt.plot(t, angle_jeffery, '--k')
# labels
plt.xlabel('Time $t$ in s')
plt.ylabel('Rotation angle $\phi$')
plt.legend(['SPH Simulation', 'Jeffery'])
plt.grid()
x1, x2, y1, y2 = plt.axis()
plt.axis((0, x2, 0, y2))
ax = plt.gca()
ax.set_yticks([0, 0.5*np.pi, np.pi, 1.5*np.pi])
ax.set_yticklabels(['0', '$\pi/2$', '$\pi$', '$3/2\pi$'])
plt.tight_layout()
plt.savefig("test.pdf", bbox_inches='tight')
if __name__ == '__main__':
app = RigidFluidCoupling()
app.run()
app.post_process(app.info_filename)
| [
37811,
30639,
541,
568,
312,
9951,
287,
673,
283,
5202,
357,
17,
2250,
737,
198,
198,
2025,
1672,
284,
19418,
513,
67,
279,
893,
746,
20831,
62,
2618,
9355,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
... | 1.794752 | 5,793 |
#tuning Parameter
import numpy as np
import glob
import os
import pandas as pd
import scipy
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from scipy.stats import poisson
import math
from itertools import permutations
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import xml.etree.ElementTree as ET
path='/fs/project/PAS1263/src/models/research/object_detection/chairtable/Bndbox/train/'
stat=np.load('../prior/stat.npy')
cn=np.load('../prior/onlychair_num.npy')
tn=np.load('../prior/onlytable_num.npy')
get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
basicerror1,basicerror2=1000000000,100000000
for a in range(1,9):
for b in range(1,11):
for c in range(3,8):
error1,error2=0,0
for bndbox_path in glob.glob(path+'*.txt.npz'):
path,temp=os.path.split(bndbox_path)
file_name,rest1,rest2,rest3=temp.split(".")
chairindicator=np.zeros(300);tableindicator=np.zeros(300);
Data=np.load(bndbox_path)
position=Data['arr_0']
position=position[0]
prob=Data['arr_1']
prob=prob[0]
category=Data['arr_2']
category=category[0]
chairloc=get_indexes(1,category);tableloc=get_indexes(2,category);
chairindicator=np.zeros(len(chairloc))
tableindicator=np.zeros(len(tableloc))
chairprob=[prob[i] for i in chairloc]
tableprob=[prob[i] for i in tableloc]
counttable=0
countchair=0
temptable=[];tempchair=[];
for j in range(len(chairloc)):
if countchair==0:
chairindicator[j]=1
countchair=countchair+1
elif countchair<a and prob[chairloc[j]]>=(b*0.05):
truth=1
for n in tempchair:
if bb_intersection_over_union(n,position[chairloc[j],])>(c*0.1):
truth=0
if truth==1:
countchair=countchair+1
chairindicator[j]=1
tempchair.append(position[chairloc[j],])
else:
break
for k in range(len(tableloc)):
if counttable==0:
tableindicator[k]=1
counttable=counttable+1
temptable.append(position[tableloc[k],])
elif counttable<a and prob[tableloc[k]]>=(b*0.05):
truth=1
for n in temptable:
if bb_intersection_over_union(n,position[tableloc[k],])>(c*0.1):
truth=0
if truth==1:
counttable=counttable+1
tableindicator[k]=1
temptable.append(position[tableloc[k],])
else:
break
error1=error1+(countchair-cn)**2
error2=error2+(counttable-tn)**2
error1=error1*1.0/9209
error2=error2*1.0/9209
if error1<basicerror1:
basicerror1=error1
a1,b1,c1=a,(b*0.05),(c*0.1)
if error2<basicerror2:
basicerror2=error2
a2,b2,c2=a,(b*0.05),(c*0.1)
parameter=[a1,b1,c1,a2,b2,c2]
np.save('../prior/parameter',parameter)
| [
2,
28286,
278,
25139,
2357,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
541,
88,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6... | 1.689625 | 2,265 |
import json
import os
| [
11748,
33918,
198,
11748,
28686,
198
] | 3.666667 | 6 |
# -*- coding: UTF-8 -*-
# Code generated by lark suite oapi sdk gen
from typing import *
from ....api import Request as APIRequest, Response as APIResponse, set_timeout, set_tenant_key, set_user_access_token, set_path_params, \
set_query_params, set_response_stream, set_is_response_stream, FormData, FormDataFile
from ....config import Config
from ....consts import ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP
from .model import *
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
6127,
7560,
416,
300,
668,
18389,
267,
15042,
264,
34388,
2429,
198,
198,
6738,
19720,
1330,
1635,
198,
198,
6738,
19424,
15042,
1330,
19390,
355,
7824,
18453,
11,
18261,
... | 2.837079 | 178 |
# import the necessary packages
from imutils import paths
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True,
help="path to input directory of images")
ap.add_argument("-t", "--threshold", type=float, default=100.0,
help="focus measures that fall below this value will be considered 'blurry'")
args = vars(ap.parse_args())
# loop over the input images
for imagePath in paths.list_images(args["images"]):
# load the image, convert it to grayscale, and compute the
# focus measure of the image using the Variance of Laplacian
# method
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
text = "Not Blurry"
# if the focus measure is less than the supplied threshold,
# then the image should be considered "blurry"
if fm < args["threshold"]:
text = "Blurry"
# show the image
cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
cv2.imshow("Image", image)
key = cv2.waitKey(0) | [
2,
1330,
262,
3306,
10392,
198,
6738,
545,
26791,
1330,
13532,
198,
11748,
1822,
29572,
198,
11748,
269,
85,
17,
628,
198,
198,
2,
5678,
262,
4578,
21136,
290,
21136,
262,
7159,
198,
499,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419... | 2.847328 | 393 |
import tfchain.tests.encoding.rivbin as testrivbin
import tfchain.tests.encoding.siabin as testsiabin
| [
11748,
48700,
7983,
13,
41989,
13,
12685,
7656,
13,
15104,
8800,
355,
1332,
15104,
8800,
198,
11748,
48700,
7983,
13,
41989,
13,
12685,
7656,
13,
13396,
6014,
355,
5254,
72,
6014,
198
] | 3.1875 | 32 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
198
] | 3.166667 | 6 |
""""writer module handle writing the images to disk"""
import webdataset as wds
import json
import pandas as pd
import pyarrow.parquet as pq
import pyarrow as pa
import fsspec
class BufferedParquetWriter:
"""Write samples to parquet files incrementally with a buffer"""
def flush(self, force=False):
"""Write the buffer to disk"""
if len(self.buffer) == 0:
return
if self.schema is None:
df = pa.Table.from_pandas(pd.DataFrame(self.buffer))
# if a column is None, keep accumulating in the hope to get at least one non None value
if not force and len([True for t in df.schema if t.type == pa.null()]) > 0:
return
self.schema = df.schema
else:
df = pa.Table.from_pandas(pd.DataFrame(self.buffer), self.schema)
if self.parquet_writer is None:
self.parquet_writer = pq.ParquetWriter(self.output_fd, df.schema)
self.parquet_writer.write_table(df)
self.buffer = []
class ParquetSampleWriter:
"""ParquetSampleWriter is a image+caption writer to parquet"""
def write(self, img_str, key, caption, meta):
"""Keep sample in memory then write to disk when close() is called"""
if img_str is not None:
sample = {"key": key, "jpg": img_str}
if self.save_caption:
sample["txt"] = str(caption) if caption is not None else ""
else:
sample = {"key": key, "jpg": None}
if self.save_caption:
sample["txt"] = None
sample.update(meta)
self.buffered_parquet_writer.write(sample)
class WebDatasetSampleWriter:
"""WebDatasetSampleWriter is a image+caption writer to webdataset"""
class FilesSampleWriter:
"""FilesSampleWriter is a caption+image writer to files"""
def write(self, img_str, key, caption, meta):
"""Write sample to disk"""
if img_str is not None:
filename = f"{self.subfolder}/{key}.jpg"
with self.fs.open(filename, "wb") as f:
f.write(img_str)
if self.save_caption:
caption = str(caption) if caption is not None else ""
caption_filename = f"{self.subfolder}/{key}.txt"
with self.fs.open(caption_filename, "w") as f:
f.write(str(caption))
j = json.dumps(meta, indent=4)
meta_filename = f"{self.subfolder}/{key}.json"
with self.fs.open(meta_filename, "w") as f:
f.write(j)
self.buffered_parquet_writer.write(meta)
| [
15931,
15931,
16002,
8265,
5412,
3597,
262,
4263,
284,
11898,
37811,
198,
198,
11748,
3992,
19608,
292,
316,
355,
266,
9310,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
6018,
13,
1845,
21108,
355,
279,
80... | 2.233134 | 1,171 |
import unittest
import mock
from mopidy_gmusic import (
GMusicExtension, backend as backend_lib, scrobbler_frontend)
| [
11748,
555,
715,
395,
198,
198,
11748,
15290,
198,
198,
6738,
285,
404,
19325,
62,
70,
28965,
1330,
357,
198,
220,
220,
220,
6951,
385,
291,
11627,
3004,
11,
30203,
355,
30203,
62,
8019,
11,
629,
305,
11848,
1754,
62,
8534,
437,
8,
... | 2.818182 | 44 |
from PyQt6 import QtCore, QtWidgets
import sys
# ф-ция с основными настройками окна
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
InfoWindow = QtWidgets.QMainWindow()
ui = UiInfoWindow()
ui.setup_ui(InfoWindow)
InfoWindow.show()
sys.exit(app.exec())
| [
6738,
9485,
48,
83,
21,
1330,
33734,
14055,
11,
33734,
54,
312,
11407,
201,
198,
11748,
25064,
201,
198,
201,
198,
201,
198,
220,
220,
220,
1303,
220,
141,
226,
12,
141,
228,
18849,
40623,
220,
21727,
12466,
122,
21727,
22177,
25443,
... | 1.83908 | 174 |
# -*- coding: utf-8 -*-
# spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek and at Harvard University, Kevin Swersky and Richard
# Zemel at the University of Toronto (“Toronto”), and Hugo Larochelle
# at the Université de Sherbrooke (“Sherbrooke”), which assigned its
# rights in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import os
import sys
import importlib
import imp
import optparse
import numpy as np
import numpy.random as npr
import numpy.linalg as npla
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams['ps.useafm'] = True
mpl.rcParams['pdf.use14corefonts'] = True
mpl.rcParams['axes.unicode_minus'] = False
import matplotlib.pyplot as plt
from spearmint.visualizations import plots_2d
from spearmint.utils.parsing import parse_config_file
from spearmint.utils.parsing import parse_tasks_from_jobs
from spearmint.utils.parsing import repeat_experiment_name
from spearmint.utils.parsing import get_objectives_and_constraints
from spearmint.utils.parsing import DEFAULT_TASK_NAME
from spearmint.utils.database.mongodb import MongoDB
from spearmint.tasks.input_space import InputSpace
from spearmint.tasks.input_space import paramify_no_types
from spearmint.main import load_jobs
# Returns the violation value if the constraints are violated, else the objective
# computes the l2 norm between x and y assuming x and y are stores as dicts of params
# and also the first one (x) has this extra 'values' nonsense
# compute error bars with the bootstrap
# if log=True, we compute the standard deviation of the log values
# Usage:
# python progress_curve.py dir1 [dir2] ... [dirN] [repeat]
if __name__ == '__main__':
option_names = ['n_repeat', 'average', 'rec_type', 'log_scale',\
'n_iter_spec', 'violation_value', 'make_dist_plot', 'mainfile',\
'stretch_x', 'task_comp_x', 'labels', "y_axis_label", 'x_axis_label', 'retro']
parser = optparse.OptionParser(usage="usage: %prog [options] dir1 dir2")
parser.add_option("--repeat", dest="n_repeat",
help="Number of repeated experiments.",
type="int", default=-1)
parser.add_option("--rec-type", dest="rec_type",
help="model, observations, or mixed?",
default="model")
parser.add_option("--median", action="store_true",
help="Use the median instead of the mean.",
dest="average")
parser.add_option("--logscale", action="store_true", dest="log_scale",
help="Whether to plot the y axis on a log scale.")
parser.add_option("--retro", action="store_true", dest="retro",
help="Load recommandations-retro instead of recommendations.")
parser.add_option("--iter", dest="n_iter_spec",
help="Uesd to specify a certain number of iterations to plot.",
type="int", default=None)
parser.add_option("--violation-value", dest="violation_value",
help="The penalty value for violating the constraints.",
type="float", default=1.0)
parser.add_option("--make_dist_plot", action="store_true",
help="Whether to also make a plot of the L2 distance from the true solution.")
parser.add_option("--mainfile", dest="mainfile",
help="Explicity store the location of the main file.",
type="string", default=None)
parser.add_option("--stretch", action="store_true", dest="stretch_x",
help="Only use this if you really know what you are doing.")
parser.add_option("--task_x", dest="task_comp_x",
help="A particular task whose num complete will make up the x-axis of the plot.",
type="string", default=None)
parser.add_option("--labels", dest="labels",
help="For non-default legend labels on the curves. If >1, separate with SEMICOLON.",
type="string", default=None)
parser.add_option("--y-axis-label", dest="y_axis_label",
help="For non-default y-axis label.",
type="string", default=None)
parser.add_option("--x-axis-label", dest="x_axis_label",
help="For non-default x-axis label.",
type="string", default=None)
""" when you add a new options, make sure to add it to the list above"""
# Stretches the x-axis of one plot to match the other- use to compare coupled and
# decoupled algs
(args, dirs) = parser.parse_args()
# parse this weird args thing into a dict
options = dict()
for option_name in option_names:
if hasattr(args, option_name):
options[option_name] = getattr(args, option_name)
else:
options[option_name] = False
if options["average"]:
options["average"] = "median"
else:
options["average"] = "mean"
main(dirs, **options)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
693,
1670,
600,
198,
2,
198,
2,
31421,
290,
8504,
12,
48401,
4992,
5765,
10442,
13789,
290,
17637,
198,
2,
286,
5765,
198,
2,
198,
2,
2531,
1670,
600,
318,
257,
... | 3.162663 | 4,537 |
import os
import networkx as nx
import pandas as pd
import numpy as np
from stellargraph.mapper import (
CorruptedGenerator,
FullBatchNodeGenerator,
)
from stellargraph import StellarGraph
from stellargraph.layer import GCN, DeepGraphInfomax
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
from tensorflow.keras import Model
# number of dimensions to embed into
# this is only going to be two for this experiment
dims = int(os.getenv("DIMENSIONS"))
task_id = int(os.getenv("SGE_TASK_ID")) - 1
number_of_sub_jobs = 128
# Defines the grid that will be loaded and also the indices that will be embedded
job_params = np.array(np.meshgrid(["IEEE_14", "IEEE_30", "IEEE_57", "IEEE_118", "IEEE_300", "UK_high_voltage"],
np.arange(1,number_of_sub_jobs + 1))).T.reshape(-1, 2)
grid_name = job_params[task_id-1, 0]
grid_sub_task_id = int(job_params[task_id-1, 1])
job_indices = list(np.arange(0,3457, 3456/number_of_sub_jobs, dtype=int))
# The index of the files that will be used in this sub job
start_index = job_indices[0:number_of_sub_jobs][grid_sub_task_id-1]
end_index = job_indices[1:(number_of_sub_jobs+1)][grid_sub_task_id-1]
# Get the working directory and store it so I can save the embedding there
project_folder = os.getcwd()
# The folder the data is stored in
grid_folder = "/home/ucabbou/power_grid_graphs/graphml" + "/" + grid_name + "_graphml"
# save path of the embedded data
save_path = project_folder + "/" + grid_name + "_" + str(grid_sub_task_id) + ".csv"
files_to_embed = os.listdir(grid_folder)[start_index:end_index]
list_of_dataframes = []
for grid_instance in files_to_embed:
G_graphml = nx.read_graphml(grid_folder + "/" + grid_instance)
# get the node features as a dataframe, these will then be added to the stellar graph.
# This seems to work better than trying to put them in directly
nodefeatures = pd.DataFrame.from_dict(dict(G_graphml.nodes(data=True)), orient='index')
# Convert the networkx graph to a Stellargraph
G = StellarGraph.from_networkx(G_graphml, node_features=nodefeatures)
# The features aren't used by node2vec but it makes changing to DGI easier
fullbatch_generator = FullBatchNodeGenerator(G, sparse=False, weighted=True)
gcn_model = GCN(layer_sizes=[dims], activations=["relu"], generator=fullbatch_generator)
corrupted_generator = CorruptedGenerator(fullbatch_generator)
gen = corrupted_generator.flow(G.nodes())
infomax = DeepGraphInfomax(gcn_model, corrupted_generator)
x_in, x_out = infomax.in_out_tensors()
model = Model(inputs=x_in, outputs=x_out)
model.compile(loss=tf.nn.sigmoid_cross_entropy_with_logits, optimizer=Adam(lr=1e-3))
epochs = 100
es = EarlyStopping(monitor="loss", min_delta=0, patience=20)
history = model.fit(gen, epochs=epochs, verbose=0, callbacks=[es])
# plot_history(history)
x_emb_in, x_emb_out = gcn_model.in_out_tensors()
# for full batch models, squeeze out the batch dim (which is 1)
x_out = tf.squeeze(x_emb_out, axis=0)
emb_model = Model(inputs=x_emb_in, outputs=x_out)
all_embeddings = emb_model.predict(fullbatch_generator.flow(G.nodes()))
node_embeddings_df = pd.DataFrame(all_embeddings)
# uses a list comprehension inside a list comprehension to append "n" to all values
# betweem 0 and the number of nodes in the network
node_names = ["n" + i for i in [str(i) for i in range(len(G.nodes()))]]
#matches the node in the edge list to the rows in the node embedding
from_index = [node_names.index(i) for i in G.edge_arrays()[0]]
to_index = [node_names.index(i) for i in G.edge_arrays()[1]]
# subtract one edge df from the other. I can only make it work
# converting to arrays though
df_edge_diff = node_embeddings_df.loc[from_index,].to_numpy() - \
node_embeddings_df.loc[to_index,].to_numpy()
# get the euclidean length of each edge
df_edge_diff = np.square(df_edge_diff)
df_edge_diff = np.sum(df_edge_diff, axis = 1)
df_edge_diff = np.sqrt(df_edge_diff)
mean_df = pd.DataFrame(node_embeddings_df.abs().mean())
mean_df['vars'] = pd.Series(range(0, dims))
mean_df['id'] = 1
mean_df = mean_df.pivot(index='id', columns='vars', values=0)
abs_mean_df = pd.DataFrame(node_embeddings_df.abs().mean())
abs_mean_df['vars'] = pd.Series(range(0, dims))
abs_mean_df['id'] = 1
abs_mean_df = abs_mean_df.pivot(index='id', columns='vars', values=0)
# concatencate the dataframes together by column
network_embeds = pd.concat([mean_df, abs_mean_df], axis=1)
# add the mean edge length from the embeddings together
network_embeds["edge_mean_length"] = np.mean(df_edge_diff)
# add this iteration to the dataframe of embeddings
list_of_dataframes.append(network_embeds)
# exit the inner loop
# make a single dataframe from the list
out = pd.concat(list_of_dataframes)
# Add the file names as column to allow easy identification
out['file'] = files_to_embed
# Save it all as a CSV to be loaded back into R
out.to_csv(save_path)
| [
11748,
28686,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
336,
695,
853,
1470,
13,
76,
11463,
1330,
357,
198,
220,
220,
220,
2744,
31590,
8645,
1352,
11,
1... | 2.605648 | 1,983 |
# Ignore pictures where the tagged concept has a probability below the threshold
THRESHOLD = 0.9
# Number of pictures to index
MAX_MEDIA = 1000
# Clarifai API details - http://www.clarifai.com/api
CLARIFAI_APP_ID = 'xxx'
CLARIFAI_APP_SECRET = 'xxx'
# Algolia API details = http://www.clarifai.com/api
ALGOLIA_APP_ID = 'xxx'
ALGOLIA_APP_KEY = 'xxx'
ALGOLIA_INDEX_NAME = 'Pictures'
# Flickr API details - https://www.flickr.com/services/api/
FLICKR_API_KEY = 'xxx'
FLICKR_GROUP_ID = 'xxx' | [
2,
41032,
5986,
810,
262,
30509,
3721,
468,
257,
12867,
2174,
262,
11387,
198,
4221,
19535,
39,
15173,
796,
657,
13,
24,
198,
2,
7913,
286,
5986,
284,
6376,
198,
22921,
62,
30733,
3539,
796,
8576,
198,
198,
2,
15420,
361,
1872,
7824... | 2.573684 | 190 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
198
] | 3 | 7 |
# DESAFIO 067
# Faça um programa que mostre a tabuada de vários números, um de cada vez, para cada valor digitado
# pelo usuário. O programa será interrompido quando o número solicitado for negativo.
while True:
num = int(input('Digite um número para ver sua tabuada: '))
print('-' * 30)
if num < 0:
break
for c in range(1, 11):
print(f'{num} x {c:2} = {num * c}')
print('-' * 30)
print('PROGRAMA TABUADA ENCERRADO. Volte Sempre!')
| [
2,
22196,
8579,
9399,
657,
3134,
198,
2,
18350,
50041,
23781,
1430,
64,
8358,
749,
260,
257,
7400,
84,
4763,
390,
410,
6557,
380,
418,
299,
21356,
647,
418,
11,
23781,
390,
269,
4763,
1569,
89,
11,
31215,
269,
4763,
1188,
273,
16839... | 2.212264 | 212 |
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
    """
    Output container for the Beerex model.

    Every attribute is an empty float pandas Series whose ``name`` equals the
    attribute name; the model's run methods fill them in later. Outputs are:
    one EEC per application route plus the selected aggregate EEC, and a
    total dose, acute RQ, and chronic RQ for each hive caste.
    """

    # Hive castes for which doses and risk quotients are reported:
    # lw = larval worker (day 1-5), ld = larval drone, lq = larval queen,
    # aw = adult worker tasks, ad = adult drone, aq = adult queen.
    _CASTES = ("lw1", "lw2", "lw3", "lw4", "lw5", "ld6",
               "lq1", "lq2", "lq3", "lq4",
               "aw_cell", "aw_brood", "aw_comb", "aw_pollen",
               "aw_nectar", "aw_winter", "ad", "aq")

    def __init__(self):
        """Create one empty, named float Series per model output."""
        super(BeerexOutputs, self).__init__()
        names = ["out_eec_spray", "out_eec_soil", "out_eec_seed",
                 "out_eec_tree", "out_eec"]
        for suffix in ("_total_dose", "_acute_rq", "_chronic_rq"):
            names.extend("out_" + caste + suffix for caste in self._CASTES)
        for attr in names:
            setattr(self, attr, pd.Series(name=attr, dtype="float"))
class Beerex(UberModel, BeerexInputs, BeerexOutputs):
    """
    Individual-based model that estimates pesticide exposures of bees.

    For each model run a dietary dose (ug a.i./bee) is computed for every
    hive caste, from either user-supplied empirical residues or an
    application-method-specific estimated environmental concentration (EEC),
    and each dose is divided by the matching toxicity endpoint to yield
    acute and chronic risk quotients (RQs).
    """

    # Castes whose RQs use the larval LD50/NOAEL endpoints:
    # lw = larval worker (day 1-5), ld = larval drone, lq = larval queen.
    _LARVAL_CASTES = ("lw1", "lw2", "lw3", "lw4", "lw5", "ld6",
                      "lq1", "lq2", "lq3", "lq4")
    # Castes whose RQs use the adult oral LD50/NOAEL endpoints:
    # aw = adult worker tasks, ad = adult drone, aq = adult queen.
    _ADULT_CASTES = ("aw_cell", "aw_brood", "aw_comb", "aw_pollen",
                     "aw_nectar", "aw_winter", "ad", "aq")
    # Maps the application_method input onto an EEC route suffix.
    _EEC_ROUTES = {'foliar spray': 'spray',
                   'soil application': 'soil',
                   'seed treatment': 'seed',
                   'tree trunk': 'tree'}

    def __init__(self, pd_obj, pd_obj_exp):
        """Class representing the Beerex model and containing all its methods.

        :param pd_obj: pandas DataFrame of model inputs (one row per run)
        :param pd_obj_exp: pandas DataFrame of expected outputs (QA), or None
        """
        super(Beerex, self).__init__()
        self.pd_obj = pd_obj
        self.pd_obj_exp = pd_obj_exp
        self.pd_obj_out = None  # set by execute_model()

    def execute_model(self):
        """
        Callable to execute the running of the model:
        1) Populate input parameters
        2) Create output DataFrame to hold the model outputs
        3) Run the model's methods to generate outputs
        4) Fill the output DataFrame with the generated model outputs
        """
        # Debug prints and the 'boolog' flag from the original implementation
        # were removed; use logging at the call site if tracing is needed.
        self.populate_inputs(self.pd_obj)
        self.pd_obj_out = self.populate_outputs()
        self.run_methods()
        self.fill_output_dataframe()

    def run_methods(self):
        """Execute the model's methods: EECs first, then doses, then RQs."""
        self.set_global_constants()
        self.eec()
        castes = self._LARVAL_CASTES + self._ADULT_CASTES
        # Same order as the original explicit call list: all total doses,
        # then all acute RQs, then all chronic RQs.
        for stage in ("_total_dose", "_acute_rq", "_chronic_rq"):
            for caste in castes:
                getattr(self, caste + stage)()

    # ------------------------------------------------------------------
    # Estimated environmental concentrations (EECs)
    # ------------------------------------------------------------------

    def _assign_eec(self, i, route, value):
        """Store *value* as run *i*'s EEC for *route* and NaN the others."""
        for r in ("spray", "soil", "seed", "tree"):
            getattr(self, "out_eec_" + r)[i] = value if r == route else np.nan

    def eec_spray(self, i):
        """EEC for foliar spray (run *i*)."""
        self._assign_eec(i, "spray", (110. * self.application_rate[i]) / 1000)

    def eec_soil(self, i):
        """EEC for soil application (run *i*), from a plant-uptake
        regression on log Kow and Koc."""
        uptake = ((10.**(0.95*self.log_kow[i]-2.05)+0.82) *
                  (-0.0648*(self.log_kow[i]**2)+0.2431*self.log_kow[i]+0.5822) *
                  (1.5/(0.2+1.5*self.koc[i]*0.01)) *
                  (0.5 * self.application_rate[i])) / 1000.
        self._assign_eec(i, "soil", uptake)

    def eec_seed(self, i):
        """EEC for seed treatment (run *i*); fixed at 1/1000."""
        self._assign_eec(i, "seed", 1./1000.)

    def eec_tree(self, i):
        """EEC for tree trunk application (run *i*)."""
        self._assign_eec(
            i, "tree",
            (self.application_rate[i]/self.mass_tree_vegetation[i]) / 1000.)

    def eec(self):
        """Compute each run's EEC via the route matching application_method."""
        for i in range(self.n_runs):
            try:
                route = self._EEC_ROUTES[self.application_method[i]]
            except KeyError:
                # An unrecognized method previously left out_eec[i] unset and
                # surfaced later as an obscure KeyError; fail fast instead.
                raise ValueError("unrecognized application_method %r in run %d"
                                 % (self.application_method[i], i))
            getattr(self, "eec_" + route)(i)
            self.out_eec[i] = getattr(self, "out_eec_" + route)[i]
        return self.out_eec

    # ------------------------------------------------------------------
    # Total dietary doses (ug a.i./bee)
    # ------------------------------------------------------------------

    def _jelly_dose(self, out_attr, jelly_attr):
        """Dose for a royal-jelly-fed caste.

        Uses the measured jelly residue (mg -> ug) when empirical_residue is
        "yes"; otherwise 1/100th of the EEC is used as the jelly
        concentration.
        NOTE(review): empirical_residue values other than "yes"/"no" leave
        the dose unset, exactly as in the original implementation -- confirm
        inputs are normalized upstream.
        """
        out = getattr(self, out_attr)
        jelly = getattr(self, jelly_attr)
        for i in range(self.n_runs):
            if self.empirical_residue[i] == "yes":
                out[i] = (self.empirical_jelly[i] / 1000.) * jelly[i]
            elif self.empirical_residue[i] == "no":
                out[i] = (self.out_eec[i] / 100.) * jelly[i]

    def _diet_dose(self, out_attr, pollen_attr, nectar_attr):
        """Dose for a pollen/nectar-fed caste.

        Uses measured pollen/nectar residues (mg -> ug) when
        empirical_residue is "yes"; otherwise the EEC is applied to both
        diet components. See the "yes"/"no" note on :meth:`_jelly_dose`.
        """
        out = getattr(self, out_attr)
        pollen = getattr(self, pollen_attr)
        nectar = getattr(self, nectar_attr)
        for i in range(self.n_runs):
            if self.empirical_residue[i] == "yes":
                out[i] = ((self.empirical_pollen[i] / 1000.) * pollen[i] +
                          (self.empirical_nectar[i] / 1000.) * nectar[i])
            elif self.empirical_residue[i] == "no":
                out[i] = (self.out_eec[i] * pollen[i]) + (self.out_eec[i] * nectar[i])

    def lw1_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval worker day 1."""
        self._jelly_dose("out_lw1_total_dose", "lw1_jelly")

    def lw2_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval worker day 2."""
        self._jelly_dose("out_lw2_total_dose", "lw2_jelly")

    def lw3_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval worker day 3."""
        self._jelly_dose("out_lw3_total_dose", "lw3_jelly")

    def lw4_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval worker day 4."""
        self._diet_dose("out_lw4_total_dose", "lw4_pollen", "lw4_nectar")

    def lw5_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval worker day 5."""
        self._diet_dose("out_lw5_total_dose", "lw5_pollen", "lw5_nectar")

    def ld6_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval drone aged 6+ days."""
        self._diet_dose("out_ld6_total_dose", "ld6_pollen", "ld6_nectar")

    def lq1_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval queen day 1."""
        self._jelly_dose("out_lq1_total_dose", "lq1_jelly")

    def lq2_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval queen day 2."""
        self._jelly_dose("out_lq2_total_dose", "lq2_jelly")

    def lq3_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval queen day 3."""
        self._jelly_dose("out_lq3_total_dose", "lq3_jelly")

    def lq4_total_dose(self):
        """Pesticide dose in ug a.i./bee for larval queen aged 4+ days."""
        self._jelly_dose("out_lq4_total_dose", "lq4_jelly")

    def aw_cell_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (cell cleaning and capping)."""
        self._diet_dose("out_aw_cell_total_dose", "aw_cell_pollen", "aw_cell_nectar")

    def aw_brood_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (brood and queen tending, nurse bees)."""
        self._diet_dose("out_aw_brood_total_dose", "aw_brood_pollen", "aw_brood_nectar")

    def aw_comb_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (comb building, cleaning, and food handling)."""
        self._diet_dose("out_aw_comb_total_dose", "aw_comb_pollen", "aw_comb_nectar")

    def aw_pollen_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (foraging for pollen)."""
        self._diet_dose("out_aw_pollen_total_dose", "aw_fpollen_pollen", "aw_fpollen_nectar")

    def aw_nectar_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (foraging for nectar)."""
        self._diet_dose("out_aw_nectar_total_dose", "aw_fnectar_pollen", "aw_fnectar_nectar")

    def aw_winter_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult worker (maintenance of hive in winter)."""
        self._diet_dose("out_aw_winter_total_dose", "aw_winter_pollen", "aw_winter_nectar")

    def ad_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult drone."""
        self._diet_dose("out_ad_total_dose", "ad_pollen", "ad_nectar")

    def aq_total_dose(self):
        """Pesticide dose in ug a.i./bee for adult queen (laying 1500 eggs/day)."""
        self._jelly_dose("out_aq_total_dose", "aq_jelly")

    # ------------------------------------------------------------------
    # Risk quotients
    # ------------------------------------------------------------------

    def _acute(self, caste):
        """Compute, store, and return the acute RQ Series for *caste*."""
        endpoint = (self.larval_ld50 if caste in self._LARVAL_CASTES
                    else self.adult_oral_ld50)
        rq = getattr(self, "out_%s_total_dose" % caste) / endpoint
        setattr(self, "out_%s_acute_rq" % caste, rq)
        return rq

    def _chronic(self, caste):
        """Compute, store, and return the chronic RQ Series for *caste*."""
        endpoint = (self.larval_noael if caste in self._LARVAL_CASTES
                    else self.adult_oral_noael)
        rq = getattr(self, "out_%s_total_dose" % caste) / endpoint
        setattr(self, "out_%s_chronic_rq" % caste, rq)
        return rq

    def lw1_acute_rq(self):
        """Acute risk quotient for larval worker day 1."""
        return self._acute("lw1")

    def lw2_acute_rq(self):
        """Acute risk quotient for larval worker day 2."""
        return self._acute("lw2")

    def lw3_acute_rq(self):
        """Acute risk quotient for larval worker day 3."""
        return self._acute("lw3")

    def lw4_acute_rq(self):
        """Acute risk quotient for larval worker day 4."""
        return self._acute("lw4")

    def lw5_acute_rq(self):
        """Acute risk quotient for larval worker day 5."""
        return self._acute("lw5")

    def ld6_acute_rq(self):
        """Acute risk quotient for larval drone aged day 6+."""
        return self._acute("ld6")

    def lq1_acute_rq(self):
        """Acute risk quotient for larval queen day 1."""
        return self._acute("lq1")

    def lq2_acute_rq(self):
        """Acute risk quotient for larval queen day 2."""
        return self._acute("lq2")

    def lq3_acute_rq(self):
        """Acute risk quotient for larval queen day 3."""
        return self._acute("lq3")

    def lq4_acute_rq(self):
        """Acute risk quotient for larval queen day 4."""
        return self._acute("lq4")

    def aw_cell_acute_rq(self):
        """Acute risk quotient for adult worker (cell cleaning and capping)."""
        return self._acute("aw_cell")

    def aw_brood_acute_rq(self):
        """Acute risk quotient for adult worker (brood and queen tending, nurse bees)."""
        return self._acute("aw_brood")

    def aw_comb_acute_rq(self):
        """Acute risk quotient for adult worker (comb building, cleaning, and food handling)."""
        return self._acute("aw_comb")

    def aw_pollen_acute_rq(self):
        """Acute risk quotient for adult worker (foraging for pollen)."""
        return self._acute("aw_pollen")

    def aw_nectar_acute_rq(self):
        """Acute risk quotient for adult worker (foraging for nectar)."""
        return self._acute("aw_nectar")

    def aw_winter_acute_rq(self):
        """Acute risk quotient for adult worker (maintenance of hive in winter)."""
        return self._acute("aw_winter")

    def ad_acute_rq(self):
        """Acute risk quotient for adult drone."""
        return self._acute("ad")

    def aq_acute_rq(self):
        """Acute risk quotient for adult queen."""
        return self._acute("aq")

    def lw1_chronic_rq(self):
        """Chronic risk quotient for larval worker day 1."""
        return self._chronic("lw1")

    def lw2_chronic_rq(self):
        """Chronic risk quotient for larval worker day 2."""
        return self._chronic("lw2")

    def lw3_chronic_rq(self):
        """Chronic risk quotient for larval worker day 3."""
        return self._chronic("lw3")

    def lw4_chronic_rq(self):
        """Chronic risk quotient for larval worker day 4."""
        return self._chronic("lw4")

    def lw5_chronic_rq(self):
        """Chronic risk quotient for larval worker day 5."""
        return self._chronic("lw5")

    def ld6_chronic_rq(self):
        """Chronic risk quotient for larval drone aged 6+ days."""
        return self._chronic("ld6")

    def lq1_chronic_rq(self):
        """Chronic risk quotient for larval queen day 1."""
        return self._chronic("lq1")

    def lq2_chronic_rq(self):
        """Chronic risk quotient for larval queen day 2."""
        return self._chronic("lq2")

    def lq3_chronic_rq(self):
        """Chronic risk quotient for larval queen day 3."""
        return self._chronic("lq3")

    def lq4_chronic_rq(self):
        """Chronic risk quotient for larval queen aged 4+ days."""
        return self._chronic("lq4")

    def aw_cell_chronic_rq(self):
        """Chronic risk quotient for adult worker (cell cleaning and capping)."""
        return self._chronic("aw_cell")

    def aw_brood_chronic_rq(self):
        """Chronic risk quotient for adult worker (brood and queen tending, nurse bees)."""
        return self._chronic("aw_brood")

    def aw_comb_chronic_rq(self):
        """Chronic risk quotient for adult worker (comb building, cleaning, and food handling)."""
        return self._chronic("aw_comb")

    def aw_pollen_chronic_rq(self):
        """Chronic risk quotient for adult worker (foraging for pollen)."""
        return self._chronic("aw_pollen")

    def aw_nectar_chronic_rq(self):
        """Chronic risk quotient for adult worker (foraging for nectar)."""
        return self._chronic("aw_nectar")

    def aw_winter_chronic_rq(self):
        """Chronic risk quotient for adult worker (maintenance of hive in winter)."""
        return self._chronic("aw_winter")

    def ad_chronic_rq(self):
        """Chronic risk quotient for adult drone."""
        return self._chronic("ad")

    def aq_chronic_rq(self):
        """Chronic risk quotient for adult queen (laying 1500 eggs/day)."""
        return self._chronic("aq")
# if __name__ == '__main__':
# pd_in = pd.DataFrame({
# "application_rate": [1.2],
# "application_method": ['foliar spray'],
# "empirical_residue": ['FALSE'],
# "empirical_pollen": [1.],
# "empirical_nectar": [0.4],
# "empirical_jelly": [0.5],
# "adult_contact_ld50": [2.2],
# "adult_oral_ld50": [3.5],
# "adult_oral_noael": [1.7],
# "larval_ld50": [0.8],
# "larval_noael": [0.5],
# "log_kow": [0.24],
# "koc": [12.3],
# "mass_tree_vegetation": [69.3],
# "lw1_jelly": [1.9],
# "lw2_jelly": [9.4],
# "lw3_jelly": [19.],
# "lw4_nectar": [60.],
# "lw4_pollen": [1.8],
# "lw5_nectar": [120.],
# "lw5_pollen": [3.6],
# "ld6_nectar": [130.],
# "ld6_pollen": [3.6],
# "lq1_jelly": [1.9],
# "lq2_jelly": [9.4],
# "lq3_jelly": [23.],
# "lq4_jelly": [141.],
# "aw_cell_nectar": [60.],
# "aw_cell_pollen": [6.65],
# "aw_brood_nectar": [140.],
# "aw_brood_pollen": [9.6],
# "aw_comb_nectar": [60.],
# "aw_comb_pollen": [1.7],
# "aw_fpollen_nectar": [43.5],
# "aw_fpollen_pollen": [0.041],
# "aw_fnectar_nectar": [292.],
# "aw_fnectar_pollen": [0.041],
# "aw_winter_nectar": [29.],
# "aw_winter_pollen": [2.],
# "ad_nectar": [235.],
# "ad_pollen": [0.0002],
# "aq_jelly": [525.],
# })
# output = Beerex(pd_in, None)
# output.execute_model()
# print("Model has been run")
| [
6738,
11593,
37443,
834,
1330,
7297,
220,
1303,
1671,
654,
287,
11361,
513,
13,
15,
7668,
2099,
16765,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
198,
2,
19796,
... | 1.918086 | 18,080 |
# Generated by Django 3.1.5 on 2021-02-16 20:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
20,
319,
33448,
12,
2999,
12,
1433,
1160,
25,
3901,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.04918 | 61 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Custom
from src.normalisation import channel, instance
if __name__ == "__main__":
    # Smoke test: push a random batch through the Generator and report the
    # output size. `Generator` is defined elsewhere in this module (not
    # visible in this chunk) -- presumably a conv generator taking
    # (spatial dims, batch) plus channel/depth kwargs; confirm its signature.
    C = 8
    y = torch.randn([3,C,16,16])  # dummy input: (batch=3, channels=C, H=16, W=16)
    y_dims = y.size()
    G = Generator(y_dims[1:], y_dims[0], C=C, n_residual_blocks=3, sample_noise=True)
    x_hat = G(y)
    print(x_hat.size())
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
8562,
198,
6738,
12351,
13,
11265,
5612,
1330,
6518,
11,
4554,
628,
198,
198... | 2.308176 | 159 |
import pandas as pd
from django.http import response
from django.shortcuts import render,HttpResponse
from .models import Person,report,management,Person_without_Aadhar
import datetime
from django.utils.timezone import utc
from rest_framework import serializers, status,viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser,FormParser
from .serializers import PersonSerializer,ReportSerializer,ManagementSerializer,Person_Without_Aadhar_Serializer
from functools import cmp_to_key
import face_recognition
import os
from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LogisticRegression
# Create your views here. | [
11748,
19798,
292,
355,
279,
67,
198,
6738,
42625,
14208,
13,
4023,
1330,
2882,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
43481,
31077,
198,
6738,
764,
27530,
1330,
7755,
11,
13116,
11,
27604,
11,
15439,
62,
19419,
62,
... | 3.765 | 200 |
from exceptions.exceptions import ArgumentInvalid, ObjectsNotExist
from typing import Generator, Optional
from sqlalchemy import and_
from configures.const import POST_MINIMUM_WORDS
from handlers import BaseHandler
from models.database_models import Comment
from models.database_models.post_model import Post
from models.database_models.topic_model import Topic
from models.database_models.user_model import User
from models.response_models.post_model import ResponsePostModel
| [
6738,
13269,
13,
1069,
11755,
1330,
45751,
44651,
11,
35832,
3673,
3109,
396,
198,
6738,
19720,
1330,
35986,
11,
32233,
198,
198,
6738,
44161,
282,
26599,
1330,
290,
62,
198,
198,
6738,
4566,
942,
13,
9979,
1330,
24582,
62,
23678,
3955,... | 4.173913 | 115 |
#!/usr/bin/env python
"""
Cubic spline peak finder.
Hazen 03/16
"""
import pickle
import numpy
import tifffile
import storm_analysis.sa_library.analysis_io as analysisIO
import storm_analysis.sa_library.fitting as fitting
import storm_analysis.sa_library.ia_utilities_c as utilC
import storm_analysis.sa_library.matched_filter_c as matchedFilterC
import storm_analysis.spliner.cubic_fit_c as cubicFitC
import storm_analysis.spliner.spline_to_psf as splineToPSF
def initFitter(finder, parameters, spline_fn):
    """
    Initialize and return a cubicFitC.CSplineFit object.

    finder - peak finder object; its setVariance() method stores/pads the
        camera variance map.
    parameters - analysis parameters object (hasAttr()/getAttr() interface).
    spline_fn - spline/PSF object; getType() selects 2D vs 3D fitting.
    """
    # Load variance, scale by gain.
    #
    # Variance is in units of ADU*ADU.
    # Gain is ADU/photo-electron.
    #
    variance = None
    if parameters.hasAttr("camera_calibration"):
        [offset, variance, gain] = analysisIO.loadCMOSCalibration(parameters.getAttr("camera_calibration"))
        variance = variance/(gain*gain)
    # Set variance in the peak finder, this method also pads the
    # variance to the correct size.
    # NOTE(review): setVariance() is also reached when no camera calibration
    # is supplied (variance is None) -- presumably it tolerates None; confirm.
    variance = finder.setVariance(variance)
    # Create C fitter object. The spline type decides 2D vs 3D fitting.
    if (spline_fn.getType() == "2D"):
        return cubicFitC.CSpline2DFit(scmos_cal = variance,
                                      spline_fn = spline_fn)
    else:
        return cubicFitC.CSpline3DFit(scmos_cal = variance,
                                      spline_fn = spline_fn)
def initFindAndFit(parameters):
    """
    Initialize and return a SplinerFinderFitter object.

    parameters - analysis parameters object; must provide the "spline" file
        attribute (and optionally "camera_calibration", used by initFitter).
    """
    # Create spline object.
    spline_fn = splineToPSF.loadSpline(parameters.getAttr("spline"))
    # Create peak finder.
    finder = fitting.PeakFinderArbitraryPSF(parameters = parameters,
                                            psf_object = spline_fn)
    # Create cubicFitC.CSplineFit object.
    mfitter = initFitter(finder, parameters, spline_fn)
    # Create peak fitter.
    fitter = fitting.PeakFitterArbitraryPSF(mfitter = mfitter,
                                            parameters = parameters)
    # Specify which properties we want from the analysis.
    properties = ["background", "error", "height", "iterations", "significance", "sum", "x", "y", "z"]
    return fitting.PeakFinderFitter(peak_finder = finder,
                                    peak_fitter = fitter,
                                    properties = properties)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
43632,
291,
4328,
500,
9103,
1064,
263,
13,
198,
198,
39,
1031,
268,
7643,
14,
1433,
198,
37811,
198,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
198,
198,
11748,
256,
... | 2.272038 | 1,055 |
import pandas as pd
url = "https://en.wikipedia.org/wiki/Road_safety_in_Europe"
target_table_name = "European Union Road Safety Facts and Figures"
# Fetch all tables on webpage that match
# provided table name and return as list
df = pd.read_html(url, match=target_table_name)
# Grab the first table
df = df[0] if len(df) >= 1 else 'Table Not Found!'
# Drop unwanted columns
df.drop(
['Road Network Length (in km) in 2013[29]',
'Number of People Killed per Billion km[30]',
'Number of Seriously Injured in 2017/2018[30]'],
axis=1,
inplace=True
)
# Rename columns to a cleaner version
df.rename(columns={
'Area (thousands of km2)[24]' :'Area',
'Population in 2018[25]' :'Population',
'GDP per capita in 2018[26]' :'GDP per capita',
'Population density (inhabitants per km2) in 2017[27]' :'Population density',
'Vehicle ownership (per thousand inhabitants) in 2016[28]':'Vehicle ownership',
'Total Road Deaths in 2018[30]' :'Total road deaths',
'Road deaths per Million Inhabitants in 2018[30]' :'Road deaths per Million Inhabitants.',
},
inplace=True
)
# Add the year column with the default value of 2018 for all rows
df['Year'] = 2018
# Save to csv file in current directory
df.to_csv('european_union_road_safety_facts_and_figures.csv', index=False)
# Print the table to console
print(df) | [
11748,
19798,
292,
355,
279,
67,
198,
198,
6371,
796,
366,
5450,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
29197,
62,
44708,
62,
259,
62,
16112,
1,
198,
16793,
62,
11487,
62,
3672,
796,
366,
22030,
4479,
5567,
11233,
26972,
290... | 2.554593 | 577 |
import csv
data = loaddata()
for k,v in data.iteritems():
print len(k), k + ":" + v
#return 0 if not a place
#return 1 if it's region
#return 2 if it's a province
#return 3 if it's a munincipality
| [
11748,
269,
21370,
198,
198,
7890,
796,
3440,
7890,
3419,
198,
1640,
479,
11,
85,
287,
1366,
13,
2676,
23814,
33529,
198,
220,
220,
220,
3601,
18896,
7,
74,
828,
479,
1343,
366,
11097,
1343,
410,
220,
198,
220,
220,
220,
1303,
7783,... | 2.494505 | 91 |
"""Test suite for the bootstrap_elasticsearch view of richie's search app."""
import json
from unittest import mock
from django.contrib.messages import get_messages
from cms.test_utils.testcases import CMSTestCase
from richie.apps.core.factories import UserFactory
class BootstrapElasticsearchViewTestCase(CMSTestCase):
"""
Integration test suite to validate the behavior of the `bootstrap_elasticsearch` view.
"""
@mock.patch("django.core.management.call_command")
def test_views_bootstrap_elasticsearch_with_permission(self, mock_command):
"""Confirm triggering the search index bootstrapping works as expected."""
user = UserFactory(is_staff=True)
self.client.login(username=user.username, password="password")
# Add the necessary permission
self.add_permission(user, "can_manage_elasticsearch")
url = "/api/v1.0/bootstrap-elasticsearch/"
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content, {})
# Check the presence of a confirmation message
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(
str(messages[0]), "The search index was successfully bootstrapped"
)
mock_command.assert_called_once_with("bootstrap_elasticsearch")
@mock.patch("django.core.management.call_command")
def test_views_bootstrap_elasticsearch_no_permission(self, mock_command):
"""Bootstrapping ES should be forbidden if the permission is not not granted."""
user = UserFactory(is_staff=True)
self.client.login(username=user.username, password="password")
url = "/api/v1.0/bootstrap-elasticsearch/"
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.content, b"You are not allowed to manage the search index."
)
self.assertFalse(mock_command.called)
@mock.patch("django.core.management.call_command")
def test_views_bootstrap_elasticsearch_post_required(self, mock_command):
"""Bootstrapping ES can only be triggered with a POST method."""
user = UserFactory(is_staff=True)
self.client.login(username=user.username, password="password")
# Add the necessary permission
self.add_permission(user, "can_manage_elasticsearch")
url = "/api/v1.0/bootstrap-elasticsearch/"
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, 405)
response = self.client.put(url, follow=True)
self.assertEqual(response.status_code, 405)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 405)
self.assertFalse(mock_command.called)
@mock.patch("django.core.management.call_command")
def test_views_bootstrap_elasticsearch_anonymous(self, mock_command):
"""An anonymous user should not be allowed to bootstrap ES."""
url = "/api/v1.0/bootstrap-elasticsearch/"
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 401)
self.assertFalse(mock_command.called)
@mock.patch("django.core.management.call_command")
def test_views_bootstrap_elasticsearch_not_staff_with_permission(
self, mock_command
):
"""A user with permissions that is not staff should not be allowed to bootstrap ES."""
user = UserFactory()
self.client.login(username=user.username, password="password")
# Add the necessary permission
self.add_permission(user, "can_manage_elasticsearch")
url = "/api/v1.0/bootstrap-elasticsearch/"
response = self.client.post(url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(
response.content, b"You are not allowed to manage the search index."
)
self.assertFalse(mock_command.called)
| [
37811,
14402,
18389,
329,
262,
6297,
26418,
62,
417,
3477,
12947,
1570,
286,
5527,
494,
338,
2989,
598,
526,
15931,
198,
11748,
33918,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
37348,
1095,
... | 2.621279 | 1,579 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from django.db import models
from django.utils.translation import ugettext_lazy as _
from app.core.models import Domain, Mailbox
EXT_LIST_TYPE = (
('general', _(u'普通邮件列表')),
('dept', _(u'部门邮件列表')),
('sys', _(u'系统邮件列表'))
)
EXT_LIST_PERMISSION = (
('public', _(u'公开列表')),
('private', _(u'私有列表')),
('domain', _(u'本地域名公共列表')),
('domain2', _(u'同域名公共列表'))
)
EXT_LIST_STATUS = (
(u'-1', _(u'正常')),
(u'1', _(u'禁用')),
)
EXT_LIST_MEM_PERMIT = (
('1', _(u'收发')),
('-1', _(u'只发')),
('0', _(u'只收')),
)
RELATE_EMAIL_ACCESS = (
('read', _(u'读')),
('edit', _(u'修改')),
('send', _(u'发送')),
('all', _(u'完全控制')),
)
from auditlog.registry import auditlog
auditlog.register(ExtList, include_fields=['id', 'listname', 'address', 'domain_id', 'permission', 'description', 'disabled'])
auditlog.register(ExtListMember, include_fields=['id', 'extlist', 'address', 'permit', 'name', 'update_time'])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
640,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
... | 1.79682 | 566 |
#!/usr/bin/env python3
'''
@description : Given a gnomAD VCF, shortlist SNPs for a capture-based panel to detect allelic imbalance
@created : 09/16/2020
@author : Cyriac Kandoth
'''
from __future__ import division, print_function
import os, sys, argparse, time, pysam
if __name__ == "__main__":
start_time = time.time()
main()
end_time = time.time()
total_time = end_time - start_time
sys.stderr.write("Runtime: %0.2f seconds\n" % total_time)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
31,
11213,
1058,
11259,
257,
19967,
296,
2885,
569,
22495,
11,
1790,
4868,
11346,
12016,
329,
257,
8006,
12,
3106,
6103,
284,
4886,
28654,
677,
32556,
198,
31,
25598,... | 2.703488 | 172 |
import socket
import json
import os
import sys
from threading import Thread
from multiprocessing import Process
from queue import Queue
from logging import Formatter,getLogger,FileHandler
from logging.handlers import TimedRotatingFileHandler,RotatingFileHandler
import logging
import time
from functools import reduce
logg = logging.getLogger(__name__)
logg.setLevel(logging.DEBUG)
logg.addHandler(getErrorHandler("/var/log/socket-server/socketserver.log"))
try:
mySocketServer = MySocketServer()
mySocketServer.startWork()
mySocketServer.runSocketServer()
except Exception as error:
logg.exception(error)
| [
11748,
17802,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
4704,
278,
1330,
14122,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
18931,
1330,
5178,
1436,
11,
1136,
11187... | 3.296875 | 192 |
from je_editor.utils.file.open.open_file import read_file
def open_last_edit_file(file_from_output_content, code_editor):
"""
:param file_from_output_content: readied file from output content
:param code_editor the editor to insert file content
:return readied file
open last edit file
if success open file
insert file content to code_editor
"""
temp_to_check_file = read_file(file_from_output_content)
if temp_to_check_file is not None:
code_editor.delete("1.0", "end-1c")
code_editor.insert("end-1c", temp_to_check_file[1])
return temp_to_check_file[0]
| [
6738,
11223,
62,
35352,
13,
26791,
13,
7753,
13,
9654,
13,
9654,
62,
7753,
1330,
1100,
62,
7753,
628,
198,
4299,
1280,
62,
12957,
62,
19312,
62,
7753,
7,
7753,
62,
6738,
62,
22915,
62,
11299,
11,
2438,
62,
35352,
2599,
198,
220,
2... | 2.642553 | 235 |
from __future__ import print_function
import zmq
import time
ADDR='tcp://127.0.0.1:11155'
ctx = zmq.Context()
srv = ctx.socket(zmq.REP)
srv.bind(ADDR)
#srv.setsockopt(zmq.RCVTIMEO, 3000);
while True:
try:
msg = srv.recv()
except Exception as e:
print('zmq socket revc timedout:', e)
else:
print('client says: %s' % msg)
srv.send('hi from server')
time.sleep(2)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1976,
76,
80,
198,
11748,
640,
198,
198,
2885,
7707,
11639,
83,
13155,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
1157,
18742,
6,
198,
198,
49464,
796,
1976,
76,
80,
13,
219... | 2.176796 | 181 |
from __future__ import annotations
from typing import Dict, Any, List, TYPE_CHECKING
if TYPE_CHECKING:
from xml.etree.ElementTree import Element
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
11,
7343,
11,
41876,
62,
50084,
2751,
198,
198,
361,
41876,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
35555,
13,
316,
631,
13,
20180,
27660,
... | 3.377778 | 45 |
import os
import sys
from pathlib import Path
from tkinter import *
from tkinter import filedialog
from os import listdir
from os.path import isfile, join
from nltk.tokenize.regexp import RegexpTokenizer
from make_story import get_next_words, load_data_from_file
BUTTONS_PER_COL = 8
DATA_PATH = "./data/"
data = None
tokenizer = RegexpTokenizer(r'\w+')
Path(DATA_PATH).mkdir(parents=True, exist_ok=True)
if len(sys.argv) == 1:
ask_for_files()
elif len(sys.argv) == 2:
name = DATA_PATH + sys.argv[1] + '.txt'
if os.path.isfile(name):
data = load_data_from_file(sys.argv[1])
else:
print(f"no file found at {name}")
root = Tk()
root.title("Story-o-mat")
root.geometry("1100x1000")
root.configure(background='black')
root.attributes("-topmost", True)
title = Label(root, text="Der Lovecraft-o-mat")
title.pack()
title.configure(font=("Courier", 30), bg="black", fg="red", pady=10, width=50)
frame=Frame(root, width=300, height=160)
frame.pack()
text_field = Text(frame, width=52, height=20, padx=19, wrap="word")
text_field.pack()
text_field.configure(font=("Courier", 30))
text_buttons = []
col5 = Frame(root)
col5.pack(side=TOP)
add_text_coloumn(0)
add_text_coloumn(1)
add_text_coloumn(2)
add_text_coloumn(4)
add_text_coloumn(5)
word_count = Label(root, text="0 Words")
word_count.pack()
word_count.configure(font=("Courier", 20), bg="black", fg="red", pady=10, width=50)
save_btn = Button(root,text="save", command=lambda: file_save())
save_btn.pack()
root.bind("<space>", fill_text_buttons)
root.mainloop()
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,... | 2.417431 | 654 |
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import six
from conversion_imagenet import TestModels
if __name__ == '__main__':
test_mxnet()
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2237,
198,
6738,
11315,
62,
320,
11286,
316,
1330,
6208,
5841,
1424,
628,
198,
19... | 3.295082 | 61 |
# coding=utf-8
#
# created by kpe on 15.Mar.2019 at 12:52
#
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
from bert.layer import Layer
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
198,
2,
2727,
416,
479,
431,
319,
1315,
13,
7676,
13,
23344,
379,
1105,
25,
4309,
198,
2,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
117... | 3.152941 | 85 |
# postgresql/pg8000.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+pg8000
:name: pg8000
:dbapi: pg8000
:connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pythonhosted.org/pg8000/
.. note::
The pg8000 dialect is **not tested as part of SQLAlchemy's continuous
integration** and may have unresolved issues. The recommended PostgreSQL
dialect is psycopg2.
.. _pg8000_unicode:
Unicode
-------
pg8000 will encode / decode string values between it and the server using the
PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
The ``client_encoding`` can be overridden for a session by executing the SQL:
SET CLIENT_ENCODING TO 'utf8';
SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
engine = create_engine(
"postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
.. _pg8000_isolation_level:
pg8000 Transaction Isolation Level
-------------------------------------
The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using
pg8000.
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`psycopg2_isolation_level`
""" # noqa
import decimal
import re
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import UUID
from .json import JSON
from ... import exc
from ... import processors
from ... import types as sqltypes
from ... import util
from ...sql.elements import quoted_name
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
dialect = PGDialect_pg8000
| [
2,
1281,
34239,
13976,
14,
6024,
33942,
13,
9078,
198,
2,
15069,
357,
34,
8,
5075,
12,
1238,
2481,
262,
16363,
2348,
26599,
7035,
290,
20420,
1279,
3826,
37195,
20673,
198,
2,
2393,
29,
198,
2,
198,
2,
770,
8265,
318,
636,
286,
16... | 3.023392 | 855 |
from tempfile import NamedTemporaryFile
from cbmail.base import Attachment, BaseMail
from cbmail.mixins import MailingListMixin
from django.test import SimpleTestCase
from mock import patch
| [
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
198,
6738,
269,
65,
4529,
13,
8692,
1330,
3460,
15520,
11,
7308,
25804,
198,
6738,
269,
65,
4529,
13,
19816,
1040,
1330,
11099,
278,
8053,
35608,
259,
198,
6738,
42625,
14208,
13,
... | 3.62963 | 54 |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('check_db_connection', views.get_time, name="check_db"),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
9630,
11,
1438,
11639,
9630,
33809,
198,
220,
220,
220,
3108,
10786,
... | 2.793651 | 63 |
# coding=utf-8
import re
import json
import requests
from bs4 import BeautifulSoup
if __name__ == '__main__':
print(get())
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
302,
198,
11748,
33918,
198,
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,... | 2.826087 | 46 |
import ast
import operator
import typing
from collections import defaultdict
from codegen import raml_types
from codegen.generate_abstract import AbstractModuleGenerator
from codegen.utils import reorder_class_definitions
FIELD_TYPES = {
"string": "marshmallow.fields.String",
"object": "marshmallow.fields.Dict",
"float": "marshmallow.fields.Float",
"number": "marshmallow.fields.Integer",
"integer": "marshmallow.fields.Integer",
"boolean": "marshmallow.fields.Bool",
"array": "marshmallow.fields.List",
"datetime": "marshmallow.fields.DateTime",
"date-only": "marshmallow.fields.Date",
"any": "marshmallow.fields.Raw",
}
class SchemaModuleGenerator(AbstractModuleGenerator):
"""This generator is responsible for generating the schemas.py file"""
def add_type_definition(self, resource):
"""Create a class definition"""
if resource.name in FIELD_TYPES:
return
if "asMap" in resource.annotations:
return self.add_dict_field_definition(resource, resource.package_name)
return self.add_schema_definition(resource, resource.package_name)
class SchemaClassGenerator:
"""Create a marshmallow schema"""
def _create_nested_field(self, type_obj):
"""Create a `marshmallow.fields.Nested()` field.
Generated code::
marshmallow.fields.Nested(
nested="commercetools.schemas.{package}.{object}Schema",
unknown=marshmallow.EXCLUDE,
allow_none=True
)
"""
return ast.Call(
func=ast.Name(id="marshmallow.fields.Nested"),
args=[],
keywords=[
ast.keyword(
arg="nested",
value=ast.Str(
s=f"commercetools.schemas.{type_obj.package_name}.{type_obj.name}Schema"
),
),
ast.keyword(arg="unknown", value=ast.Name(id="marshmallow.EXCLUDE")),
ast.keyword(arg="allow_none", value=ast.NameConstant(True)),
],
)
def _create_regex_call(self, field_name, method_name):
"""Generate `data = self.fields[{ field_name }].{ method_name }(data)`"""
return ast.Assign(
targets=[ast.Name(id="data")],
value=ast.Call(
func=ast.Attribute(
value=ast.Subscript(
value=ast.Attribute(value=ast.Name(id="self"), attr="fields"),
slice=ast.Index(value=ast.Str(s=field_name)),
),
attr=method_name,
),
args=[ast.Name(id="data")],
keywords=[],
),
)
def _create_marshmallow_hook(self, name):
"""Create a method on the Marshmallow schema which acts as a hook.
Generated code:
@marshmallow.post_load
def { name }(self, data):
<empty>
"""
return ast.FunctionDef(
name=name,
args=ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg="data", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
),
body=[],
decorator_list=[ast.Name(id=f"marshmallow.{name}")],
returns=None,
)
| [
11748,
6468,
198,
11748,
10088,
198,
11748,
19720,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
2438,
5235,
1330,
15770,
75,
62,
19199,
198,
6738,
2438,
5235,
13,
8612,
378,
62,
397,
8709,
1330,
27741,
26796,
8645,
1352,
198,
67... | 1.988262 | 1,789 |
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d
from .batchnorm import patch_sync_batchnorm, convert_model
from .replicate import DataParallelWithCallback, patch_replication_callback
| [
2,
770,
2393,
318,
636,
286,
16065,
11413,
1143,
12,
33,
963,
35393,
12,
20519,
15884,
354,
13,
198,
2,
3740,
1378,
12567,
13,
785,
14,
85,
330,
3883,
14,
50,
24871,
1143,
12,
33,
963,
35393,
12,
20519,
15884,
354,
198,
2,
4307,
... | 3.25641 | 117 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.bado.io
~~~~~~~~~~~~
Provides functions for asynchronously reading files and urls
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.bado.io import async_url_open
"""
import pygogo as gogo
from io import open
from tempfile import NamedTemporaryFile
from os import remove
from meza.compat import encode
from . import coroutine, return_value
try:
from twisted.test.proto_helpers import AccumulatingProtocol
except ImportError:
AccumulatingProtocol = object
else:
from twisted.internet.reactor import callLater
from twisted.protocols.basic import FileSender
from twisted.web.client import getPage, downloadPage
from twisted.test.proto_helpers import StringTransport
logger = gogo.Gogo(__name__, monolog=True).logger
# http://stackoverflow.com/q/26314586/408556
# http://stackoverflow.com/q/8157197/408556
# http://stackoverflow.com/a/33708936/408556
@coroutine
@coroutine
@coroutine
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
1509,
28,
19,
25,
912,
28,
19,
25,
11201,
392,
8658,
198,
37811,
198,
380,
7204,
13,
65,
4533,
13,
952,
198,
15116,
8728,
198,
15946,
1460,
5499,
329,
... | 2.856742 | 356 |
#!/usr/bin/env python3
'''Search or create POTCAR file.
The elements can be parsed manually, or read from POSCAR.
'''
import os
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from mykit.core.utils import trim_after
from mykit.vasp.poscar import Poscar
from mykit.vasp.potcar import PotcarSearch
if __name__ == "__main__":
pv_addpot()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
18243,
393,
2251,
350,
2394,
20034,
2393,
13,
198,
198,
464,
4847,
460,
307,
44267,
14500,
11,
393,
1100,
422,
28069,
20034,
13,
198,
7061,
6,
198,
198,
11748,
28686,
19... | 2.930769 | 130 |
from typing import List, Optional
from cloudrail.knowledge.context.aws.lambda_.lambda_alias import create_lambda_function_arn
from cloudrail.knowledge.context.aws.iam.policy_statement import PolicyStatement
from cloudrail.knowledge.context.aws.iam.policy import Policy
from cloudrail.knowledge.context.aws.service_name import AwsServiceName
from cloudrail.knowledge.utils.utils import hash_list
class LambdaPolicyStatements(Policy):
"""
Attributes:
function_name: The name of the Lambda Function the policy statements are for.
statements: The statements themselves.
qualifier: A Lambda Function may have a qualified set, this will be it
(or None).
lambda_func_arn: The ARN of the Lambda Funciton these policy statements
are for.
"""
@property
| [
6738,
19720,
1330,
7343,
11,
32233,
198,
198,
6738,
6279,
30224,
13,
45066,
13,
22866,
13,
8356,
13,
50033,
44807,
50033,
62,
26011,
1330,
2251,
62,
50033,
62,
8818,
62,
1501,
198,
6738,
6279,
30224,
13,
45066,
13,
22866,
13,
8356,
13... | 3.003546 | 282 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: flat
import flatbuffers
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
6228,
198,
198,
11748,
6228,
36873,
364,
198
] | 4.153846 | 26 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt_window_test_ui.ui'
#
# Created: Fri Jan 16 22:18:17 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
39568,
62,
17497,
62,
9288,
62,
9019,
13,
9019,
6,
198,
2,
198,
2,
15622,
25,
19480,
2365,
1467,
2534,
25,
... | 2.520548 | 146 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from blueapps.utils.unique import uniqid
from django.core.cache import cache
from apps.api import BkDataDatabusApi
from apps.utils.log import logger
from apps.log_databus.constants import DEFAULT_TIME_FORMAT, DEFAULT_CATEGORY_ID, MAX_SYNC_CLEAN_TTL
from apps.log_databus.exceptions import ProjectNoteExistException
from apps.log_databus.models import BKDataClean
from apps.log_search.handlers.index_set import IndexSetHandler
from apps.log_search.models import ProjectInfo, Scenario
class BKDataCleanUtils:
"""
bk data clean utils class:
- to get bkdata_clean
- to flush Quánxiàn Authority
"""
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@classmethod
@staticmethod
@staticmethod
@staticmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
347,
42,
12,
25294,
5525,
241,
251,
165,
110,
116,
33768,
98,
33232,
245,
33176,
111,
... | 3.296636 | 654 |
"""Time humanizing functions."""
from __future__ import annotations
import datetime as dt
import enum
import functools
import math
from typing import Any
import human_readable.i18n as i18n
_ = i18n.gettext
P_ = i18n.pgettext
N_ = i18n.ngettext
@functools.total_ordering
class Unit(enum.Enum):
"""Enum for minimum unit."""
MICROSECONDS = 0
MILLISECONDS = 1
SECONDS = 2
MINUTES = 3
HOURS = 4
DAYS = 5
MONTHS = 6
YEARS = 7
def __lt__(self, other: Any) -> Any:
"""Comparison between units."""
return self.value < other.value
def time_of_day(hour: int) -> str:
"""Given current hour, returns time of the day."""
if 0 < hour < 12:
return _("morning")
elif 12 < hour <= 18:
return _("afternoon")
elif 18 < hour <= 23:
return _("evening")
return ""
def _formal_time(
value: dt.time, hour: int, count_hours: list[str], count_minutes: list[str]
) -> str:
"""Return formal timing."""
hour_count = count_hours[hour]
if value.minute > 30:
reversed_minute_count = count_minutes[60 - value.minute]
minute_translation = i18n.ngettext(
"{amount} minute", "{amount} minutes", 60 - value.minute
).format(amount=reversed_minute_count)
return i18n.ngettext(
"{minute_translation} to {hour_count} hour",
"{minute_translation} to {hour_count} hours",
hour,
).format(minute_translation=minute_translation, hour_count=hour_count)
elif value.minute == 0:
return i18n.ngettext(
"{hour_count} o'clock", "{hour_count} o'clock", hour
).format(hour_count=hour_count)
else:
minute_count = count_minutes[value.minute]
minute_translation = i18n.ngettext(
"{amount} minute", "{amount} minutes", value.minute
).format(amount=minute_count)
return i18n.ngettext(
"{minute_translation} past {hour_count}",
"{minute_translation} past {hour_count}",
hour,
).format(hour_count=hour_count, minute_translation=minute_translation)
def _informal_hour_count(hour: int, count_hours: list[str]) -> str:
"""Return word hour used in informal timing."""
if hour == 0:
hour_count = _("midnight")
elif hour == 12:
hour_count = _("noon")
elif hour > 12:
hour_count = count_hours[hour - 12]
else:
hour_count = count_hours[hour]
return hour_count
def _informal_minute_count(
value: dt.time, hour: int, hour_count: str, count_minutes: list[str]
) -> str:
"""Return messsage format informal timing based on minute count."""
if value.minute == 0:
clock = hour_count
elif value.minute > 30:
if value.minute == 45:
reversed_minute_count = _("a quarter")
else:
reversed_minute_count = count_minutes[60 - value.minute]
if hour == 0:
clock = _("{reversed_minute_count} to midnight").format(
reversed_minute_count=reversed_minute_count
)
elif hour == 12:
clock = _("{reversed_minute_count} to noon").format(
reversed_minute_count=reversed_minute_count
)
else:
clock = i18n.ngettext(
"{reversed_minute_count} to {hour_count}",
"{reversed_minute_count} to {hour_count}",
hour,
).format(reversed_minute_count=reversed_minute_count, hour_count=hour_count)
elif value.minute == 30:
clock = _("half past {hour_count}").format(hour_count=hour_count)
elif value.minute == 15:
clock = _("a quarter past {hour_count}").format(hour_count=hour_count)
else:
minute_count = count_minutes[value.minute]
clock = _("{hour_count} and {minute_count}").format(
hour_count=hour_count, minute_count=minute_count
)
return clock
def _informal_time(
value: dt.time, hour: int, count_hours: list[str], count_minutes: list[str]
) -> str:
"""Return informal timing."""
period = time_of_day(hour)
hour_count = _informal_hour_count(hour, count_hours)
clock = _informal_minute_count(value, hour, hour_count, count_minutes)
if period:
return _("{clock} in the {period}").format(clock=clock, period=period)
return clock
def timing(time: dt.time, formal: bool = True) -> str:
"""Return human-readable time.
Compares time values to present time returns representing readable of time
with the given day period.
Args:
time: any datetime.
formal: formal or informal reading. Defaults to True.
Returns:
str: readable time or original object.
"""
count_hours = [
P_("hour 0", "zero"),
P_("hour 1", "one"),
P_("hour 2", "two"),
P_("hour 3", "three"),
P_("hour 4", "four"),
P_("hour 5", "five"),
P_("hour 6", "six"),
P_("hour 7", "seven"),
P_("hour 8", "eight"),
P_("hour 9", "nine"),
P_("hour 10", "ten"),
P_("hour 11", "eleven"),
P_("hour 12", "twelve"),
P_("hour 13", "one"),
P_("hour 14", "two"),
P_("hour 15", "three"),
P_("hour 16", "four"),
P_("hour 17", "five"),
P_("hour 18", "six"),
P_("hour 19", "seven"),
P_("hour 20", "eight"),
P_("hour 21", "nine"),
P_("hour 22", "ten"),
P_("hour 23", "eleven"),
]
count_minutes = [
P_("minute 0", "zero"),
P_("minute 1", "one"),
P_("minute 2", "two"),
P_("minute 3", "three"),
P_("minute 4", "four"),
P_("minute 5", "five"),
P_("minute 6", "six"),
P_("minute 7", "seven"),
P_("minute 8", "eight"),
P_("minute 9", "nine"),
P_("minute 10", "ten"),
P_("minute 11", "eleven"),
P_("minute 12", "twelve"),
P_("minute 13", "thirteen"),
P_("minute 14", "fourteen"),
P_("minute 15", "fifteen"),
P_("minute 16", "sixteen"),
P_("minute 17", "seventeen"),
P_("minute 18", "eighteen"),
P_("minute 19", "nineteen"),
P_("minute 20", "twenty"),
P_("minute 21", "twenty one"),
P_("minute 22", "twenty two"),
P_("minute 23", "twenty three"),
P_("minute 24", "twenty four"),
P_("minute 25", "twenty five"),
P_("minute 26", "twenty six"),
P_("minute 27", "twenty seven"),
P_("minute 28", "twenty eight"),
P_("minute 29", "twenty nine"),
P_("minute 30", "thirty"),
P_("minute 31", "thirty one"),
P_("minute 32", "thirty two"),
P_("minute 33", "thirty three"),
P_("minute 34", "thirty four"),
P_("minute 35", "thirty five"),
P_("minute 36", "thirty six"),
P_("minute 37", "thirty seven"),
P_("minute 38", "thirty eight"),
P_("minute 39", "thirty nine"),
P_("minute 40", "forty"),
P_("minute 41", "forty one"),
P_("minute 42", "forty two"),
P_("minute 43", "forty three"),
P_("minute 44", "forty four"),
P_("minute 45", "forty five"),
P_("minute 46", "forty six"),
P_("minute 47", "forty seven"),
P_("minute 48", "forty eight"),
P_("minute 49", "forty nine"),
P_("minute 50", "fifty"),
P_("minute 51", "fifty one"),
P_("minute 52", "fifty two"),
P_("minute 53", "fifty three"),
P_("minute 54", "fifty four"),
P_("minute 55", "fifty five"),
P_("minute 56", "fifty six"),
P_("minute 57", "fifty seven"),
P_("minute 58", "fifty eight"),
P_("minute 59", "fifty nine"),
]
# time relative to next hour
if time.minute > 30:
hour = time.hour + 1
if hour == 24:
hour = 0
else:
hour = time.hour
if formal:
clock = _formal_time(time, hour, count_hours, count_minutes)
else:
clock = _informal_time(time, hour, count_hours, count_minutes)
return clock
def _abs_timedelta(delta: dt.timedelta) -> dt.timedelta:
"""Return an "absolute" value for a timedelta.
Args:
delta: relative timedelta.
Returns:
absolute timedelta.
"""
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta
def date_and_delta(
    value: str | int | dt.datetime | dt.timedelta,
    *,
    now: dt.datetime | None = None,
) -> tuple[dt.datetime, dt.timedelta]:
    """Resolve *value* to a date plus the absolute timedelta since then."""
    reference = now if now else _now()
    if isinstance(value, dt.datetime):
        moment, gap = value, reference - value
    elif isinstance(value, dt.timedelta):
        moment, gap = reference - value, value
    else:
        # anything else is treated as a number of seconds
        gap = dt.timedelta(seconds=int(value))
        moment = reference - gap
    return moment, _abs_timedelta(gap)
def time_delta(
    value: dt.timedelta | int | dt.datetime,
    use_months: bool = True,
    minimum_unit: str = "seconds",
    when: dt.datetime | None = None,
) -> str:
    """Return human-readable time difference.

    Given a timedelta or a number of seconds, return a natural
    representation of the amount of time elapsed. This is similar to
    ``date_time``, but does not add tense to the result. If ``use_months``
    is True, then a number of months (based on 30.5 days) will be used
    for fuzziness between years.

    Args:
        value: A timedelta or a number of seconds.
        use_months: If `True`, then a number of months (based on 30.5 days) will be
            used for fuzziness between years.
        minimum_unit: The lowest unit that can be used. Options: "years", "months",
            "days", "hours", "minutes", "seconds", "milliseconds" or "microseconds".
        when: Point in time relative to which _value_ is
            interpreted. Defaults to the current time in the local timezone.

    Raises:
        ValueError: if `minimum_unit` is not one of "seconds", "milliseconds"
            or "microseconds" (only sub-minute minimum units are supported).

    Returns:
        Time representation in natural language.
    """
    tmp = Unit[minimum_unit.upper()]
    if tmp not in (Unit.SECONDS, Unit.MILLISECONDS, Unit.MICROSECONDS):
        raise ValueError(f"Minimum unit '{minimum_unit}' not supported")
    minimum_unit_type = tmp
    # Normalise the input into a timedelta relative to `when`.
    if isinstance(value, dt.datetime):
        if not when:
            when = _now()
        delta_value = when - value
    elif isinstance(value, int):
        delta_value = dt.timedelta(seconds=value)
    else:
        delta_value = value
    delta = _abs_timedelta(delta_value)
    seconds = abs(delta.seconds)
    days = abs(delta.days)
    # 365-day years and 30.5-day months give the deliberate fuzziness
    # described in the docstring.
    years = days // 365
    days = days % 365
    months = int(days // 30.5)
    if not years and days < 1:
        return _less_than_a_day(seconds, minimum_unit_type, delta)
    elif years == 0:
        return _less_than_a_year(days, months, use_months)
    elif years == 1:
        return _one_year(days, months, use_months)
    translation = i18n.ngettext("{amount} year", "{amount} years", years)
    return translation.format(amount=years)
def date_time(
    value: dt.timedelta | int | dt.datetime,
    future: bool = False,
    use_months: bool = True,
    minimum_unit: str = "seconds",
    when: dt.datetime | None = None,
) -> str:
    """Return human-readable time.

    Resolves *value* (a datetime, timedelta or number of seconds) to a
    natural-language phrase with tense, roughly compatible with Django's
    ``natural_time`` filter. For datetimes and timedeltas the tense is
    always derived from the actual point in time and ``future`` is ignored;
    for plain integers the phrase is past tense unless ``future`` is True.

    Args:
        value: time value.
        future: if false uses past tense. Defaults to False.
        use_months: if true return number of months. Defaults to True.
        minimum_unit: The lowest unit that can be used.
        when: Point in time relative to which _value_ is
            interpreted. Defaults to the current time in the local timezone.

    Returns:
        Time in natural language.
    """
    reference = when or _now()
    moment, gap = date_and_delta(value, now=reference)
    if isinstance(value, (dt.datetime, dt.timedelta)):
        # tense comes from the resolved moment, not from the caller's flag
        future = moment > reference
    phrase = time_delta(gap, use_months, minimum_unit, when=when)
    if phrase == _("a moment"):
        return _("now")
    template = _("{time_difference} from now") if future else _("{time_difference} ago")
    return template.format(time_difference=phrase)
def day(date: dt.date, formatting: str = "%b %d") -> str:
    """Return human-readable day.

    Dates that fall on yesterday, today or tomorrow (relative to the
    current local date) are rendered as those words; any other date is
    rendered with ``date.strftime(formatting)``.

    Args:
        date: a date.
        formatting: chosen display format.

    Returns:
        str: date formatted in natural language.
    """
    offset_days = (date - dt.date.today()).days
    if offset_days == 0:
        return _("today")
    if offset_days == 1:
        return _("tomorrow")
    if offset_days == -1:
        return _("yesterday")
    return date.strftime(formatting)
def date(date: dt.date) -> str:
    """Return human-readable date.

    Like ``day()``, but appends the year for dates roughly five months or
    more away from today.

    Args:
        date: a date.

    Returns:
        str: date in natural language.
    """
    age = _abs_timedelta(date - dt.date.today())
    # beyond ~5 months (5 * 365 / 12 days) include the year for clarity
    needs_year = age.days >= 5 * 365 / 12
    return day(date, "%b %d %Y") if needs_year else day(date)
def year(date: dt.date) -> str:
    """Return human-readable year.

    Years adjacent to the current one are rendered as "last year",
    "this year" or "next year"; any other year is rendered as digits.

    Args:
        date: a date.

    Returns:
        Year in natural language.
    """
    offset = date.year - dt.date.today().year
    if offset in (-1, 0, 1):
        labels = {-1: _("last year"), 0: _("this year"), 1: _("next year")}
        return labels[offset]
    return str(date.year)
def _quotient_and_remainder(
value: float, divisor: float, unit: Unit, minimum_unit: Unit, suppress: list[Unit]
) -> tuple[float, float]:
"""Divide `value` by `divisor` returning the quotient and remainder.
If `unit` is `minimum_unit`, makes the quotient a float number and the remainder
will be zero. The rational is that if `unit` is the unit of the quotient, we cannot
represent the remainder because it would require a unit smaller than the
`minimum_unit`.
Example:
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.DAYS, [])
(1.5, 0)
If unit is in `suppress`, the quotient will be zero and the remainder will be the
initial value. The idea is that if we cannot use `unit`, we are forced to use a
lower unit so we cannot do the division.
Example:
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.HOURS, [Unit.DAYS])
(0, 36)
In other case return quotient and remainder as `divmod` would do it.
Example:
>>> _quotient_and_remainder(36, 24, Unit.DAYS, Unit.HOURS, [])
(1, 12)
Args:
value: integer value.
divisor: the divisor.
minimum_unit: minimum unit.
unit: the unit of the quotient.
suppress: list of units to be suppressed.
Returns:
Quotient and reminder tuple.
"""
if unit == minimum_unit:
return (value / divisor, 0)
elif unit in suppress:
return (0, value)
else:
return divmod(value, divisor)
def _carry(
value1: float,
value2: float,
ratio: float,
unit: Unit,
min_unit: Unit,
suppress: list[Unit],
) -> tuple[float, float]:
"""Return a tuple with two values.
If the unit is in `suppress`, multiply `value1` by `ratio` and add it to `value2`
(carry to right). The idea is that if we cannot represent `value1` we need to
represent it in a lower unit.
>>> from human_readable.times import _carry, Unit
>>> _carry(2, 6, 24, Unit.DAYS, Unit.SECONDS, [Unit.DAYS])
(0, 54)
If the unit is the minimum unit, `value2` is divided by `ratio` and added to
`value1` (carry to left). We assume that `value2` has a lower unit so we need to
carry it to `value1`.
>>> _carry(2, 6, 24, Unit.DAYS, Unit.DAYS, [])
(2.25, 0)
Otherwise, just return the same input:
>>> _carry(2, 6, 24, Unit.DAYS, Unit.SECONDS, [])
(2, 6)
Args:
value1: one integer.
value2: other integer.
ratio: multiply ratio.
unit: the unit of the quotient.
min_unit: minimum unit.
suppress: list of units to be suppressed.
Returns:
Carry left and carry right.
"""
if unit == min_unit:
return (value1 + value2 / ratio, 0)
elif unit in suppress:
return (0, value2 + value1 * ratio)
else:
return (value1, value2)
def _suitable_minimum_unit(minimum_unit: Unit, suppress: list[Unit]) -> Unit:
"""Return a minimum unit suitable that is not suppressed.
If not suppressed, return the same unit:
>>> from human_readable.times import _suitable_minimum_unit, Unit
>>> _suitable_minimum_unit(Unit.HOURS, [])
<Unit.HOURS: 4>
But if suppressed, find a unit greather than the original one that is not
suppressed:
>>> _suitable_minimum_unit(Unit.HOURS, [Unit.HOURS])
<Unit.DAYS: 5>
>>> _suitable_minimum_unit(Unit.HOURS, [Unit.HOURS, Unit.DAYS])
<Unit.MONTHS: 6>
Args:
minimum_unit: minimum unit.
suppress: list of units to be suppressed.
Raises:
ValueError: when there is not suitable minimum unit given suppress.
Returns:
Minimum unit suitable that is not suppressed.
"""
if minimum_unit in suppress:
for unit in Unit:
if unit > minimum_unit and unit not in suppress:
return unit
raise ValueError(
"Minimum unit is suppressed and no suitable replacement was found."
)
return minimum_unit
def _suppress_lower_units(min_unit: Unit, suppress: list[Unit]) -> list[Unit]:
    """Extend suppressed units (if any) with all units lower than the minimum unit.

    E.g. ``(SECONDS, [DAYS])`` yields ``[MICROSECONDS, MILLISECONDS, DAYS]``
    (in some order), since everything below seconds can never be shown.

    Args:
        min_unit: minimum unit.
        suppress: list of units to be suppressed.

    Returns:
        New suppress list.
    """
    blocked = set(suppress)
    # Unit iterates from the smallest unit upwards; everything strictly
    # below min_unit is implicitly suppressed.
    for candidate in Unit:  # pragma: no branch
        if candidate == min_unit:
            break
        blocked.add(candidate)
    return list(blocked)
def precise_delta(
    value: dt.timedelta | int,
    minimum_unit: str = "seconds",
    suppress: list[str] | None = None,
    formatting: str = ".2f",
) -> str:
    """Return a precise representation of a timedelta.

    >>> import datetime as dt
    >>> from human_readable.times import precise_delta
    >>> delta = dt.timedelta(seconds=3633, days=2, microseconds=123000)
    >>> precise_delta(delta)
    '2 days, 1 hour and 33.12 seconds'

    A custom `formatting` can be specified to control how the fractional part
    is represented:

    >>> precise_delta(delta, formatting=".4f")
    '2 days, 1 hour and 33.1230 seconds'

    Instead, the `minimum_unit` can be changed to have a better resolution;
    the function will still readjust the unit to use the greatest of the
    units that does not lose precision.

    For example setting microseconds but still representing the date with milliseconds:

    >>> precise_delta(delta, minimum_unit="microseconds")
    '2 days, 1 hour, 33 seconds and 123 milliseconds'

    If desired, some units can be suppressed: you will not see them represented and the
    time of the other units will be adjusted to keep representing the same timedelta:

    >>> precise_delta(delta, suppress=["days"])
    '49 hours and 33.12 seconds'

    Note that microseconds precision is lost if the seconds and all
    the units below are suppressed:

    >>> delta = dt.timedelta(seconds=90, microseconds=100)
    >>> precise_delta(delta, suppress=["seconds", "milliseconds", "microseconds"])
    '1.50 minutes'

    If the delta is too small to be represented with the minimum unit,
    a value of zero will be returned:

    >>> delta = dt.timedelta(seconds=1)
    >>> precise_delta(delta, minimum_unit="minutes")
    '0.02 minutes'
    >>> delta = dt.timedelta(seconds=0.1)
    >>> precise_delta(delta, minimum_unit="minutes")
    '0 minutes'

    Args:
        value: a time delta.
        minimum_unit: minimum unit.
        suppress: list of units to be suppressed.
        formatting: standard Python format.

    Returns:
        Humanized time delta.
    """
    # Accept plain seconds as well as timedeltas.
    if isinstance(value, int):
        delta = dt.timedelta(seconds=value)
    else:
        delta = value
    if not suppress:
        suppress_units = []
    else:
        suppress_units = [Unit[unit.upper()] for unit in suppress]
    # Find a suitable minimum unit (it can be greater the one that the
    # user gave us if it is suppressed).
    min_unit = Unit[minimum_unit.upper()]
    min_unit = _suitable_minimum_unit(min_unit, suppress_units)
    del minimum_unit
    # Expand the suppressed units list/set to include all the units
    # that are below the minimum unit
    ext_suppress = _suppress_lower_units(min_unit, suppress_units)
    # handy aliases
    days: float = delta.days
    secs: float = delta.seconds
    usecs: float = delta.microseconds
    MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS, MONTHS, YEARS = list(
        Unit
    )
    # Given DAYS compute YEARS and the remainder of DAYS as follows:
    #   if YEARS is the minimum unit, we cannot use DAYS so
    #   we will use a float for YEARS and 0 for DAYS:
    #       years, days = years/days, 0
    #
    #   if YEARS is suppressed, use DAYS:
    #       years, days = 0, days
    #
    #   otherwise:
    #       years, days = divmod(years, days)
    #
    # The same applies for months, hours, minutes and milliseconds below
    years, days = _quotient_and_remainder(days, 365, YEARS, min_unit, ext_suppress)
    months, days = _quotient_and_remainder(days, 30.5, MONTHS, min_unit, ext_suppress)
    # If DAYS is not in suppress, we can represent the days but
    # if it is a suppressed unit, we need to carry it to a lower unit,
    # seconds in this case.
    #
    # The same applies for secs and usecs below
    days, secs = _carry(days, secs, 24 * 3600, DAYS, min_unit, ext_suppress)
    hours, secs = _quotient_and_remainder(secs, 3600, HOURS, min_unit, ext_suppress)
    minutes, secs = _quotient_and_remainder(secs, 60, MINUTES, min_unit, ext_suppress)
    secs, usecs = _carry(secs, usecs, 1e6, SECONDS, min_unit, ext_suppress)
    msecs, usecs = _quotient_and_remainder(
        usecs, 1000, MILLISECONDS, min_unit, ext_suppress
    )
    # if _unused != 0 we had lost some precision
    usecs, _unused = _carry(usecs, 0, 1, MICROSECONDS, min_unit, ext_suppress)
    # Round each amount towards 1 so ngettext picks the right plural form
    # (unfortunately ngettext does not support floats).
    int_years = math.ceil(years) if years > 1 else math.floor(years) if years < 1 else 1
    int_months = (
        math.ceil(months) if months > 1 else math.floor(months) if months < 1 else 1
    )
    int_days = math.ceil(days) if days > 1 else math.floor(days) if days < 1 else 1
    int_hours = math.ceil(hours) if hours > 1 else math.floor(hours) if hours < 1 else 1
    int_minutes = (
        math.ceil(minutes) if minutes > 1 else math.floor(minutes) if minutes < 1 else 1
    )
    int_secs = math.ceil(secs) if secs > 1 else math.floor(secs) if secs < 1 else 1
    int_msecs = math.ceil(msecs) if msecs > 1 else math.floor(msecs) if msecs < 1 else 1
    int_usecs = math.ceil(usecs) if usecs > 1 else math.floor(usecs) if usecs < 1 else 1
    translations = [
        (N_("{amount} year", "{amount} years", int_years), years),
        (N_("{amount} month", "{amount} months", int_months), months),
        (N_("{amount} day", "{amount} days", int_days), days),
        (N_("{amount} hour", "{amount} hours", int_hours), hours),
        (N_("{amount} minute", "{amount} minutes", int_minutes), minutes),
        (N_("{amount} second", "{amount} seconds", int_secs), secs),
        (N_("{amount} millisecond", "{amount} milliseconds", int_msecs), msecs),
        (N_("{amount} microsecond", "{amount} microseconds", int_usecs), usecs),
    ]
    texts: list[str] = []
    # Walk from the largest unit down to min_unit, collecting non-zero parts.
    for unit, fmt in zip(reversed(Unit), translations):  # pragma: no branch
        translation, amount = fmt
        if amount > 0 or (not texts and unit == min_unit):
            # apply formatting if amount of min unit is fractional
            if unit == min_unit and math.modf(amount)[0] > 0:
                txt_format = translation.replace("{amount}", "{amount:{formatting}}")
                texts.append(txt_format.format(amount=amount, formatting=formatting))
            else:
                texts.append(translation.format(amount=int(amount)))
        if unit == min_unit:
            break
    if len(texts) == 1:
        return texts[0]
    head = ", ".join(texts[:-1])
    tail = texts[-1]
    return _("{head} and {tail}").format(head=head, tail=tail)
| [
37811,
7575,
1692,
2890,
5499,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
33829,
198,
11748,
1257,
310,
10141,
198,
11748,
10688,
198,
6738,
19720,
1330,
4377,
198,
198,
... | 2.430688 | 10,597 |
from django.utils.translation import gettext_lazy
from rest_framework import serializers
from rest_framework.settings import api_settings
from datahub.search.apps import get_global_search_apps_as_mapping
from datahub.search.query_builder import MAX_RESULTS
from datahub.search.utils import SearchOrdering, SortDirection
class SingleOrListField(serializers.ListField):
    """Field can be single instance or list."""

    def to_internal_value(self, data):
        """Validate *data*, accepting either a bare string or a list.

        A bare string is validated via the child serialiser's run_validation()
        so that the error format mirrors the input shape (a list input yields
        one error list per item; a single value yields a single error list).
        run_validation() is called directly because ListField only invokes the
        child's validation in its own to_internal_value() for list inputs.
        """
        if not isinstance(data, str):
            return super().to_internal_value(data)
        return [self.child.run_validation(data)]
class StringUUIDField(serializers.UUIDField):
    """
    String UUID field.

    ES queries cannot take UUID instances, so validated values are converted
    back to strings.
    """

    def to_internal_value(self, data):
        """Validate *data* as a UUID, then return it as a string."""
        validated_uuid = super().to_internal_value(data)
        return str(validated_uuid)
class IdNameSerializer(serializers.Serializer):
    """Serializer to return metadata constant with id and name."""
    # UUID rendered as a plain string (see StringUUIDField above).
    id = StringUUIDField()
    name = serializers.CharField()
class _ESOrderingField(serializers.Field):
    """Serialiser field for specifying an ordering for a search."""

    default_error_messages = {
        'invalid_field': gettext_lazy('"{input}" is not a valid choice for the sort field.'),
        'invalid_direction': gettext_lazy('"{input}" is not a valid sort direction.'),
    }
    default_direction = SortDirection.asc

    def __init__(self, *args, **kwargs):
        """Initialise the field with no sortable choices configured yet."""
        super().__init__(*args, **kwargs)
        self.choices = None

    def configure(self, choices, default):
        """Sets the choices and default ordering for the field."""
        self.choices = choices
        self.default = default

    def to_internal_value(self, data):
        """Converts an ordering string (e.g. 'name:asc') to a SearchOrdering."""
        sort_field, _, raw_direction = data.partition(':')
        if sort_field not in self.choices:
            self.fail('invalid_field', input=sort_field)
        if not raw_direction:
            # no explicit direction given — fall back to the field default
            parsed_direction = self.default_direction
        else:
            try:
                parsed_direction = SortDirection(raw_direction)
            except ValueError:
                self.fail('invalid_direction', input=raw_direction)
        return SearchOrdering(sort_field, parsed_direction)

    def to_representation(self, value):
        """Converts a SearchOrdering to an ordering string."""
        return f'{value.field}:{value.direction}'
class BaseSearchQuerySerializer(serializers.Serializer):
    """Base serialiser for basic (global) and entity search."""

    # Subclasses override these to declare their sortable fields and the
    # ordering applied when the client does not specify one.
    SORT_BY_FIELDS = []
    DEFAULT_ORDERING = None

    offset = serializers.IntegerField(default=0, min_value=0, max_value=MAX_RESULTS - 1)
    limit = serializers.IntegerField(default=api_settings.PAGE_SIZE, min_value=1)
    sortby = _ESOrderingField(required=False)

    def __init__(self, *args, **kwargs):
        """Initialises the serialiser and configures the `sortby` field."""
        # fix: catch-all keyword parameter was misspelled `kwrags`
        super().__init__(*args, **kwargs)
        self.fields['sortby'].configure(self.SORT_BY_FIELDS, self.DEFAULT_ORDERING)
class _ESModelChoiceField(serializers.Field):
    """Serialiser field for selecting an ES model by name."""

    default_error_messages = {
        'invalid_choice': gettext_lazy('"{input}" is not a valid choice.'),
    }

    def get_default(self):
        """Gets the default value for the field.

        A string default is resolved to its model so callers always receive
        a model object.
        """
        configured_default = super().get_default()
        if not isinstance(configured_default, str):
            return configured_default
        return self.to_internal_value(configured_default)

    def to_internal_value(self, data):
        """Translates a model name to a model."""
        search_apps = get_global_search_apps_as_mapping()
        if data in search_apps:
            return search_apps[data].es_model
        self.fail('invalid_choice', input=data)

    def to_representation(self, value):
        """Translates a model to a model name."""
        return value.get_app_name()
class BasicSearchQuerySerializer(BaseSearchQuerySerializer):
    """Serialiser used to validate basic (global) search query parameters."""
    # entity defaults to searching companies; term may be empty but must be present
    entity = _ESModelChoiceField(default='company')
    term = serializers.CharField(required=True, allow_blank=True)
class EntitySearchQuerySerializer(BaseSearchQuerySerializer):
    """Serialiser used to validate entity search POST bodies."""
    # the raw query string as entered by the user (may be blank)
    original_query = serializers.CharField(default='', allow_blank=True)
class AutocompleteSearchQuerySerializer(serializers.Serializer):
    """Serialiser used for the autocomplation search query parameters."""
    term = serializers.CharField(required=True, allow_blank=True)
    # cap on number of suggestions returned
    limit = serializers.IntegerField(default=10, min_value=1)
| [
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
33692,
1330,
40391,
62,
33692,
198,
198,
6738,
4818,
993,
549,
13,
12947,
13,
18... | 2.774515 | 1,907 |
### Load arguments
import sys, getopt

if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module as shown — presumably
    # a getopt-based CLI parser returning (fasta_path, model_id); confirm it
    # exists before running. When imported (not run), `new_seq`/`model_ID`
    # below would be undefined.
    new_seq, model_ID = main(sys.argv[1:])

### Load libraries
from keras.layers.convolutional import Conv1D, MaxPooling1D
from keras.layers.core import Dropout, Reshape, Dense, Activation, Flatten
from keras.layers import BatchNormalization, InputLayer, Input
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, History
import pandas as pd
import numpy as np
import sys
sys.path.append('Neural_Network_DNA_Demo/')
# helper package from https://github.com/bernardo-de-almeida/Neural_Network_DNA_Demo.git
from helper import IOHelper, SequenceHelper

### Load sequences
print("\nLoading sequences ...\n")
input_fasta = IOHelper.get_fastas_from_file(new_seq, uppercase=True)
print(input_fasta.shape)
# length of first sequence — assumes all sequences share this length; TODO confirm
sequence_length = len(input_fasta.sequence.iloc[0])
# Convert sequence to one hot encoding matrix
seq_matrix = SequenceHelper.do_one_hot_encoding(input_fasta.sequence, sequence_length,
                                                SequenceHelper.parse_alpha_to_seq)

### load model
# NOTE(review): `load_model` is not defined in this chunk — confirm it is
# provided elsewhere (returns model, weights path and architecture JSON).
keras_model, keras_model_weights, keras_model_json = load_model(model_ID)

### predict dev and hk activity
print("\nPredicting ...\n")
pred=keras_model.predict(seq_matrix)
out_prediction = input_fasta
out_prediction['Predictions_dev'] = pred[0]
out_prediction['Predictions_hk'] = pred[1]

### save file
print("\nSaving file ...\n")
import os.path
model_ID_out=os.path.basename(model_ID)
out_prediction.to_csv(new_seq + "_predictions_" + model_ID_out + ".txt", sep="\t", index=False)
| [
198,
21017,
8778,
7159,
198,
198,
11748,
25064,
11,
651,
8738,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
649,
62,
41068,
11,
2746,
62,
2389,
796,
1388,
7,
17597,
13,
853,
85,
58,
16,
25,
129... | 2.721284 | 592 |
# Print 13 steps of two arithmetic progressions:
# I climbs by 3 starting at 1, J drops by 5 starting at 60.
i_value, j_value = -2, 65
for _ in range(1, 14):
    j_value -= 5
    i_value += 3
    print('I=%d J=%d' % (i_value, j_value))
220,
198,
73,
11,
72,
28,
2996,
12095,
17,
198,
1640,
314,
287,
2837,
357,
16,
11,
1415,
2599,
198,
220,
220,
220,
449,
28,
474,
12,
20,
198,
220,
220,
220,
314,
28,
72,
10,
18,
198,
220,
220,
220,
3601,
10786,
40,
28,
4,
67... | 1.402778 | 72 |
#!/usr/bin/env python
import argparse
import copy
import logging
import os
import random
import time
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision import models
from utils import get_datasets, get_device
fmt = "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s: %(message)s"
logging.basicConfig(format=fmt, level=logging.DEBUG)
def train_model(
    model: nn.Module,
    dataloaders: Dict[str, DataLoader],
    criterion: nn.Module,
    optimizer: optim.Optimizer,
    num_epochs: int = 25,
) -> Tuple[nn.Module, List[float]]:
    """ Train the provided model.

    Runs a standard train/validation loop for `num_epochs` epochs, tracking
    the best validation accuracy and restoring those weights at the end.

    Args:
        model: network to train (moved-to-device by the caller; TODO confirm —
            only the inputs/labels are moved to the device here).
        dataloaders: mapping with "train" and "val" DataLoaders.
        criterion: loss function.
        optimizer: optimiser updating `model`'s parameters.
        num_epochs: number of epochs to run.

    Returns:
        Tuple of (model with best-validation weights loaded, list of
        per-epoch validation accuracies).
    """
    since = time.time()
    val_acc_history = []
    # snapshot of the best-performing weights seen so far
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    device = get_device()
    for epoch in range(num_epochs):
        logging.info("Epoch {}/{}".format(epoch, num_epochs - 1))
        logging.info("-" * 10)
        # Each epoch has a training and validation phase
        for phase in ["train", "val"]:
            if phase == "train":
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == "train"):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    # predicted class = argmax over the class dimension
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # statistics (loss is per-sample mean, so rescale by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            logging.info(
                "{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc)
            )
            # deep copy the model
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == "val":
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    logging.info(
        "Training complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60
        )
    )
    logging.info("Best val Acc: {:4f}".format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history
if __name__ == "__main__":
    # NOTE(review): `main` is not defined anywhere in this module as shown —
    # confirm an entry point (e.g. argparse-driven, given the argparse import)
    # exists before running this script directly.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
4866,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
640,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
309,
29291,
198,
198,
... | 2.12045 | 1,511 |
"""
Real time AEmotion (env: 'torch')
"""
# %% Import libs
import pyaudio
import numpy as np
# import pickle
# import librosa
import keract
import sys
sys.path.append('..')
from src.modeling.tcn.tcn import TCN
import os
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from tensorflow.keras.models import model_from_json
from sklearn.preprocessing import MinMaxScaler
# import matplotlib.pyplot as plt
# from IPython.display import clear_output
from datetime import datetime as dtime
import opensmile
import paho.mqtt.client as mqtt #import the client1
if __name__ == "__main__":
    # NOTE(review): `process` is not defined in this chunk — confirm the
    # real-time capture/inference entry point exists before running.
    process()
# %%
| [
37811,
198,
15633,
640,
317,
10161,
9650,
357,
24330,
25,
705,
13165,
354,
11537,
198,
37811,
198,
2,
43313,
17267,
9195,
82,
198,
11748,
12972,
24051,
198,
11748,
299,
32152,
355,
45941,
198,
2,
1330,
2298,
293,
198,
2,
1330,
9195,
4... | 2.897674 | 215 |
# Charlotte Capitanchik
# 25.04.18
# Based on fastCLIP tRNA strategy
# input is SAM of tRNA mapping
# and .fai of what is was mapped to
import pysam
from collections import defaultdict
import operator
from collections import Counter
import sys
import statistics
import random
import re
# input #
#trna_fai = sys.argv[1] # "/Volumes/lab-luscomben/working/charlotte/miCLIP_UN1710_CS2_delta2/summarising_trna_mapping_style/permanent_files/UCSC_hg38_tRNAs_mature_PTM-deduplicated.fa.fai" # #"/Users/capitac/PhD/trna_index/hg38-mature-tRNAs-deduplicated.fa.fai"
reads = sys.argv[1]  # BAM of reads mapped to mature tRNA sequences
dir_and_samp = sys.argv[2]  # output prefix: directory + sample name
samp = sys.argv[3]  # sample name
seed = sys.argv[4]  # random seed (kept as str; random.seed accepts it) for resolving multimapped start positions
fraction_merge = float(sys.argv[5])  # fraction of multimapped reads to recover by merging anticodons (e.g. 0.9)
# output #
# "_trna_summary_stats.tsv"
# "_trna_mixedup_anticodons.tsv"
# "_ambig_ac_position_dist.tsv"
## Code to parse the .fai file to get the name of all the amino acids and counts .etc ##
## Not required for main output
# trna_index = open(trna_fai).readlines()
# aminoacid_count = defaultdict(int)
# anticodon_list=[]
# aminoacid_list=[]
# trna_index
# for i in range(0,len(trna_index)):
# linelist = trna_index[i].split("\t")
# namelist = trna_index[i].split("-")
# anticodon = "-".join(namelist[1:3])
# amino_acid = namelist[1]
# anticodon_list.append(anticodon)
# aminoacid_list.append(amino_acid)
# aminoacid_count[amino_acid] += 1
# anticodon_list = set(anticodon_list)
# aminoacid_list = set(aminoacid_list)
# # Find out the amino acid with the highest number of genes
# max_key = max(aminoacid_count.iterkeys(), key=(lambda key: aminoacid_count[key]))
# aminoacid_count[max_key]
#print(aminoacid_count[max_key])
## Now move on to the reads ##
samfile = pysam.AlignmentFile(reads,"rb")
## Summarise at three levels: amino acid, anticodon and start position ##
read_dictionary_amino_acid = defaultdict(set)
read_dictionary_anticodon = defaultdict(set)
read_dictionary_start_positions = defaultdict(list)
read_gene_dict = defaultdict(list)
readname_list = []
for read in samfile.fetch():
    if read.is_unmapped == False:
        # reference names look like "[nm-|nmt-]tRNA-<AminoAcid>-<Anticodon>-..."
        # (the nm-/nmt- mitochondrial prefixes are stripped before splitting)
        aa_tmp = samfile.get_reference_name(read.reference_id)
        aa_tmp = re.sub("^(nm-|nmt-)","", aa_tmp).split("-")
        aa = aa_tmp[1]
        anticodon = "-".join(aa_tmp[1:3])
        read_dictionary_amino_acid[read.query_name].add(aa)
        read_dictionary_anticodon[read.query_name].add(anticodon)
        read_dictionary_start_positions[read.query_name] += [read.reference_start]
        read_gene_dict[read.query_name] += [samfile.get_reference_name(read.reference_id)]
        readname_list += [read.query_name]
    else:
        continue
## #### For a file where anticodons are not merged ######
# keep only reads that map unambiguously to a single anticodon
FINAL_read_dictionary_notmerge = defaultdict(set)
for key, value in read_dictionary_anticodon.items():
    if len(value) == 1:
        FINAL_read_dictionary_notmerge[key] = value
final_cdna_counts_dict_notmerge = defaultdict(list)
random.seed(seed)
# assign positions, use mode if possible, but if no mode then select randomly
# from multimapped positions using random seed
for key,value in FINAL_read_dictionary_notmerge.items():
    try:
        x = statistics.mode(read_dictionary_start_positions[key])
        final_cdna_counts_dict_notmerge["".join(value)] += [x]
    # NOTE(review): bare except also swallows errors other than StatisticsError
    except:
        final_cdna_counts_dict_notmerge["".join(value)] += [random.choice(read_dictionary_start_positions[key])]
# collapse each anticodon's position list into per-position counts
for key,value in final_cdna_counts_dict_notmerge.items():
    final_cdna_counts_dict_notmerge[key] = Counter(value)
########### Unmerged ends here #################################
## Find out which anticodons are ambiguous ##
number_of_reads_multimapped_anticodon_dict = defaultdict(int)
for key, value in read_dictionary_anticodon.items():
    if len(value) > 1:
        number_of_reads_multimapped_anticodon_dict[";".join(sorted(value))] += 1
sorted_mixup_ranking = sorted(number_of_reads_multimapped_anticodon_dict.items(), key=operator.itemgetter(1), reverse=True)
total = sum(number_of_reads_multimapped_anticodon_dict.values())
fract_total = total * fraction_merge
print(total)
print(fract_total)
counting=0
thresholded_multimapped_anticodon_dict = defaultdict(int)
# Threshold mixed up anticodons based on % reads we want to recover:
# take the most common ambiguous combinations until fraction_merge of the
# multimapped reads are covered
for i in sorted_mixup_ranking:
    counting += i[1]
    print(i)
    name=i[0]
    number=i[1]
    thresholded_multimapped_anticodon_dict[name] = number
    if counting >= fract_total:
        break
# This is the actual fraction of reads we are recovering
real_frac = float(sum(thresholded_multimapped_anticodon_dict.values())) / float(total)
# The anticodons we are going to merge
comparison_list = list(thresholded_multimapped_anticodon_dict.keys())
## Merge anticodons that are ambiguous ################
FINAL_read_dictionary = defaultdict(set)
# If "readmap" has components in "checklist", then replace with the
# corresponding entry in checklist ->
# If it doesn't - return readmap
# If it has conflicting entries - return readmap
def getIndex(readmap, checklist):
    """Map a read's anticodon symbols onto merged-anticodon groups.

    Args:
        readmap: iterable of anticodon symbols for one read.
        checklist: list of merged-group strings, each a semicolon-delineated
            set of anticodons (e.g. "Glu-CTC;Glu-TTC").

    Returns:
        ``[group]`` when every symbol belongs to exactly one (and the same)
        group in `checklist`; otherwise the original symbols as a list
        (a symbol matched no group, or the symbols span several groups).
    """
    symbols = list(readmap)  # type cast from set to list
    matched_groups = set()
    for symbol in symbols:
        hits = {
            idx
            for idx, group in enumerate(checklist)
            if symbol in group.split(';')
        }
        # -1 marks a symbol that belongs to no merged group
        matched_groups.update(hits if hits else {-1})
    # ambiguous (several groups), unmatched (-1) or empty input: keep as-is
    if not matched_groups or -1 in matched_groups or len(matched_groups) > 1:
        return symbols
    return [checklist[matched_groups.pop()]]
tempdict = {}
for key, value in read_dictionary_anticodon.items():
newval = getIndex(value, comparison_list)
tempdict[key] = newval
# Filter for single mapping anticodon/merged anticodons
for key, value in tempdict.items():
if len(value) == 1:
FINAL_read_dictionary[key] = value
# assign possitions
final_cdna_counts_dict = defaultdict(list)
random.seed(seed)
for key,value in FINAL_read_dictionary.items():
try:
x = statistics.mode(read_dictionary_start_positions[key])
final_cdna_counts_dict["".join(value)] += [x]
except:
final_cdna_counts_dict["".join(value)] += [random.choice(read_dictionary_start_positions[key])]
# count at positions
for key,value in final_cdna_counts_dict.items():
final_cdna_counts_dict[key] = Counter(value)
# Write a file condensed to groups of anticodons
finalfile = open(dir_and_samp + "_tRNA.bed", "w")
for key,value in final_cdna_counts_dict.items():
gene = key
for k in value:
finalfile.write(gene+"\t"+str(k)+"\t"+str(k+1)+"\t"+gene+"\t"+str(value[k])+"\t+\n")
finalfile.close()
# Write a file where only reads unambiguous for anticodon are used
finalfile = open(dir_and_samp + "_tRNA_unambig_AC.bed", "w")
for key,value in final_cdna_counts_dict_notmerge.items():
gene = key
for k in value:
finalfile.write(gene+"\t"+str(k)+"\t"+str(k+1)+"\t"+gene+"\t"+str(value[k])+"\t+\n")
finalfile.close()
##### ESSENTIAL CODE STOPS HERE: JUST SUMMARY FILES FROM NOW ON #####
# Some statistics for a summary file
total_mapped_reads = len(read_dictionary_amino_acid)
total_uniquely_mapped = 0
number_of_reads_multimapped_aa = 0 # number of reads that are ambigous for amino acid
number_of_reads_multimapped_anticodon = 0 # number of reads that are ambiguous for anticodon
number_of_reads_multimapped_anticodon_dict = defaultdict(int)
for key, value in read_dictionary_start_positions.items():
if len(value) == 1:
total_uniquely_mapped += 1
for key, value in read_dictionary_amino_acid.items():
if len(value) > 1:
number_of_reads_multimapped_aa += 1
reads_unambig_AC = defaultdict(str)
reads_unambig_pos = defaultdict(list)
for key, value in read_dictionary_anticodon.items():
if len(value) > 1:
number_of_reads_multimapped_anticodon += 1
number_of_reads_multimapped_anticodon_dict[";".join(value)] += 1
elif len(value) == 1:
reads_unambig_AC[key] = "".join(value)
reads_unambig_pos[key] = read_dictionary_start_positions[key]
# Sort the dictionary by highest to lowest
mixed_up_anticodons = open(dir_and_samp+"_tRNA_mixedup_anticodons.tsv", "w")
sorted_mixup_ranking = sorted(number_of_reads_multimapped_anticodon_dict.items(), key=operator.itemgetter(1), reverse=True)
for i in sorted_mixup_ranking:
if i[1] > 200:
mixed_up_anticodons.write(str(i[0])+"\t"+str(i[1])+"\t"+str(total_mapped_reads)+"\t"+samp+"\n")
mixed_up_anticodons.close()
summary_stats = open(dir_and_samp+"_tRNA_summary_stats.tsv", "w")
summary_stats.write("Total mapped reads to trna\t"+str(total_mapped_reads)+"\t"+samp+"\n")
summary_stats.write("Total single mapping reads to trna\t"+str(total_uniquely_mapped)+"\t"+samp+"\n")
summary_stats.write("Number of reads that are ambiguous at amino acid level\t"+str(number_of_reads_multimapped_aa)+"\t"+samp+"\n")
summary_stats.write("Percentage of reads used for amino acid summary\t"+str(round(float(total_mapped_reads-number_of_reads_multimapped_aa)/total_mapped_reads,3))+"\t"+samp+"\n")
summary_stats.write("Number of reads that are ambiguous at anticodon level\t"+str(number_of_reads_multimapped_anticodon)+"\t"+samp+"\n")
summary_stats.write("Percentage of reads used for anticodon summary\t"+str(round(float(total_mapped_reads-number_of_reads_multimapped_anticodon)/total_mapped_reads,3))+"\t"+samp+"\n")
summary_stats.write("The actual fraction of ambiguous reads that are recovered by merging anticodons\t"+str(real_frac)+"\n")
summary_stats.write("The anticodons that were merged are\t"+("|").join(comparison_list))
### AAlevel summary
aa_counts = defaultdict(int)
for key, value in read_dictionary_amino_acid.items():
if len(value) == 1:
aa_counts["".join(value)] += 1
### Write to file
aa_counts
### Anticodon level summary & also get names of reads that are unique to one anticodon
anticodon_counts = defaultdict(int)
unique_anticodon_read_names = set()
for key, value in read_dictionary_anticodon.items():
if len(value) == 1:
anticodon_counts["".join(value)] += 1
unique_anticodon_read_names.add(key)
anticodon_counts
### How many reads that multimap within one anticodon have different starting positions? What is the maximum distance between them?
# list of reads that have ambiguous starting positions
# (here we assume that if there is a mode the starting position is "unambiguous")
# ie. if positions are 58 58 58 58 58 59 then we would take 58 as the "unambiguous" start
ambig_pos = 0
ambig_pos_reads = []
ambig_distances = []
for key,value in reads_unambig_pos.items():
if len(set(value)) != 1:
try:
x = statistics.mode(value)
except:
ambig_pos += 1
ambig_pos_reads.append(key)
ambig_distances.append(max(value)-min(value))
anticodons_ambig_pos = dict((k, read_dictionary_anticodon[k]) for k in ambig_pos_reads if k in read_dictionary_anticodon)
ambig_pos_list = []
for key,value in anticodons_ambig_pos.items():
ambig_pos_list.append("".join(value))
ac_affected = Counter(ambig_pos_list)
summary_stats.write("Number of reads that have an ambiguous position at anticodon level\t"+str(ambig_pos)+"\t"+samp+"\n")
if len(ambig_distances) != 0:
summary_stats.write("Median distance between ambiguous positions at anticodon level\t"+str(statistics.median(ambig_distances))+"\t"+samp+"\n")
else:
summary_stats.write("Median distance between ambiguous positions at anticodon level\t"+"NA"+"\t"+samp+"\n")
if len(ambig_distances) != 0:
summary_stats.write("Biggest distance between ambiguous positions at anticodon level\t"+str(max(ambig_distances))+"\t"+samp+"\n")
else:
summary_stats.write("Biggest distance between ambiguous positions at anticodon level\t"+"NA"+"\t"+samp+"\n")
summary_stats.close()
ambig_stats = open(dir_and_samp+"_tRNA_ambig_ac_position_dist.tsv", "w")
for key,value in ac_affected.items():
ambig_stats.write(key+"\t"+str(value)+"\t"+str(ambig_pos)+"\n")
ambig_stats.close() | [
2,
14685,
4476,
270,
3702,
1134,
198,
2,
1679,
13,
3023,
13,
1507,
198,
2,
13403,
319,
3049,
5097,
4061,
256,
27204,
4811,
198,
2,
5128,
318,
28844,
286,
256,
27204,
16855,
198,
2,
290,
764,
69,
1872,
286,
644,
318,
373,
27661,
28... | 2.545491 | 5,034 |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 11:10:35 2021
@author: Clau
"""
'''
Paper: Energy sufficiency, lowlands.
User: Health Center
'''
from core import User, np
User_list = []
#Definig users
HC = User("Health center", 1)
User_list.append(HC)
HC_indoor_bulb = HC.Appliance(HC,20,7,2,690,0.2,10)
HC_indoor_bulb.windows([480,720],[870,1440],0.35)
HC_outdoor_bulb = HC.Appliance(HC,5,13,2,690,0.2,10)
HC_outdoor_bulb.windows([0,342],[1037,1440],0.35)
HC_Phone_charger = HC.Appliance(HC,5,2,2,300,0.2,5)
HC_Phone_charger.windows([480,720],[900,1440],0.35)
HC_TV = HC.Appliance(HC,2,150,2,360,0.1,60)
HC_TV.windows([480,720],[780,1020],0.2)
HC_radio = HC.Appliance(HC,5,40,2,360,0.3,60)
HC_radio.windows([480,720],[780,1020],0.35)
HC_PC = HC.Appliance(HC,2,200,2,300,0.1,10)
HC_PC.windows([480,720],[1050,1440],0.35)
HC_printer = HC.Appliance(HC,2,100,1,60,0.3,10)
HC_printer.windows([540,1020],[0,0])
HC_fan = HC.Appliance(HC,2,60,1,240,0.2,60)
HC_fan.windows([660,960],[0,0])
HC_sterilizer_stove = HC.Appliance(HC,3,600,2,120,0.3,30)
HC_sterilizer_stove.windows([540,600],[900,960],0.35)
HC_needle_destroyer = HC.Appliance(HC,1,70,1,60,0.2,10)
HC_needle_destroyer.windows([540,600],[0,0],0.35)
HC_water_pump = HC.Appliance(HC,1,400,1,30,0.2,10)
HC_water_pump.windows([480,510],[0,0])
HC_Fridge = HC.Appliance(HC,4,150,1,1440,0,30, 'yes',3)
HC_Fridge.windows([0,1440],[0,0])
HC_Fridge.specific_cycle_1(150,20,5,10)
HC_Fridge.specific_cycle_2(150,15,5,15)
HC_Fridge.specific_cycle_3(150,10,5,20)
HC_Fridge.cycle_behaviour([580,1200],[0,0],[420,579],[0,0],[0,419],[1201,1440])
HC_microscope = HC.Appliance(HC,2,3,2,120,0.2,10)
HC_microscope.windows([480,720],[840,960],0.35)
HC_shower = HC.Appliance(HC,3,3000,2,120,0.1,15)
HC_shower.windows([360,720],[780,1400],0.35)
HC_heater = HC.Appliance(HC,2,1500,2,180,0.25,60)
HC_heater.windows([369,720],[1080,1260],0.35)
HC_dental_compresor = HC.Appliance(HC,2,500,2,60,0.15,10)
HC_dental_compresor.windows([480,720],[840,1260],0.2)
HC_centrifuge = HC.Appliance(HC,2,100,1,60,0.15,10)
HC_centrifuge.windows([480,720],[0,0],0.35)
HC_serological_rotator = HC.Appliance(HC,2,10,1,60,0.25,15)
HC_serological_rotator.windows([480,720],[0,0],0.35) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
2892,
8621,
1511,
1367,
25,
940,
25,
2327,
33448,
201,
198,
201,
198,
31,
9800,
25,
1012,
559,
201,
198,
37811,
201,
198,
201,
198,
... | 1.909091 | 1,199 |
"""
Estimate Lasso and Elastic-Net regression models on a manually generated sparse
signal corrupted with an additive noise.
"""
# %%
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
np.random.seed(42)
n_samples, n_features = 50, 100
X = np.random.randn(n_samples, n_features)
idx = np.arange(n_features)
coef = (-1)**idx * np.exp(-idx / 10)
coef[10:] = 0
y = np.dot(X, ceof)
y += 0.01 * np.random.normal(size=n_samples)
n_samples = X.shape[0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# %% Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print('r^2 on test data : %f' % r2_score_lasso)
# %% ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print('r^2 on test data : %f' % r2_score_enet)
m, s, _ = plt.stem(np.where(enet.coef_)[0],
enet.coef_[enet.coef_ != 0],
markerfmt='x',
label='Elastic net coefficents',
use_line_collection=True)
plt.setp([m, s], color='#2ca02c')
m, s, _ = plt.stem(np.where(lasso.coef_)[0],
lasso.coef_[lasso.coef_ != 0],
markerfmt='x',
label='Lasso coefficents',
use_line_collection=True)
plt.setp([m, s], color='#ff7f0e')
m, s, _ = plt.stem(np.where(coef)[0],
lasso.coef_[coef != 0],
markerfmt='bx',
label='True coefficents',
use_line_collection=True)
plt.legend(loc='best')
plt.title('Lasso $R^2$: %.3f, Elastic Net $R^2$: %.3f' %
(r2_score_lasso, r2_score_enet))
plt.show()
# %%
| [
37811,
198,
22362,
1920,
406,
28372,
290,
48567,
12,
7934,
20683,
4981,
319,
257,
14500,
7560,
29877,
198,
12683,
282,
26940,
351,
281,
38298,
7838,
13,
198,
37811,
198,
2,
43313,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29... | 1.980237 | 1,012 |
"""This module solves kata https://www.codewars.com/kata/n-th-fibonacci."""
def original_solution(n):
"""Return the nth fibonacci number."""
if n == 1:
return 0
a, b = 0, 1
for i in range(1, n - 1):
a, b = b, (a + b)
return b
#better solution
def nth_fib(n):
"""Return the nth fibonacci number. Per the kata, f(1) is supposed to be
0 so the fibonacci sequence for this kata was not indexed at 0."""
a, b = 0, 1
for __ in range(n-1):
a, b = b, a + b
return a
| [
37811,
1212,
8265,
39107,
479,
1045,
3740,
1378,
2503,
13,
19815,
413,
945,
13,
785,
14,
74,
1045,
14,
77,
12,
400,
12,
69,
571,
261,
44456,
526,
15931,
198,
198,
4299,
2656,
62,
82,
2122,
7,
77,
2599,
198,
220,
220,
220,
37227,
... | 2.220339 | 236 |
import random_functions, colorama, Forensics_tool_redesigned_using_oops
import sqlite3, getpass, os
#module 5 - Browser Forensics
#chrome_tool_start
#chrome_tool_end
#mozilla_tool_start
#mozilla_tool_end | [
11748,
4738,
62,
12543,
2733,
11,
3124,
1689,
11,
4558,
49242,
62,
25981,
62,
445,
274,
3916,
62,
3500,
62,
44860,
201,
198,
11748,
44161,
578,
18,
11,
651,
6603,
11,
28686,
201,
198,
201,
198,
2,
21412,
642,
532,
34270,
4558,
49242... | 2.266055 | 109 |
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import sys
import unittest
sys.path.append('..')
from text2vec import SBert
from text2vec import BM25
from sentence_transformers.util import cos_sim
sbert_model = SBert('paraphrase-multilingual-MiniLM-L12-v2')
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
55,
84,
44,
278,
7,
87,
12595,
21,
1731,
31,
38227,
13,
785,
8,
198,
31,
11213,
25,
220,
198,
37811,
198,
11748,
25064,
198,
11748,
555,
... | 2.510949 | 137 |
#!/usr/bin/env python3
import sys
import argparse
from nltk import sent_tokenize
from nltk import word_tokenize
from nltk import ngrams as nltk_ngrams
from functools import partial
LPAD_SYMBOL = "<s>"
RPAD_SYMBOL = "</s>"
nltk_ngrams = partial(nltk_ngrams,
pad_right=True, pad_left=True,
right_pad_symbol=RPAD_SYMBOL, left_pad_symbol=LPAD_SYMBOL
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--nltk", action="store_true", help="use nltk method")
parser.add_argument("-n", type=int, default=2, help="ngram size to compute")
parser.add_argument("phrase", help="surround a single phrase in quotes")
args = parser.parse_args()
if args.nltk:
for gram in ngrams2(args.phrase, args.n):
print(gram)
else:
for gram in ngrams(word_tokenize(args.phrase), args.n):
print(gram)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
198,
6738,
299,
2528,
74,
1330,
1908,
62,
30001,
1096,
198,
6738,
299,
2528,
74,
1330,
1573,
62,
30001,
1096,
198,
6738,
299,
2528,... | 2.389785 | 372 |
"""
Usage:
python small_satrn_inference.py vedastr/test.jpg
"""
import argparse
import os
import sys
import cv2
from vedastr.runners import InferenceRunner
from vedastr.utils import Config
from pathlib import Path
from vedastr.helper import download_drive_file
home = str(Path.home())
if __name__ == '__main__':
main() | [
37811,
198,
28350,
25,
198,
29412,
1402,
62,
49720,
35906,
62,
259,
4288,
13,
9078,
410,
276,
459,
81,
14,
9288,
13,
9479,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
269,
85,
17,
198,
... | 2.877193 | 114 |
'''
给定一个二叉树和一个目标和,判断该树中是否存在根节点到叶子节点的路径,这条路径上所有节点值相加等于目标和。
说明: 叶子节点是指没有子节点的节点。
返回值:布尔值
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
| [
7061,
6,
198,
163,
119,
247,
22522,
248,
31660,
10310,
103,
12859,
234,
20998,
231,
43718,
239,
161,
240,
234,
31660,
10310,
103,
33566,
106,
43718,
229,
161,
240,
234,
171,
120,
234,
26344,
97,
23877,
255,
46237,
98,
43718,
239,
4079... | 1.087452 | 263 |
"""Two way (mutual nearest neighbor) matcher.
Authors: Ayush Baid
"""
from typing import Tuple
import cv2 as cv
import numpy as np
from enum import Enum
from gtsfm.common.keypoints import Keypoints
from gtsfm.frontend.matcher.matcher_base import MatcherBase
class MatchingDistanceType(Enum):
"""Type of distance metric to use for matching descriptors."""
HAMMING = 1
EUCLIDEAN = 2
class TwoWayMatcher(MatcherBase):
"""Two way (mutual nearest neighbor) matcher using OpenCV."""
def match(
self,
keypoints_i1: Keypoints, # pylint: disable=unused-argument
keypoints_i2: Keypoints, # pylint: disable=unused-argument
descriptors_i1: np.ndarray,
descriptors_i2: np.ndarray,
im_shape_i1: Tuple[int, int], # pylint: disable=unused-argument
im_shape_i2: Tuple[int, int], # pylint: disable=unused-argument
) -> np.ndarray:
"""Match descriptor vectors.
Output format:
1. Each row represents a match.
2. First column represents keypoint index from image #i1.
3. Second column represents keypoint index from image #i2.
4. Matches are sorted in descending order of the confidence (score), if possible.
Args:
keypoints_i1: keypoints for image #i1, of length N1.
keypoints_i2: keypoints for image #i2, of length N2.
descriptors_i1: descriptors corr. to keypoints_i1.
descriptors_i2: descriptors corr. to keypoints_i2.
im_shape_i1: shape of image #i1, as height, width.
im_shape_i2: shape of image #i2, as height, width.
Returns:
Match indices (sorted by confidence), as matrix of shape (N, 2), where N < min(N1, N2).
"""
if self._distance_type is MatchingDistanceType.EUCLIDEAN:
distance_metric = cv.NORM_L2
elif self._distance_type is MatchingDistanceType.HAMMING:
distance_metric = cv.NORM_HAMMING
else:
raise NotImplementedError("The distance type is not in MatchingDistanceType")
if descriptors_i1.size == 0 or descriptors_i2.size == 0:
return np.array([])
# we will have to remove NaNs by ourselves
valid_idx_i1 = np.nonzero(~(np.isnan(descriptors_i1).any(axis=1)))[0]
valid_idx_i2 = np.nonzero(~(np.isnan(descriptors_i2).any(axis=1)))[0]
descriptors_1 = descriptors_i1[valid_idx_i1]
descriptors_2 = descriptors_i2[valid_idx_i2]
# run OpenCV's matcher
bf = cv.BFMatcher(normType=distance_metric, crossCheck=True)
matches = bf.match(descriptors_1, descriptors_2)
matches = sorted(matches, key=lambda r: r.distance)
match_indices = np.array([[m.queryIdx, m.trainIdx] for m in matches]).astype(np.int32)
if match_indices.size == 0:
return np.array([])
# remap them back
match_indices[:, 0] = valid_idx_i1[match_indices[:, 0]]
match_indices[:, 1] = valid_idx_i2[match_indices[:, 1]]
return match_indices
| [
37811,
7571,
835,
357,
21973,
723,
16936,
4780,
8,
2603,
2044,
13,
198,
198,
30515,
669,
25,
13709,
1530,
347,
1698,
198,
37811,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
299,
32152,
... | 2.292884 | 1,335 |
import scrapy
from scrapy.loader import ItemLoader
from mercadolibre.items import CelularItem
## #comment_js_7me > div > div > div > div:nth-child(2) > div > div > div > div.UFICommentContent > div._26f8 > div > span
| [
11748,
15881,
88,
198,
6738,
15881,
88,
13,
29356,
1330,
9097,
17401,
628,
198,
6738,
11991,
324,
349,
571,
260,
13,
23814,
1330,
15248,
934,
7449,
628,
198,
198,
2235,
1303,
23893,
62,
8457,
62,
22,
1326,
1875,
2659,
1875,
2659,
1875... | 2.846154 | 78 |
import aiohttp
| [
11748,
257,
952,
4023,
628
] | 3.2 | 5 |
"""This module will update a binary feature matrix Z."""
import numpy as np
import numpy.random as nr
from . import logPX
from . import logPV
from . import sampleV # slice sampler
def sampleZ(X, Z, A, sigma_x, sigma_a, alpha, K, N, D, realvaluedZ, proposeNewfeature):
"""Drawing Z using an uncollapsed Gibbs sampler."""
for i in range(N):
# Calculate m_-i,k
m = (Z != 0).astype(np.int).sum(axis=0)
assert(Z.shape[1] == len(m))
m_i = ((Z[i, :] != 0).astype(np.int))
m_neg_i = m - m_i
# emulate IBP-FM using a finite model
# Compute prior p(z_ik = 1 or = 0 | z_-i,k) = (m_-i,k+alpha/K)/ (N+alpha/K)
# need only condition on z_-i,k rather than Z_-(ik) because the columns
# of the matrix are generated independently under this prior
prior_z1 = (m_neg_i+alpha/K)/(float(N)+alpha/K)
prior_z0 = (N-m_neg_i)/(float(N)+alpha/K) # 1 - Pz1
assert(np.isfinite(prior_z0).all())
assert(np.isfinite(prior_z1).all())
# Iterate through the columns of the matrix
for k in range(K):
if (realvaluedZ):
old_zik = Z[i, k]
# Compute a log likelihood p(z_ik = 0 | Z_-(ik), X)
Z[i, k] = 0
logp0 = logPX.logPX(X, Z, A, sigma_x, N, D)
assert(np.isfinite(logp0))
logp0 += np.log(prior_z0[k])
assert(np.isfinite(logp0))
# Compute a log likelihood p(z_ik = 1 | Z_-(ik), X)
Z[i, k] = 1
if (realvaluedZ):
if (old_zik == 0):
Z[i, k] = nr.normal(0, 1) # propose v from prior N(0, 1)
else:
Z[i, k] = old_zik # recycle the current value
logp1 = logPX.logPX(X, Z, A, sigma_x, N, D)
assert(np.isfinite(logp1))
logp1 += np.log(prior_z1[k])
assert(np.isfinite(logp1))
# Add log prior for feature weight v
# need only calculate single v because the rest weights are same
# between z_ik = 0 and z_ik = 1
if (realvaluedZ):
logp1 += logPV.logPvi(Z[i, k])
assert(np.isfinite(logp1))
log_diff = logp1 - logp0
# If np.exp(log_diff) overflows, then numpy will handle overflows gracefully
# np.exp(1000) result in inf and 1/inf therefor simply results in 0
# if set np.seterr('raise') RuntimeWarning -> FloatingPointError
try:
exp_log_diff = np.exp(log_diff)
except FloatingPointError as e:
print (e)
print ("Cannot exponentiate ", log_diff)
p0 = 1.0 / (1 + exp_log_diff)
if (nr.uniform(0, 1) < p0):
Z[i, k] = 0
else:
if (realvaluedZ):
# sample new v through a slice sampler
Z[i, k] = sampleV.sampleV(i, k, X, Z, A, sigma_x, N, D)
else:
Z[i, k] = 1
return (Z, K, A)
| [
37811,
1212,
8265,
481,
4296,
257,
13934,
3895,
17593,
1168,
526,
15931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
25120,
355,
299,
81,
198,
6738,
764,
1330,
2604,
47,
55,
198,
6738,
764,
1330,
2604,
47,
53,
198,... | 1.86618 | 1,644 |
from cbutil import Path
from pathinfo import *
for p in source_file_paths + header_file_paths:
content = None
with p.open('r') as fr:
content = fr.read()
if content != None and '\r' in content:
print(p)
with p.open('w', encoding = 'UTF-8') as fw:
fw.write(content)
| [
6738,
269,
4360,
346,
1330,
10644,
198,
6738,
3108,
10951,
1330,
1635,
628,
198,
1640,
279,
287,
2723,
62,
7753,
62,
6978,
82,
1343,
13639,
62,
7753,
62,
6978,
82,
25,
198,
220,
220,
220,
2695,
796,
6045,
198,
220,
220,
220,
351,
... | 2.182432 | 148 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-10 03:18
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
17,
319,
1584,
12,
3023,
12,
940,
7643,
25,
1507,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.719298 | 57 |
import logging
import argparse
import sys
import os
import json
import time
import datetime
from writers import csv_writer
from writers import json_writer
from utils import get_memory
from utils import memory_limit
from accessors.pride_data import get_filelist, get_projectlist, write_archive_file, download_projectlist
log = logging.getLogger('PrideData')
log.setLevel(logging.DEBUG)
if not os.path.exists("logs/"):
os.mkdir("logs")
log_filename = 'logs/logging-{}.log'.format(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
handler = logging.FileHandler(log_filename, mode='w')
handler.setFormatter(logging.Formatter(
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(logging.DEBUG)
log.addHandler(handler)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter(
fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
handler.setLevel(logging.INFO)
log.addHandler(handler)
if __name__ == "__main__":
log.info("PRIDE download started!")
parser = argparse.ArgumentParser(
description="Download PRIDE projects and create Nocoy trainable csv!")
parser.add_argument('-A', '--accessions', nargs='*', default=None,
type=list, help="Specify certain projects by accessions to download.")
parser.add_argument('-C', '--csv', action='store_true', help="Generates a csv file for each available file tuple!")
parser.add_argument('-CL', '--csv_location', nargs='*', default=None,
type=str, help="Relative Location of single unified generated CSV!")
parser.add_argument('-FE', '--features', default=["Hyperscore", "Charge", "sumI", "norm_high_peak_intensity", "Num_of_Modifications", "Pep_Len", "Num_Pl",
"mh(group)", "mh(domain)", "uniqueDM", "uniqueDMppm", "Sum_match_intensities", "Log_sum_match_intensity", "b+_ratio",
"b++_ratio", "y+_ratio", "y++_ratio", "b+_count", "b++_count", "y+_count", "y++_count", "b+_long_count",
"b++_long_count", "y+_long_count", "y++_long_count", "median_matched_frag_ion_errors", "mean_matched_frag_ion_errors",
"iqr_matched_frag_ion_errors", "Class_Label"], type=list, help="Features to be extracted from the acquired data and stored to CSV!")
parser.add_argument('-M', '--memory', type=float, default=1.0, help="Limits the RAM for the program to the given ratio of the available RAM!")
parser.add_argument('-N', '--number', metavar='1..10000', type=int, choices=range(
1, 10001), default=1, help="Maximal number of projects per page with fitting metadata to include.")
parser.add_argument('-P', '--pages', metavar='1..50', type=int, choices=range(
1, 51), default=1, help="Maximal number of project pages to search.")
parser.add_argument('-O', '--single_file', action='store_true', help="Only download a single file tuple for each available project!")
parser.add_argument('-INI', '--ini', nargs='*', default=None, help="Disregard command line arguments and parse configuration from a given config file!")
parser.add_argument('-I', '--instruments', nargs='*', default=None,
type=str, help="MS/MS instruments used in projects. String used by PRIDE")
parser.add_argument('-J', '--json', action='store_true', help="Generates a json file from each available project!")
parser.add_argument('-S', '--species', nargs='*', default=None,
type=str, help="Species evaluated in projects. NCBI Taxonomy ID")
parser.add_argument('-F', '--folder', nargs='*', default="data_pride",
type=str, help="Folder containing downloaded data relative to the python script!")
parser.add_argument('-SUB', '--submission', default="COMPLETE",
type=str, help="SubmissionType for projects.")
parser.add_argument('-CO', '--cores', default=4, type=int, help="Maximal number of cores!")
args = parser.parse_args()
if args.ini:
argparse_dict = vars(args)
log.info("Parsing configuration from {}".format(args.ini[0]))
with open(args.ini[0], 'r') as configfile:
argparse_dict.update(json.load(configfile))
print(args)
memory_limit(args.memory)
projects, projectDescriptions = get_projectlist(args)
log.info("Found {} matching projects!".format(len(projects)))
log.debug(projects)
archivePath = os.path.join(args.folder, 'archive')
if os.path.exists(archivePath):
with open(archivePath) as fp:
for line in fp:
for project in projects:
if project in line:
projects.remove(project)
if args.single_file:
log.info('Only downloading single file tuples for each available project!')
if projects:
downloaded_files = download_projectlist(projects, projectDescriptions, args.folder, args.single_file)
jsonPath = os.path.join(args.folder, 'psms.json')
if downloaded_files:
write_archive_file(archivePath, downloaded_files)
if args.csv:
csv_writer.writeCSVPSMSfromArchive(archivePath, args.cores, args.features, args.csv_location[0])
if args.json:
json_writer.writeJSONPSMSfromArchive(archivePath, jsonPath) | [
11748,
18931,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
4818,
8079,
198,
198,
6738,
8786,
1330,
269,
21370,
62,
16002,
198,
6738,
8786,
1330,
33918,
62,
16002,
198,
198,... | 2.60431 | 2,042 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains test cases for the utils.py module."""
import locale
import sys
import unittest
from pathlib import Path
from unittest import mock
PATH = Path(__file__).parent
sys.path.insert(0, str(PATH.parent))
from youtube_dl_gui import utils
class TestUtils(unittest.TestCase):
"""Test case for utils functions"""
@mock.patch("youtube_dl_gui.utils.locale_getpreferredencoding")
@mock.patch("youtube_dl_gui.utils.locale_getpreferredencoding")
class TestToBytes(unittest.TestCase):
"""Test case for the to_bytes method."""
class TestFormatBytes(unittest.TestCase):
"""Test case for the format_bytes method."""
class TestBuildCommand(unittest.TestCase):
"""Test case for the build_command method."""
def run_tests(self, ydl_bin, tmpl):
"""Run the main test.
Args:
ydl_bin (str): Name of the youtube-dl binary
tmpl (str): Youtube-dl output template
"""
self.options[1] = tmpl # Plug the template in our options
result = self.result.format(ydl_bin=ydl_bin, tmpl=tmpl)
self.assertEqual(utils.build_command(self.options, self.url, ydl_bin), result)
class TestGetDefaultLang(unittest.TestCase):
"""Test case for the get_default_lang function."""
@mock.patch("youtube_dl_gui.utils.locale_getdefaultlocale")
def run_tests(self, ret_value, result, mock_getdefaultlocale):
"""Run the main test.
Args:
ret_value (tuple): Return tuple of the locale.getdefaultlocale module
result (str): Result we want to see
mock_getdefaultlocale (MagicMock): Mock object
"""
mock_getdefaultlocale.return_value = ret_value
lang = utils.get_default_lang()
mock_getdefaultlocale.assert_called_once()
self.assertEqual(lang, result)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
4264,
1299,
1332,
2663,
329,
262,
3384,
4487,
13,
9078,
8265,
526,
15931,
628,
198,
11748,
36693,
198,
1174... | 2.531044 | 757 |
__version__ = "0.0.6"
from .core import * | [
834,
9641,
834,
796,
366,
15,
13,
15,
13,
21,
1,
198,
6738,
764,
7295,
1330,
1635
] | 2.411765 | 17 |
import os
from jinja2 import Environment, FileSystemLoader
import pypandoc
import click
import pathlib
import re
BEFORE="""{% extends 'layouts/main.html' %}
{% block content %}
"""
AFTER="""
{% endblock %}
"""
# add option for substitution
@click.command()
@click.option('--var', '-v', multiple=True)
@click.argument("infile")
def main(var, infile):
"""Generate or update a complete HTML page from the source
.j2 or .md file provided
"""
data = dict([j.split("=") for j in var])
try:
j2_env = Environment(loader=FileSystemLoader("."),
trim_blocks=True)
except Exception as e:
print(str(e))
exit(1)
path = pathlib.Path(infile)
if path.suffix == ".md":
try:
with open(infile, 'r') as read:
html = read.read()
except Exception as e:
print(str(e))
exit(1)
html = pypandoc.convert_file(infile, to='html',
format='markdown-blank_BEFORE_blockquote')
html, blocks = substitutions(path, html)
rendered = j2_env.from_string(BEFORE+html+AFTER+blocks).render(data)
elif path.suffix == ".j2":
rendered = j2_env.get_template(
infile).render(data)
else:
print("Unsupported format.", path.suffix);
exit(1)
print(rendered)
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
6738,
474,
259,
6592,
17,
1330,
9344,
11,
9220,
11964,
17401,
198,
11748,
279,
4464,
392,
420,
198,
11748,
3904,
198,
11748,
3108,
8019,
198,
11748,
302,
628,
198,
12473,
30818,
2625,
15931,
90,
4,
14582,
705,
10724,
... | 2.22504 | 631 |
'''
Created on Oct 18, 2012
@author: mmunn
Unit test : EUCA-2244 Default launch permission for user image (account resource)
The fix for this issue changes the database so euca_conf --initialize
must have been run with the fix present for this test to pass.
setUp : Install Credentials, sat variables
test : Create and register an image and make sure the default visibility is Private not Public
tearDown : Removes Credential, removes image
cloud.conf:( place in same directory as this test)
IP ADDRESS CENTOS 6.3 64 BZR [CC00 CLC SC00 WS]
ip address CENTOS 6.3 64 BZR [NC00]
'''
import unittest
import shutil
from eucaops import Eucaops
from testcases.cloud_user.images.imageutils import ImageUtils
if __name__ == "__main__":
unittest.main("Euca2244") | [
7061,
6,
198,
41972,
319,
2556,
1248,
11,
2321,
198,
31,
9800,
25,
285,
6199,
77,
198,
26453,
1332,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
4576,
8141,
12,
24137,
19,
15161,
4219,
7170,
329,
2836,
2939,
357,
23317,
8271,
... | 2.530086 | 349 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-09 19:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_remote_submission.models
import model_utils.fields
import uuid
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
18,
319,
2177,
12,
2919,
12,
2931,
678,
25,
486,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 3.037037 | 108 |
import os,urllib, urllib2
import raid
path = '/home/lorenzo/projects/assets/GCF_52/'
#testMultiple()
#testOne('20674984470_1')
#testOne('20675014450_1')
#testOne('20673569920_1')
testWS('20673569920_1')
| [
11748,
28686,
11,
333,
297,
571,
11,
2956,
297,
571,
17,
198,
11748,
9513,
198,
198,
6978,
796,
31051,
11195,
14,
75,
29578,
10872,
14,
42068,
14,
19668,
14,
15916,
37,
62,
4309,
14,
6,
198,
198,
2,
9288,
31217,
3419,
198,
2,
9288... | 2.204301 | 93 |
import os
import paho.mqtt.client as mqtt
import time
import mysql.connector
import json
print("Starting up!")
sql_host = os.environ['SQL_HOST']
sql_user = os.environ['SQL_USER']
sql_passwd = os.environ['SQL_PASSWD']
sql_database = os.environ['SQL_DATABASE']
mqtt_host = os.environ['MQTT_HOST']
mqtt_user = os.environ['MQTT_USER']
mqtt_passwd = os.environ['MQTT_PASSWD']
print("SQL parameters")
print(f"HOST: {sql_host}")
print(f"USER: {sql_user}")
print(f"PASSWD: {sql_passwd}")
print(f"DATABASE: {sql_database}")
print()
print("MQTT parameters")
print(f"HOST: {mqtt_host}")
print(f"USER: {mqtt_user}")
print(f"PASSWD: {mqtt_passwd}")
cnx = mysql.connector.connect(host=sql_host, user=sql_user, password=sql_passwd, database=sql_database)
client = mqtt.Client()
client.username_pw_set(username=mqtt_user,password=mqtt_passwd)
client.on_disconnect = on_disconnect
client.connect(mqtt_host)
client.on_message=on_message
client.subscribe("#")
client.loop_forever()
| [
11748,
28686,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
11748,
640,
198,
11748,
48761,
13,
8443,
273,
198,
11748,
33918,
628,
220,
220,
220,
220,
198,
4798,
7203,
22851,
510,
2474,
8,
198,
198,
2541... | 2.398034 | 407 |
import os
import pdfkit
PAGE_SIZE = "A4" | [
11748,
28686,
198,
11748,
37124,
15813,
198,
198,
4537,
8264,
62,
33489,
796,
366,
32,
19,
1
] | 2.411765 | 17 |
import matplotlib.pyplot as plt
from numpy import arange
import pandas as pd
import os
import logging
logging.basicConfig()
logger = logging.getLogger('PlotEvalNumBar')
logger.setLevel('INFO')
if __name__ == '__main__':
csv_path = r"../../../tests/mf_param_opt_tests/output/FidelityLevelTest.csv"
bar_path = r"output/PlotEvalNumBar/FidelityLevelEvalNum.png"
data = pd.read_csv(csv_path, header=0, index_col=0)
pen = PlotEvalNumBar(data=data[['Fidelity', 'EvalCounts']], path=bar_path, show=True)
pen.plot()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
299,
32152,
1330,
610,
858,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
18931,
198,
198,
6404,
2667,
13,
35487,
16934,
3419,
198,
6404,
1362,
... | 2.533333 | 210 |