content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import database
MAX_HEIGHT = 7 # maximum cupoid size in the diagram
# the protoDeclare class represents one whole protoDeclare tag in the html file
# the class saves the field nodes, appearance, form and the shape of the protoDeclare node.
# the class represents the field values in the protoDeclare class | [
11748,
6831,
628,
198,
22921,
62,
13909,
9947,
796,
767,
1303,
5415,
6508,
1868,
2546,
287,
262,
16362,
198,
2,
262,
44876,
37835,
533,
1398,
6870,
530,
2187,
44876,
37835,
533,
7621,
287,
262,
27711,
2393,
198,
2,
262,
1398,
16031,
2... | 4.173333 | 75 |
import numpy as np
from scipy.spatial.distance import cdist
import torch
import os
from torch.optim import Adam, lr_scheduler
from opt import opt
from data import Data
from mgn_ptl import MGN_PTL
from mgn import MGN
from loss import Loss
from functions import mean_ap, cmc, re_ranking
if __name__ == '__main__':
assert opt.project_name is not None
print(opt)
loader = Data()
if opt.arch == 'mgn_ptl':
model = usegpu(MGN_PTL())
elif opt.arch == 'mgn':
model = usegpu(MGN())
else:
ValueError('Only mgn & mgn_ptl are supported')
loss = Loss()
reid = Main(model, loss, loader)
if opt.mode == 'train':
if not os.path.exists('weights/{}/'.format(opt.project_name)):
os.makedirs('weights/{}/'.format(opt.project_name))
for epoch in range(1, opt.epoch+1):
print('\nepoch', epoch)
reid.train()
if epoch % 50 == 0 or epoch == 10 or epoch == 1:
print('\nstart evaluate')
reid.test()
torch.save(model.state_dict(), ('weights/{}/model_{}.pt'.format(opt.project_name, epoch)))
reid.test()
torch.save(model.state_dict(), ('weights/{}/model_final.pt'.format(opt.project_name)))
if opt.mode == 'evaluate':
print('start evaluate')
model.load_state_dict(torch.load('{}'.format(opt.weight)))
reid.test() | [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
30246,
1330,
269,
17080,
198,
11748,
28034,
198,
11748,
28686,
198,
6738,
28034,
13,
40085,
1330,
7244,
11,
300,
81,
62,
1416,
704,
18173,
198,
6738,
2172,
1... | 2.271845 | 618 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sites.models import Site
from ionyweb.administration.tests import test_reverse, AdministrationTests
from time import time
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
27530,
1330,
14413,
198,
6738,
22088,
88,
12384,
13,
39081,
1358,
13,
4... | 3.389831 | 59 |
from tests.thing import Thing
thing = Thing()
a = thing.__dict__
thing.test()
| [
6738,
5254,
13,
1197,
1330,
21561,
198,
198,
1197,
796,
21561,
3419,
198,
64,
796,
1517,
13,
834,
11600,
834,
198,
1197,
13,
9288,
3419,
198
] | 3.038462 | 26 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@insecure.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@insecure.com for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gtk
import gtk.gdk
import pango
import zenmapCore.I18N
from zenmapCore.UmitConf import NmapOutputHighlight
from zenmapGUI.higwidgets.higdialogs import HIGDialog
from zenmapGUI.higwidgets.hignotebooks import HIGNotebook
from zenmapGUI.higwidgets.higboxes import HIGVBox
from zenmapGUI.higwidgets.higtables import HIGTable
from zenmapGUI.higwidgets.higlabels import HIGEntryLabel
from zenmapGUI.higwidgets.higbuttons import HIGButton, HIGToggleButton
if __name__ == "__main__":
n = NmapOutputProperties(None)
n.run()
gtk.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
8412,
2466,
8162,
3955,
15490,
8643,
28692,
2969,
38559,
24290,
28994,
5653,
8412,
4557,
198,
2,
1635,
220... | 2.391898 | 4,098 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hetero-kmeans-param.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hetero-kmeans-param.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=_b('B\025KmeansModelParamProto'),
serialized_pb=_b('\n\x19hetero-kmeans-param.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\x88\x02\n\x10KmeansModelParam\x12\x19\n\x11\x63ount_of_clusters\x18\x01 \x01(\x03\x12\x16\n\x0emax_interation\x18\x02 \x01(\x03\x12\x11\n\tconverged\x18\x03 \x01(\x08\x12M\n\x0e\x63luster_detail\x18\x04 \x03(\x0b\x32\x35.com.webank.ai.fate.core.mlmodel.buffer.Clusterdetail\x12O\n\x0f\x63\x65ntroid_detail\x18\x05 \x03(\x0b\x32\x36.com.webank.ai.fate.core.mlmodel.buffer.Centroiddetail\x12\x0e\n\x06header\x18\x06 \x03(\t\" \n\rClusterdetail\x12\x0f\n\x07\x63luster\x18\x01 \x03(\x01\"\"\n\x0e\x43\x65ntroiddetail\x12\x10\n\x08\x63\x65ntroid\x18\x01 \x03(\x01\x42\x17\x42\x15KmeansModelParamProtob\x06proto3')
)
_KMEANSMODELPARAM = _descriptor.Descriptor(
name='KmeansModelParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count_of_clusters', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.count_of_clusters', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_interation', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.max_interation', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='converged', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.converged', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_detail', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.cluster_detail', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='centroid_detail', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.centroid_detail', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='header', full_name='com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam.header', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=334,
)
_CLUSTERDETAIL = _descriptor.Descriptor(
name='Clusterdetail',
full_name='com.webank.ai.fate.core.mlmodel.buffer.Clusterdetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cluster', full_name='com.webank.ai.fate.core.mlmodel.buffer.Clusterdetail.cluster', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=336,
serialized_end=368,
)
_CENTROIDDETAIL = _descriptor.Descriptor(
name='Centroiddetail',
full_name='com.webank.ai.fate.core.mlmodel.buffer.Centroiddetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='centroid', full_name='com.webank.ai.fate.core.mlmodel.buffer.Centroiddetail.centroid', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=370,
serialized_end=404,
)
_KMEANSMODELPARAM.fields_by_name['cluster_detail'].message_type = _CLUSTERDETAIL
_KMEANSMODELPARAM.fields_by_name['centroid_detail'].message_type = _CENTROIDDETAIL
DESCRIPTOR.message_types_by_name['KmeansModelParam'] = _KMEANSMODELPARAM
DESCRIPTOR.message_types_by_name['Clusterdetail'] = _CLUSTERDETAIL
DESCRIPTOR.message_types_by_name['Centroiddetail'] = _CENTROIDDETAIL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KmeansModelParam = _reflection.GeneratedProtocolMessageType('KmeansModelParam', (_message.Message,), {
'DESCRIPTOR': _KMEANSMODELPARAM,
'__module__': 'hetero_kmeans_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.KmeansModelParam)
})
_sym_db.RegisterMessage(KmeansModelParam)
Clusterdetail = _reflection.GeneratedProtocolMessageType('Clusterdetail', (_message.Message,), {
'DESCRIPTOR': _CLUSTERDETAIL,
'__module__': 'hetero_kmeans_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.Clusterdetail)
})
_sym_db.RegisterMessage(Clusterdetail)
Centroiddetail = _reflection.GeneratedProtocolMessageType('Centroiddetail', (_message.Message,), {
'DESCRIPTOR': _CENTROIDDETAIL,
'__module__': 'hetero_kmeans_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.Centroiddetail)
})
_sym_db.RegisterMessage(Centroiddetail)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
14445,
78,
12,
74,
1326,
504,
12,
17143,
13,
1676,
1462,
198,
198,
11748... | 2.216285 | 3,537 |
from rest_framework import serializers
from django.contrib.auth.models import User
from . import models
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
201,
198,
6738,
764,
1330,
4981,
201,
198,
201,
198,
201
] | 3.4375 | 32 |
# --TASK-- visualize located objects based on datafile
import bpy, json
from calibration import get_calibration
from generate_mesh import generate_mesh
# load data
data_file = open('<source_datafile_path>', 'r')
data = json.loads(data_file.read())
data_file.close()
# iterate frames in data
for frame_count, frame_data in data.items():
# generate mesh for located objects
vertices = []
edges = []
for obj in frame_data['objects']:
vertices.append(tuple(obj['X']))
bpy.context.scene.frame_set(int(frame_count))
generate_mesh(vertices, edges, color=(0.9, 1.0, 0.0), vertex_size_factor=4.0, single_frame=True)
# generate mesh for camera rays
calibration = get_calibration()
for obj in frame_data['objects']:
vertices = [
tuple(calibration['X0_A']) + ('X0_A',),
tuple(calibration['X0_B']) + ('X0_B',),
tuple(obj['X']) + ('X',)
]
edges = [
('X0_A','X'),
('X0_B','X'),
]
generate_mesh(vertices, edges, color=(0.9, 1.0, 0.0), render_vertices=False, single_frame=True)
# set keyframe range of animation
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = len(data.keys())-1
| [
2,
1377,
51,
1921,
42,
438,
38350,
5140,
5563,
1912,
319,
1366,
7753,
201,
198,
201,
198,
11748,
275,
9078,
11,
33918,
201,
198,
6738,
36537,
1330,
651,
62,
9948,
571,
1358,
201,
198,
6738,
7716,
62,
76,
5069,
1330,
7716,
62,
76,
... | 2.38193 | 487 |
from typing import Dict, List, Optional
from pydantic import BaseModel
from typing_extensions import Literal
from aws_lambda_powertools.utilities.parser.models import (
DynamoDBStreamChangedRecordModel,
DynamoDBStreamModel,
DynamoDBStreamRecordModel,
EventBridgeModel,
SnsModel,
SnsNotificationModel,
SnsRecordModel,
SqsModel,
SqsRecordModel,
)
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
19720,
62,
2302,
5736,
1330,
25659,
1691,
198,
198,
6738,
3253,
82,
62,
50033,
62,
6477,
31391,
13,
315,
2410,
13,
48610,... | 2.884058 | 138 |
import dataclasses
@dataclasses.dataclass | [
11748,
4818,
330,
28958,
198,
198,
31,
19608,
330,
28958,
13,
19608,
330,
31172
] | 3 | 14 |
# This is a python implementation of the Vigenere Cipher, it employs a form of polyalphabetic substitution and remained unbreakable for three centuries
# Reference can be found at: https://en.wikipedia.org/wiki/Vigen%C3%A8re_cipher
# In this implementaion we will use the keyword: LEMON
import math
# Examples:
# encryption()
decrytpion()
| [
2,
770,
318,
257,
21015,
7822,
286,
262,
569,
9324,
567,
44334,
11,
340,
24803,
257,
1296,
286,
7514,
17307,
33312,
32097,
290,
6150,
555,
9032,
540,
329,
1115,
10675,
220,
198,
2,
20984,
460,
307,
1043,
379,
25,
3740,
1378,
268,
13... | 3.49 | 100 |
# Copyright 2019 Zrna Research LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import (absolute_import, division,
print_function)
from builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
from .util import Connection
from .util import to_path_name, to_field_name, to_class_name
from collections import OrderedDict
from functools import wraps
from google.protobuf import text_format
from google.protobuf.json_format import MessageToDict, MessageToJson
from inflection import camelize
import json
import pprint
import sys
import textwrap
import zrna.zr_pb2 as zr
| [
2,
15069,
13130,
1168,
81,
2616,
4992,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
... | 3.128535 | 389 |
# -*- coding: utf-8 -*-
"""Invitation related logic."""
from __future__ import unicode_literals
from json import dumps
from .. import models
from .. import users
from ..decorators import requires_auth
class Invitation(models.GitHubCore):
"""Representation of an invitation to collaborate on a repository.
.. attribute:: created_at
A :class:`~datetime.datetime` instance representing the time and date
when this invitation was created.
.. attribute:: html_url
The URL to view this invitation in a browser.
.. attribute:: id
The unique identifier for this invitation.
.. attribute:: invitee
A :class:`~github3.users.ShortUser` representing the user who was
invited to collaborate.
.. attribute:: inviter
A :class:`~github3.users.ShortUser` representing the user who invited
the ``invitee``.
.. attribute:: permissions
The permissions that the ``invitee`` will have on the repository. Valid
values are ``read``, ``write``, and ``admin``.
.. attribute:: repository
A :class:`~github3.repos.ShortRepository` representing the repository
on which the ``invitee` was invited to collaborate.
.. attribute:: url
The API URL that the ``invitee`` can use to respond to the invitation.
Note that the ``inviter`` must use a different URL, not returned by
the API, to update or cancel the invitation.
"""
class_name = 'Invitation'
allowed_permissions = frozenset(['admin', 'read', 'write'])
@requires_auth
def accept(self):
"""Accept this invitation.
:returns:
True if successful, False otherwise
:rtype:
bool
"""
return self._boolean(self._patch(self.url), 204, 404)
@requires_auth
def decline(self):
"""Decline this invitation.
:returns:
True if successful, False otherwise
:rtype:
bool
"""
return self._boolean(self._delete(self.url), 204, 404)
@requires_auth
def delete(self):
"""Delete this invitation.
:returns:
True if successful, False otherwise
:rtype:
bool
"""
url = self._build_url(
'invitations', self.id, base_url=self.repository.url)
return self._boolean(self._delete(url), 204, 404)
@requires_auth
def update(self, permissions):
"""Update this invitation.
:param str permissions:
(required), the permissions that will be granted by this invitation
once it has been updated. Options: 'admin', 'read', 'write'
:returns:
The updated invitation
:rtype:
:class:`~github3.repos.invitation.Invitation`
"""
if permissions not in self.allowed_permissions:
raise ValueError("'permissions' must be one of {0}".format(
', '.join(sorted(self.allowed_permissions))
))
url = self._build_url(
'invitations', self.id, base_url=self.repository.url)
data = {'permissions': permissions}
json = self._json(self._patch(url, data=dumps(data)), 200)
return self._instance_or_null(Invitation, json)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
19904,
3780,
3519,
9156,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
33918,
1330,
45514,
198,
198,
6738,
11485,
133... | 2.521806 | 1,307 |
while True:
t = int(input('Quer ver a tabuada de qual valor?'))
if t < 0:
break
print('-' * 32)
for c in range(1, 11):
print(f'{t} x {c:2} = {t*c}')
print('Programa Tabuada Encerrado.Volte Sempre!')
| [
4514,
6407,
25,
198,
220,
220,
220,
256,
796,
493,
7,
15414,
10786,
4507,
263,
3326,
257,
7400,
84,
4763,
390,
4140,
1188,
273,
8348,
4008,
198,
220,
220,
220,
611,
256,
1279,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
2270,... | 2.008696 | 115 |
import os
import subprocess
import shutil
import numpy as np
import glob
from multiprocessing import Pool
#gather files for creating symbolic links
#make sure nproc is greater than or equal to 1
# if self.nproc < 1:
# print 'Need to have at least 1 processor allocated'
# raise
#use index to create formated symbolic links numerically increasing
#write ffmpeg to file
#Actually run ffmpeg
#run ffmpeg without writing file
| [
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
4423,
346,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
15095,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
19... | 2.615385 | 195 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Flask-Resources is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Flask Resources module to create REST APIs."""
from werkzeug.exceptions import HTTPException
from ..args.parsers import (
create_request_parser,
item_request_parser,
search_request_parser,
)
from ..context import resource_requestctx
from .base import BaseView
class ListView(BaseView):
"""List view representation.
Allows searching and creating an item in the list.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(ListView, self).__init__(*args, **kwargs)
self.search_parser = self.resource.config.search_request_parser
self.create_parser = self.resource.config.create_request_parser
self.response_handlers = self.resource.config.list_response_handlers
self.request_loaders = self.resource.config.item_request_loaders
def get(self, *args, **kwargs):
"""Search the collection."""
resource_requestctx.request_args = self.search_parser.parse()
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
return _response_handler.make_response(*self.resource.search(*args, **kwargs))
def post(self, *args, **kwargs):
"""Create an item in the collection."""
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
_response_loader = self.request_loaders[resource_requestctx.payload_mimetype]
resource_requestctx.request_args = self.create_parser.parse()
resource_requestctx.data = _response_loader.load_request()
return _response_handler.make_response(
*self.resource.create(*args, **kwargs)
)
class ItemView(BaseView):
"""Item view representation.
Allows reading, (partial) updating and deleting an item.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(ItemView, self).__init__(*args, **kwargs)
self.item_parser = self.resource.config.item_request_parser
self.response_handlers = self.resource.config.item_response_handlers
self.request_loaders = self.resource.config.item_request_loaders
def get(self, *args, **kwargs):
"""Get."""
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
try:
return _response_handler.make_response(*self.resource.read(*args, **kwargs))
except HTTPException as error:
# TODO: 1) should this be here, or we use the blueprint error handlers?
# records rest have something here.
# 2) we should check if e.g. a tombstone page is an error or
# a normal response.
return _response_handler.make_error_response(error)
def put(self, *args, **kwargs):
"""Put."""
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
# TODO: If application/json is used for both put and post, then they have to
# use the same response handler. Possibly this is ok, but need to be
# checked. Probably the problems is delegated to partial_update()
_response_loader = self.request_loaders[resource_requestctx.payload_mimetype]
try:
resource_requestctx.data = _response_loader.load_request()
return _response_handler.make_response(
*self.resource.update(*args, **kwargs)
)
except HTTPException as error:
return _response_handler.make_error_response(error)
def patch(self, *args, **kwargs):
"""Patch."""
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
_response_loader = self.request_loaders[resource_requestctx.payload_mimetype]
try:
resource_requestctx.data = _response_loader.load_request()
return _response_handler.make_response(
*self.resource.partial_update(*args, **kwargs)
)
except HTTPException as error:
return _response_handler.make_error_response(error)
def delete(self, *args, **kwargs):
"""Delete."""
_response_handler = self.response_handlers[resource_requestctx.accept_mimetype]
# TODO: Delete can potentially have a body - e.g. the tombstone messages.
# HTTP spec seems to allow this, but not that common.
try:
return _response_handler.make_response(
*self.resource.delete(*args, **kwargs)
)
except HTTPException as error:
return _response_handler.make_error_response(error)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
12131,
327,
28778,
13,
198,
2,
198,
2,
46947,
12,
33236,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2... | 2.554965 | 1,883 |
import time
from base import SeleniumBaseTest
| [
11748,
640,
198,
6738,
2779,
1330,
15300,
47477,
14881,
14402,
198
] | 4.181818 | 11 |
#!/usr/bin/python
import math
import time
from roberta import Hal
from roberta import BlocklyMethods
h = Hal()
item = BlocklyMethods.createListWith(0, 0, 0)
item2 = BlocklyMethods.createListWith()
item3 = BlocklyMethods.createListWith()
item4 = BlocklyMethods.createListWith()
item5 = BlocklyMethods.createListWith()
if __name__ == "__main__":
main() | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
10688,
198,
11748,
640,
198,
6738,
686,
4835,
64,
1330,
11023,
198,
6738,
686,
4835,
64,
1330,
9726,
306,
46202,
198,
71,
796,
11023,
3419,
628,
198,
9186,
796,
9726,
306,
46202,
... | 3.008403 | 119 |
import re
from re import MULTILINE, DOTALL
from comments import CommentExtractor
class CommentExtractorPython(CommentExtractor):
""" Comment extractor for Python code """
def __process_single_line_match(self, match):
""" Process a regular expression match object for a single line comment and return
a string version of the comment
Args:
match: Regular expression match object
Returns:
String version of the match with comment character removed
"""
return re.sub(r'^#', '', match.group(0)).strip()
def __process_multi_line_match(self, match):
""" Process a regular expression match object for a multi-line comment and
return a string version of the comment
Args:
match: Regular expression match object
Returns:
String version of the match with comment characters and newlines removed
so the comment is on a single line
"""
return ' '.join([re.sub(r'^"""|"""$|^\'\'\'|\'\'\'$', '', line.strip()).strip() \
for line in match.group(0).split('\n')]) \
.strip()
def extract_comments(self, file_contents):
""" Returns a list of comments in the source code
Returned comments are NOT IN THE ORDER OF THE SOURCE CODE.
Args:
file_contents: Contents of a source code file as a single string including newline characters
Returns:
List of comments in the source code. Each multiline comment is one element of the list,
regardless of how many lines it spans in the source code. Comment characters
are removed.
Returned comments are NOT IN THE ORDER OF THE SOURCE CODE. In particular, single line comments
are grouped together and multiline comments are grouped together.
"""
single_line_re = r'#.*$'
iter_single = re.finditer(single_line_re, file_contents, MULTILINE)
single_line_comments = [self.__process_single_line_match(match) for match in iter_single]
multi_line_re = r'^\s+("""|\'\'\').+?("""|\'\'\')\s+$'
iter_multi = re.finditer(multi_line_re, file_contents, DOTALL | MULTILINE)
multi_line_comments = [self.__process_multi_line_match(match) for match in iter_multi]
return single_line_comments + multi_line_comments
| [
11748,
302,
198,
6738,
302,
1330,
337,
16724,
4146,
8881,
11,
42743,
7036,
198,
6738,
3651,
1330,
18957,
11627,
40450,
198,
198,
4871,
18957,
11627,
40450,
37906,
7,
21357,
11627,
40450,
2599,
198,
220,
220,
220,
37227,
18957,
7925,
273,
... | 2.413168 | 1,048 |
#!/usr/bin/python
# Authors: Chris Tung
# Ignacio Taboada
#
"""Example script that simulates a population of sources with a luminosity
distribution that is dependent on redshift"""
# General imports
# from __future__ import division
import argparse
# Numpy / Scipy
import numpy as np
# Firesong code
from Evolution import get_LEvolution
from input_output import output_writer, print_config_LEGEND, get_outputdir
from sampling import InverseCDF
def legend_simulation(outputdir,
filename='LEGEND.out',
L_Evolution="HA2014BL",
zmin=0.0005,
zmax=10.,
bins=10000,
index=2.13,
emin=1e4,
emax=1e7,
lmin=38,
lmax=48,
seed=None,
verbose=True):
"""
Simulate a universe of neutrino sources with luminosity distribution
dependent on redshift
Args:
outputdir (str or None): path to write output. If None, return results
without writing a file
filename (str): name of the output file.
L_Evolution (str): Name of luminosity evolution model
zmin (float, optional, default=0.0005): Closest redshift to consider
zmax (float, optional, default=10.): Farthest redshift to consider
bins (int, optional, default=1000): Number of bins used when creating
the redshift PDF
fluxnorm (float, optional, default=0.9e-8): Normalization on the total
astrophysical diffuse flux, E^2dPhi/dE. Units of GeV s^-1 sr^-1
index (float, optional, default=2.13): Spectral index of diffuse flux
emin (float, optional, default=1e4): Minimum neutrino energy in GeV
emax (float, optional, default=1e7): Maximum neutrino energy in GeV
lmin (float, optional, default=38): Minimum log10 luminosity in erg/s
lmax (float, optional, default=38): Maximum log10 luminosity in erg/s
seed (int or None, optional, default=None): random number seed
verbose (bool, optional, default=True): print simulation paramaters
if True else suppress printed output
Returns:
dict: keys contain simulation results, including the input params
as well as the sources. Only returned if filename is None
"""
LE_model = get_LEvolution(L_Evolution, lmin, lmax)
N_sample = int(LE_model.Nsources(zmax))
delta_gamma = 2 - index
print_config_LEGEND(L_Evolution, lmin, lmax, N_sample)
##################################################
# Simulation starts here
##################################################
rng = np.random.RandomState(seed)
# Prepare CDF for redshift generation
redshift_bins = np.arange(zmin, zmax, zmax / float(bins))
RedshiftPDF = [LE_model.RedshiftDistribution(redshift_bins[i])
for i in range(0, len(redshift_bins))]
invCDF = InverseCDF(redshift_bins, RedshiftPDF)
# Prepare a luminosity CDF as a function of redshift
luminosity_bins = np.arange(lmin, lmax, (lmax - lmin) / 1000.)
LE_model.L_CDF(redshift_bins, luminosity_bins)
if filename is not None:
out = output_writer(outputdir, filename)
else:
results = {}
# Generate redshift
zs = invCDF(rng.uniform(low=0.0, high=1.0, size=N_sample))
# Generate luminosity as function of z
lumis = LE_model.Luminosity_Sampling(zs)
if np.ndim(lumis) < 1:
lumis = np.array([lumis] * N_sample)
# Calculate the flux of each source
fluxes = LE_model.Lumi2Flux(lumis, index, emin, emax, zs)
# Random declination over the entire sky
sinDecs = rng.uniform(-1, 1, size=N_sample)
declins = np.degrees(np.arcsin(sinDecs))
TotalFlux = np.sum(fluxes)
# Write out
if filename is not None:
out.write(declins, zs, fluxes)
out.finish(TotalFlux)
else:
results['dec'] = declins
results['z'] = zs
results['flux'] = fluxes
# print before finish
if verbose:
print("Actual diffuse flux simulated :")
log = "E^2 dNdE = {TotalFlux} (E/100 TeV)^({delta_gamma}) [GeV/cm^2.s.sr]"
print(log.format(**locals()))
if filename is None:
return results
if __name__ == "__main__":
outputdir = get_outputdir()
# Process command line options
parser = argparse.ArgumentParser()
parser.add_argument('-o', action='store', dest='filename',
default='Legend.out', help='Output filename')
parser.add_argument("--Levolution", action="store",
dest="Evolution", default='HA2014BL',
help="Source evolution options: HA2014BL")
parser.add_argument("--zmax", action="store", type=float,
dest="zmax", default=10.,
help="Highest redshift to be simulated")
parser.add_argument("--index", action="store", dest='index',
type=float, default=2.19,
help="Spectral index of the outputflux")
parser.add_argument("--lmin", action="store", dest="lmin",
type=float, default=41.5,
help="log10 of the minimum luminosity in erg/s")
parser.add_argument("--lmax", action="store", dest="lmax",
type=float, default=41.5,
help="log10 of the maximum luminosity in erg/s")
options = parser.parse_args()
legend_simulation(outputdir,
filename=options.filename,
L_Evolution=options.Evolution,
zmax=options.zmax,
index=options.index,
lmin=options.lmin,
lmax=options.lmax)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
46665,
25,
5180,
309,
2150,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
16583,
48711,
16904,
1170,
64,
198,
2,
198,
198,
37811,
16281,
4226,
326,
985,
15968,
257,
3265,
286,
4... | 2.236902 | 2,634 |
# -*- coding: utf-8 -*-
"""
Package configuration for clik-wtforms.
:author: Joe Joyce <joe@decafjoe.com>
:copyright: Copyright (c) Joe Joyce and contributors, 2017-2019.
:license: BSD
"""
import os
from setuptools import find_packages, setup
name = 'clik-wtforms'
version = '0.90.2'
requires = (
'clik',
'wtforms',
)
url = 'https://%s.readthedocs.io' % name
description = 'An extension for clik that integrates with WTForms.'
long_description = 'Please see the official project page at %s' % url
root_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.join(root_dir, 'src')
packages = find_packages(src_dir, include=[name])
setup(
author='Joe Joyce',
author_email='joe@decafjoe.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy',
],
description=description,
install_requires=requires,
license='BSD',
long_description=long_description,
name=name,
package_dir={'': 'src'},
py_modules=['clik_wtforms'],
url=url,
version=version,
zip_safe=False,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
27813,
8398,
329,
537,
1134,
12,
46569,
23914,
13,
198,
198,
25,
9800,
25,
5689,
25936,
1279,
73,
2577,
31,
12501,
1878,
73,
2577,
13,
785,
29,
198,
25,
... | 2.628205 | 624 |
"""
this is the doc string of this model
i'll just write it someday just yeah thats all
"""
import time
import random
def composite_numbers():
'''
-It generates composite numbers which are just opposite of prime numbers,,
it means real numbers that
aren't a prime number is a composite number
'''
n = 2
while n > 0:
for x in range(2, n):
if n % x == 0:
yield n
break
n+=1
def prime_numbers(end):
'''
- A number that is divisible only by itself and 1 (e.g. 2, 3, 5, 7, 11).
- Prime numbers are very useful in cryptography
Prime numbers are considered as the most exciting numbers among the math lovers..
'''
for n in range(2,end):
for x in range(2, n):
if n % x == 0:
pass
else:
yield n
def odd_seq(inverse=False):
'''
This function generates odd sequence
An odd number is a number which is not divisible by 2.
'''
if inverse is False:
n = 1
while True:
yield n
n+=2
else:
n = -1
while True:
yield n
n-=2
def even_seq(inverse=False):
'''
- even_seq generates infinite sequence of even numbers
-A number which is divisible by 2 and generates a remainder of 0 is called an even number.
'''
n = 0
if inverse is False:
while True:
yield n
n+=2
else:
while True:
yield n
n-=2
def fibonacci():
'''
- In mathematics, the Fibonacci numbers, commonly denoted Fn, form a sequence,
called the Fibonacci sequence, such that each number is the sum of the two preceding ones,
starting from 0 and 1.
- The Following Formula is "fn = fn-1 + fn-2 ".
- Fibonacci is really a mysterious sequence !
'''
x , y = 0 ,1
while True:
r = x + y
x = y
y = r
yield x
def xibonacci(x,inverse=False):
'''
- xibonacci isn't a real sequence rather it's just a method that generates
a sequence of number such that each term from the "x"
onward is the sum of previous "x" terms.
similar as fibonacci that sums previous "x" terms.
-xibonacci usually requires one positional arguments that is the value of "x".
- possible sequences that could be generated through this method:
- fibonacci
- tribonacci
- tetrabonacci
- hexabonacci
And so on ... to the infinity !
'''
inp = int(x)
empty_list = []
for _ in range(inp-1):
empty_list.append(0)
if inverse is False:
empty_list.append(1)
else:
empty_list.append(-1)
while True:
x = empty_list[-inp:]
empty_list = empty_list[-inp:]
y = sum(empty_list)
yield empty_list[-1]
empty_list.append(y)
def lucas_number(inverse=False):
'''
- The Lucas sequence has the same recursive relationship as the "Fibonacci" sequence,
where each term is the sum of the two previous terms, but with different starting values
- This produces a sequence where the ratios of successive terms approach the golden ratio,
and in fact the terms themselves are roundings("round()") of integer powers of the golden ratio.
- `x` and `y` are the constant starting_point for `Lucas Sequence`.
'''
if not inverse:
x,y,r = 2,1,0
else:
x ,y,r = -2,-1,0
while True:
yield x
r = x+y
x = y
y = r
def catalan_numbers():
"""
- In combinatorial mathematics,the Catalan numbers form a sequence of natural numbers
that occur in various counting problems,often involving recursively defined objects.
- Follows "n = 1/(n+1)(2n*n)"
"""
res = 0
catalan_list = [1,1]
i = 0
while True:
yield catalan_list[i]
res = 0
for x in range(len(catalan_list)):
res += catalan_list[x] * catalan_list [-(x+1)]
catalan_list.append(res)
i+=1
def vaneck_seq(inverse=False):
'''
-This Algorithm was taked from OEIS and the author is Ehsan Kia..
'''
try:
list_vanseq = [0]
last_pos = {}
i = 0
while True:
new_value = i - last_pos.get(list_vanseq[i], i)
list_vanseq.append(new_value)
last_pos[list_vanseq[i]] = i
yield new_value
i += 1
except KeyError:
pass
def pronic_numbers():
'''
- A pronic number is a number which is the product of two consecutive integers,
that is, a number of the form n(n + 1).
- Details:
* https://en.wikipedia.org/wiki/Pronic_number
* https://oeis.org/A002378
'''
increase , digit = 0 , 0
while True:
digit += increase
yield digit
increase+=2
def random_numbers(number_type="regular",limits=1000,seed=None):
'''
- Random Numbers Are Just Random Numbers As It Looks By Its Name,
- Use Seed For Controlling their Randomness,
- `Limits` Defines The Range.
'''
while True:
if seed is not None:
random.seed(seed)
breh = random.randint(0,10**4)
yield breh
def looknsay(starting_point="1",inverse=None):
'''
- To generate a member of the sequence from the previous member, read off the digits of the previous member,
counting the number of digits in groups of the same digit. For example:
- 1 is read off as "one 1" or 11.
- 11 is read off as "two 1s" or 21.
- 21 is read off as "one 2, then one 1" or 1211.
- 1211 is read off as "one 1, one 2, then two 1s" or 1112
- The sequence grows indefinitely. In fact, any variant defined by
starting with a different integer seed number will (eventually) also grow indefinitely,
'''
starting_point = str(starting_point)
recursed_val = count_next(starting_point)
if recursed_val == "11":
yield "1"
yield recursed_val
yield from looknsay(recursed_val)
| [
37811,
198,
5661,
318,
262,
2205,
4731,
286,
428,
2746,
198,
72,
1183,
655,
3551,
340,
25580,
655,
10194,
29294,
477,
198,
37811,
198,
198,
11748,
640,
198,
11748,
4738,
628,
198,
4299,
24185,
62,
77,
17024,
33529,
198,
197,
7061,
6,
... | 2.697722 | 1,975 |
"""
Prime Time
Have the function PrimeTime(num) take the num parameter being passed and
return the string true if the parameter is a prime number, otherwise return the string false.
The range will be between 1 and 2^16.
Examples
Input: 19
Output: true
Input: 110
Output: false
Author: Eda AYDIN
"""
# keep this function call here
print(PrimeTime(int(input())))
| [
37811,
198,
26405,
3862,
198,
11980,
262,
2163,
5537,
7575,
7,
22510,
8,
1011,
262,
997,
11507,
852,
3804,
290,
198,
7783,
262,
4731,
2081,
611,
262,
11507,
318,
257,
6994,
1271,
11,
4306,
1441,
262,
4731,
3991,
13,
198,
464,
2837,
... | 3.538462 | 104 |
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Constants for TA1 analytics.
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# Earlier NH Original Version
# 16 May 2013 SY
# 6 June 2013 MZ Schema update
# 3 Aug 2013 SY Another schema update
# *****************************************************************
# SPAR imports:
import spar_python.common.enum as enum
from spar_python.report_generation.common.results_schema import *
DBF_ALIAS1 = "dbf1"
DBF_ALIAS2 = "dbf2"
# field types in the results database:
FIELD_TYPES = enum.Enum("TEXT", "INTEGER", "REAL", "BOOL")
# field types in the test database:
TEST_FIELD_TYPES = enum.Enum("integer", "string", "enum", "date")
CATEGORIES = enum.Enum(
"EQ", "P1", "P2", "P3", "P4", "P6", "P7", "P8", "P9", "P11")
MODS_CATEGORIES = enum.Enum("insert", "delete", "update")
ATOMIC_CATEGORIES = [CATEGORIES.EQ,
CATEGORIES.P2,
CATEGORIES.P3,
CATEGORIES.P4,
CATEGORIES.P6,
CATEGORIES.P7,
CATEGORIES.P11]
SELECTION_COLS = ["*", "id"]
SUBCATEGORIES = {
CATEGORIES.P1: enum.Enum("eqand", "eqor", "eqdnf", "eqdeep",
"eqcnf", "eqnot", "otherand", "otheror",
"otheribm"),
CATEGORIES.P2: enum.Enum("range", "less", "greater"),
CATEGORIES.P6: enum.Enum("initialone", "middleone", "finalone",
#"middlemany"
),
CATEGORIES.P7: enum.Enum("initial", "both", "final", "other"),
CATEGORIES.P8: enum.Enum("eq", "other"),
CATEGORIES.P9: enum.Enum("eq", "alarmwords", "other"),
CATEGORIES.P11: enum.Enum("eqfull", "eqdoubleslash",
"rangefull", "rangedoubleslash")}
SUBSUBCATEGORIES = {
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqcnf): range(1, 9),
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqdeep): range(1, 9),
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqnot): range(1, 7),
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].otheribm): range(1, 3),
(CATEGORIES.P7, SUBCATEGORIES[CATEGORIES.P7].other): range(1, 18),
(CATEGORIES.P8, SUBCATEGORIES[CATEGORIES.P8].other): range(1, 7),
(CATEGORIES.P9, SUBCATEGORIES[CATEGORIES.P9].other): range(1, 7)}
# category names:
CATEGORY_NAMES = {
CATEGORIES.EQ: "Equality",
CATEGORIES.P1: "Boolean",
CATEGORIES.P2: "Range",
CATEGORIES.P3: "Keyword",
CATEGORIES.P4: "Stemming",
CATEGORIES.P6: "Wildcard Search",
CATEGORIES.P7: "Subsequence Search",
CATEGORIES.P8: "Threshold",
CATEGORIES.P9: "Ranking",
CATEGORIES.P11: "XML"}
CATEGORY_AND_SUBCATEGORY_NAMES = {
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqand):
"Conjunctions of Equalities",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqor):
"Disjunctions of Equalities",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqdnf):
"Disjunctions of Conjunctions of Equalities",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqcnf):
"Conjunctions of Disjunctions of Equalities",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqdeep):
"Boolean Formulas of Depth Greater than Two",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].eqnot):
"Boolean Formulas of Equalities Containing Negations",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].otherand):
"Conjunctions of Other Query Types",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].otheror):
"Disjunctions of Other Query Types",
(CATEGORIES.P1, SUBCATEGORIES[CATEGORIES.P1].otheribm):
"Conjunctions of one or two Equalities and a Range Query",
(CATEGORIES.P2, SUBCATEGORIES[CATEGORIES.P2].range):
"Two-Sided Range Queries",
(CATEGORIES.P2, SUBCATEGORIES[CATEGORIES.P2].greater):
"Greater-Than Range Queries",
(CATEGORIES.P2, SUBCATEGORIES[CATEGORIES.P2].less):
"Less-Than Range Queries",
(CATEGORIES.P8, SUBCATEGORIES[CATEGORIES.P8].eq):
"Threshold Queries over Equalities",
(CATEGORIES.P8, SUBCATEGORIES[CATEGORIES.P8].other):
"Threshold Queries over Other Query Types",
(CATEGORIES.P9, SUBCATEGORIES[CATEGORIES.P9].eq):
"Ranking Queries over Equalities",
(CATEGORIES.P9, SUBCATEGORIES[CATEGORIES.P9].other):
"Ranking Queries over Other Query Types",
(CATEGORIES.P11, SUBCATEGORIES[CATEGORIES.P11].eqfull):
"Full XML Equality Queries",
(CATEGORIES.P11, SUBCATEGORIES[CATEGORIES.P11].rangefull):
"Full XML Range Queries",
(CATEGORIES.P11, SUBCATEGORIES[CATEGORIES.P11].eqdoubleslash):
"Leaf Node XML Equality Queries",
(CATEGORIES.P11, SUBCATEGORIES[CATEGORIES.P11].rangedoubleslash):
"Leaf Node XML Range Queries"}
# indicates which atomic categories are applicable to which test field types:
CATEGORY_TO_FIELDS = {
CATEGORIES.EQ: TEST_FIELD_TYPES.numbers_list(),
CATEGORIES.P2: TEST_FIELD_TYPES.numbers_list(),
CATEGORIES.P3: [TEST_FIELD_TYPES.string],
CATEGORIES.P4: [TEST_FIELD_TYPES.string],
CATEGORIES.P6: [TEST_FIELD_TYPES.string],
CATEGORIES.P7: [TEST_FIELD_TYPES.string],
CATEGORIES.P11: [TEST_FIELD_TYPES.string]}
MOD_CATEGORIES = enum.Enum("insert", "delete", "update")
### FOR STORING RESULTS IN A DB ###
DBA_TABLENAME = "atomic_queries"
DBA_AQID = "aqid"
DBA_CAT = "category"
DBA_SUBCAT = "sub_category"
DBA_SUBSUBCAT = "sub_sub_category"
DBA_NUMRECORDS = "db_num_records"
DBA_RECORDSIZE = "db_record_size"
DBA_WHERECLAUSE = "where_clause"
DBA_NUMMATCHINGRECORDS = "num_matching_records"
DBA_FIELD = "field"
DBA_FIELDTYPE = "field_type"
DBA_KEYWORDLEN = "keyword_len"
DBA_RANGE = "range"
DBA_FIELDS_TO_TYPES = {
DBA_AQID: FIELD_TYPES.INTEGER,
DBA_CAT: FIELD_TYPES.TEXT,
DBA_SUBCAT: FIELD_TYPES.TEXT,
DBA_SUBSUBCAT: FIELD_TYPES.TEXT,
DBA_NUMRECORDS: FIELD_TYPES.INTEGER,
DBA_RECORDSIZE: FIELD_TYPES.INTEGER,
DBA_WHERECLAUSE: FIELD_TYPES.TEXT,
DBA_NUMMATCHINGRECORDS: FIELD_TYPES.INTEGER,
DBA_FIELD: FIELD_TYPES.TEXT,
DBA_FIELDTYPE: FIELD_TYPES.TEXT,
DBA_KEYWORDLEN: FIELD_TYPES.INTEGER,
DBA_RANGE: FIELD_TYPES.INTEGER}
DBA_REQUIRED_FIELDS = [
DBA_AQID,
DBA_CAT,
DBA_NUMRECORDS,
DBA_RECORDSIZE,
DBA_WHERECLAUSE,
DBA_NUMMATCHINGRECORDS,
DBA_FIELD,
DBA_FIELDTYPE]
DBF_TABLENAME = "full_queries"
DBF_FQID = "qid"
DBF_CAT = "category"
DBF_SUBCAT = "sub_category"
DBF_SUBSUBCAT = "sub_sub_category"
DBF_NUMRECORDS = "db_num_records"
DBF_RECORDSIZE = "db_record_size"
DBF_WHERECLAUSE = "where_clause"
DBF_P8M = "p8_m"
DBF_P8N = "p8_n"
DBF_P9MATCHINGRECORDCOUNTS = "p9_matching_record_counts"
DBF_NUMMATCHINGRECORDS = "num_matching_records"
DBF_MATCHINGRECORDIDS = "matching_record_ids"
DBF_MATCHINGRECORDHASHES = "matching_record_hashes"
DBF_P1ANDNUMRECORDSMATCHINGFIRSTTERM = "p1_and_num_records_matching_first_term"
DBF_P1ORSUMRECORDSMATCHINGEACHTERM = "p1_or_sum_records_matching_each_term"
DBF_P1NEGATEDTERM = "p1_negated_term"
DBF_P1NUMTERMSPERCLAUSE = "p1_num_terms_per_clause"
DBF_P1NUMCLAUSES = "p1_num_clauses"
DBF_P1DEPTH = "p1_depth"
DBF_REJECTINGPOLICIES = "rejecting_policies"
DBF_IBM1SUPPORTED = "supported_by_ibm_ta1"
DBF_IBM2SUPPORTED = "supported_by_ibm_ta2"
DBF_COLUMBIASUPPORTED = "supported_by_columbia"
DBF_SELECTSTAR = "run_in_select_star_mode"
DBF_FIELDS_TO_TYPES = {
DBF_FQID: FIELD_TYPES.INTEGER,
DBF_CAT: FIELD_TYPES.TEXT,
DBF_SUBCAT: FIELD_TYPES.TEXT,
DBF_SUBSUBCAT: FIELD_TYPES.TEXT,
DBF_NUMRECORDS: FIELD_TYPES.INTEGER,
DBF_RECORDSIZE: FIELD_TYPES.INTEGER,
DBF_WHERECLAUSE: FIELD_TYPES.TEXT,
DBF_P8M: FIELD_TYPES.INTEGER,
DBF_P8N: FIELD_TYPES.INTEGER,
DBF_P9MATCHINGRECORDCOUNTS: FIELD_TYPES.TEXT,
DBF_NUMMATCHINGRECORDS: FIELD_TYPES.INTEGER,
DBF_MATCHINGRECORDIDS: FIELD_TYPES.TEXT,
DBF_MATCHINGRECORDHASHES: FIELD_TYPES.TEXT,
DBF_P1ANDNUMRECORDSMATCHINGFIRSTTERM: FIELD_TYPES.INTEGER,
DBF_P1ORSUMRECORDSMATCHINGEACHTERM: FIELD_TYPES.INTEGER,
DBF_P1NEGATEDTERM: FIELD_TYPES.TEXT,
DBF_P1NUMTERMSPERCLAUSE: FIELD_TYPES.INTEGER,
DBF_P1NUMCLAUSES: FIELD_TYPES.INTEGER,
DBF_P1DEPTH: FIELD_TYPES.INTEGER,
DBF_REJECTINGPOLICIES: FIELD_TYPES.TEXT,
DBF_IBM1SUPPORTED: FIELD_TYPES.BOOL,
DBF_IBM2SUPPORTED: FIELD_TYPES.BOOL,
DBF_COLUMBIASUPPORTED: FIELD_TYPES.BOOL,
DBF_SELECTSTAR: FIELD_TYPES.BOOL}
DBF_REQUIRED_FIELDS = [
DBF_FQID,
DBF_CAT,
DBF_NUMRECORDS,
DBF_RECORDSIZE,
DBF_WHERECLAUSE]
DBP_TABLENAME = "performer_queries"
DBP_FQID = "qid"
DBP_PERFORMERNAME = "performer"
DBP_TESTCASEID = "test_case_id"
DBP_SELECTIONCOLS = "selection_cols"
DBP_SENDTIME = "send_time"
DBP_RESULTSTIME = "results_time"
DBP_QUERYLATENCY = "query_latency"
DBP_EVENTMSGTIMES = "eventmsg_times"
DBP_EVENTMSGIDS = "eventmsg_ids"
DBP_EVENTMSGVALS = "eventmsg_vals"
DBP_NUMNEWRETURNEDRECORDS = "num_noncached_returned_records"
DBP_RETURNEDRECORDIDS = "returned_record_ids"
DBP_RETURNEDRECORDHASHES = "returned_record_hashes"
DBP_NUMTHREADS = "num_threads"
DBP_STATUS = "status"
DBP_CURRENTPOLICIES = "current_policies"
DBP_ISCORRECT = "correctness"
DBP_ISMODIFICATIONQUERY = "modification_query"
DBP_ISTHROUGHPUTQUERY = "throughput_query"
DBP_FIELDS_TO_TYPES = {
DBP_FQID: FIELD_TYPES.INTEGER,
DBP_PERFORMERNAME: FIELD_TYPES.TEXT,
DBP_TESTCASEID: FIELD_TYPES.TEXT,
DBP_SELECTIONCOLS: FIELD_TYPES.TEXT,
DBP_SENDTIME: FIELD_TYPES.REAL,
DBP_RESULTSTIME: FIELD_TYPES.REAL,
DBP_QUERYLATENCY: FIELD_TYPES.REAL,
DBP_EVENTMSGTIMES: FIELD_TYPES.TEXT,
DBP_EVENTMSGIDS: FIELD_TYPES.TEXT,
DBP_EVENTMSGVALS: FIELD_TYPES.TEXT,
DBP_NUMNEWRETURNEDRECORDS: FIELD_TYPES.INTEGER,
DBP_RETURNEDRECORDIDS: FIELD_TYPES.TEXT,
DBP_RETURNEDRECORDHASHES: FIELD_TYPES.TEXT,
DBP_NUMTHREADS: FIELD_TYPES.INTEGER,
DBP_STATUS: FIELD_TYPES.TEXT,
DBP_CURRENTPOLICIES: FIELD_TYPES.TEXT,
DBP_ISCORRECT: FIELD_TYPES.BOOL,
DBP_ISMODIFICATIONQUERY: FIELD_TYPES.BOOL,
DBP_ISTHROUGHPUTQUERY: FIELD_TYPES.BOOL}
DBP_REQUIRED_FIELDS = [
DBP_PERFORMERNAME,
DBP_TESTCASEID,
DBP_FQID,
DBP_SELECTIONCOLS,
DBP_ISMODIFICATIONQUERY,
DBP_ISTHROUGHPUTQUERY]
MODS_TABLENAME = "mods"
MODS_MID = "mid"
MODS_CATEGORY = "category"
MODS_NUMRECORDS = "db_num_records"
MODS_RECORDSIZE = "db_record_size"
MODS_RECORDID = "record_id"
MODS_FIELDS_TO_TYPES = {
MODS_MID: FIELD_TYPES.INTEGER,
MODS_CATEGORY: FIELD_TYPES.TEXT,
MODS_NUMRECORDS: FIELD_TYPES.INTEGER,
MODS_RECORDSIZE: FIELD_TYPES.INTEGER,
MODS_RECORDID: FIELD_TYPES.INTEGER}
MODS_REQUIRED_FIELDS = [
MODS_MID,
MODS_CATEGORY,
MODS_NUMRECORDS,
MODS_RECORDSIZE,
MODS_RECORDID]
MODQUERIES_TABLENAME = "mod_queries"
MODQUERIES_QID = "qid"
MODQUERIES_WHERECLAUSE = "where_clause"
MODQUERIES_NUMRECORDS = "db_num_records"
MODQUERIES_RECORDSIZE = "db_record_size"
MODQUERIES_MID = "mid"
MODQUERIES_FIELDS_TO_TYPES = {
MODQUERIES_QID: FIELD_TYPES.INTEGER,
MODQUERIES_WHERECLAUSE: FIELD_TYPES.TEXT,
MODQUERIES_NUMRECORDS: FIELD_TYPES.INTEGER,
MODQUERIES_RECORDSIZE: FIELD_TYPES.INTEGER,
MODQUERIES_MID: FIELD_TYPES.INTEGER}
MODQUERIES_REQUIRED_FIELDS = [
MODQUERIES_QID,
MODQUERIES_WHERECLAUSE,
MODQUERIES_NUMRECORDS,
MODQUERIES_RECORDSIZE,
MODQUERIES_MID]
M2MQ_TABLENAME = "mods_to_modqueries"
M2MQ_QID = "qid"
M2MQ_MID = "mid"
M2MQ_PREIDS = "pre_matching_record_ids"
M2MQ_PREHASHES = "pre_matching_record_hashes"
M2MQ_POSTIDS = "post_matching_record_ids"
M2MQ_POSTHASHES = "post_matching_record_hashes"
M2MQ_FIELDS_TO_TYPES = {
M2MQ_QID: FIELD_TYPES.INTEGER,
M2MQ_MID: FIELD_TYPES.INTEGER,
M2MQ_PREIDS: FIELD_TYPES.TEXT,
M2MQ_PREHASHES: FIELD_TYPES.TEXT,
M2MQ_POSTIDS: FIELD_TYPES.TEXT,
M2MQ_POSTHASHES: FIELD_TYPES.TEXT}
M2MQ_REQUIRED_FIELDS = [
M2MQ_QID,
M2MQ_MID,
M2MQ_PREIDS,
M2MQ_PREHASHES,
M2MQ_POSTIDS,
M2MQ_POSTHASHES]
PMODS_TABLENAME = "performer_mods"
PMODS_PERFORMER = "performer"
PMODS_TESTCASEID = "test_case_id"
PMODS_MID = "mid"
PMODS_SENDTIME = "send_time"
PMODS_RESULTSTIME = "results_time"
PMODS_MODLATENCY = "mod_latency"
PMODS_EVENTMSGTIMES = "eventmsg_times"
PMODS_EVENTMSGIDS = "eventmsg_ids"
PMODS_EVENTMSGVALS = "eventmsg_vals"
PMODS_STATUS = "status"
PMODS_FIELDS_TO_TYPES = {
PMODS_PERFORMER: FIELD_TYPES.TEXT,
PMODS_TESTCASEID: FIELD_TYPES.TEXT,
PMODS_MID: FIELD_TYPES.INTEGER,
PMODS_SENDTIME: FIELD_TYPES.REAL,
PMODS_RESULTSTIME: FIELD_TYPES.REAL,
PMODS_MODLATENCY: FIELD_TYPES.REAL,
PMODS_EVENTMSGTIMES: FIELD_TYPES.TEXT,
PMODS_EVENTMSGIDS: FIELD_TYPES.TEXT,
PMODS_EVENTMSGVALS: FIELD_TYPES.TEXT,
PMODS_STATUS: FIELD_TYPES.TEXT}
PMODS_REQUIRED_FIELDS = [
PMODS_PERFORMER,
PMODS_TESTCASEID,
PMODS_MID]
F2A_TABLENAME = "full_to_atomic_junction"
F2A_AQID= "atomic_row_id"
F2A_FQID = "full_row_id"
F2A_FIELDS_TO_TYPES = {
F2A_AQID: FIELD_TYPES.INTEGER,
F2A_FQID: FIELD_TYPES.INTEGER}
F2A_REQUIRED_FIELDS = [F2A_AQID, F2A_FQID]
F2F_TABLENAME = "full_to_full_junction"
F2F_COMPOSITEQID = "composite_full_query"
F2F_BASEQID = "base_full_query"
F2F_FIELDS_TO_TYPES = {
F2F_COMPOSITEQID: FIELD_TYPES.INTEGER,
F2F_BASEQID: FIELD_TYPES.INTEGER}
F2F_REQUIRED_FIELDS = [F2F_COMPOSITEQID, F2F_BASEQID]
PVER_TABLENAME = "performer_verifications"
PVER_PERFORMER = "performer"
PVER_TESTCASEID = "test_case_id"
PVER_RECORDID = "record_id"
PVER_VERIFICATION = "verification"
PVER_SENDTIME = "send_time"
PVER_RESULTSTIME = "results_time"
PVER_VERIFICATIONLATENCY = "verification_latency"
PVER_MODLATENCY = "mod_latency"
PVER_EVENTMSGTIMES = "eventmsg_times"
PVER_EVENTMSGIDS = "eventmsg_ids"
PVER_EVENTMSGVALS = "eventmsg_vals"
PVER_STATUS = "status"
PVER_CORRECTNESS = "correctness"
PVER_FIELDS_TO_TYPES = {
PVER_PERFORMER: FIELD_TYPES.TEXT,
PVER_TESTCASEID: FIELD_TYPES.TEXT,
PVER_RECORDID: FIELD_TYPES.INTEGER,
PVER_VERIFICATION: FIELD_TYPES.BOOL,
PVER_SENDTIME: FIELD_TYPES.REAL,
PVER_RESULTSTIME: FIELD_TYPES.REAL,
PVER_VERIFICATIONLATENCY: FIELD_TYPES.REAL,
PVER_EVENTMSGTIMES: FIELD_TYPES.TEXT,
PVER_EVENTMSGIDS: FIELD_TYPES.TEXT,
PVER_EVENTMSGVALS: FIELD_TYPES.TEXT,
PVER_STATUS: FIELD_TYPES.TEXT,
PVER_CORRECTNESS: FIELD_TYPES.BOOL}
PVER_REQUIRED_FIELDS = [
PVER_PERFORMER,
PVER_TESTCASEID,
PVER_VERIFICATION,
PVER_VERIFICATIONLATENCY]
TABLENAME_TO_FIELDTOTYPE = {
DBA_TABLENAME: DBA_FIELDS_TO_TYPES,
DBF_TABLENAME: DBF_FIELDS_TO_TYPES,
DBP_TABLENAME: DBP_FIELDS_TO_TYPES,
MODS_TABLENAME: MODS_FIELDS_TO_TYPES,
MODQUERIES_TABLENAME: MODQUERIES_FIELDS_TO_TYPES,
M2MQ_TABLENAME: M2MQ_FIELDS_TO_TYPES,
PMODS_TABLENAME: PMODS_FIELDS_TO_TYPES,
F2A_TABLENAME: F2A_FIELDS_TO_TYPES,
F2F_TABLENAME: F2F_FIELDS_TO_TYPES,
PVER_TABLENAME: PVER_FIELDS_TO_TYPES}
TABLENAME_TO_REQUIREDFIELDS = {
DBA_TABLENAME: DBA_REQUIRED_FIELDS,
DBF_TABLENAME: DBF_REQUIRED_FIELDS,
DBP_TABLENAME: DBP_REQUIRED_FIELDS,
MODS_TABLENAME: MODS_REQUIRED_FIELDS,
MODQUERIES_TABLENAME: MODQUERIES_REQUIRED_FIELDS,
M2MQ_TABLENAME: M2MQ_REQUIRED_FIELDS,
PMODS_TABLENAME: PMODS_REQUIRED_FIELDS,
F2A_TABLENAME: F2A_REQUIRED_FIELDS,
F2F_TABLENAME: F2F_REQUIRED_FIELDS,
PVER_TABLENAME: PVER_REQUIRED_FIELDS}
# a dictionary of all pipe-delimited list fields, in (table, field) form,
# mapped to the type of their elements:
LIST_FIELDS = {
(DBF_TABLENAME, DBF_MATCHINGRECORDIDS): int,
(DBF_TABLENAME, DBF_MATCHINGRECORDHASHES): str,
(DBF_TABLENAME, DBF_REJECTINGPOLICIES): str,
(DBF_TABLENAME, DBF_P9MATCHINGRECORDCOUNTS): int,
(DBF_TABLENAME, DBF_P1NEGATEDTERM): int,
(DBP_TABLENAME, DBP_RETURNEDRECORDIDS): int,
(DBP_TABLENAME, DBP_RETURNEDRECORDHASHES): str,
(DBP_TABLENAME, DBP_CURRENTPOLICIES): str,
(DBP_TABLENAME, DBP_EVENTMSGTIMES): float,
(DBP_TABLENAME, DBP_EVENTMSGIDS): str,
(DBP_TABLENAME, DBP_EVENTMSGVALS): str,
(DBP_TABLENAME, DBP_STATUS): str,
(M2MQ_TABLENAME, M2MQ_PREIDS): int,
(M2MQ_TABLENAME, M2MQ_PREHASHES): str,
(M2MQ_TABLENAME, M2MQ_POSTIDS): int,
(M2MQ_TABLENAME, M2MQ_POSTHASHES): str,
(PMODS_TABLENAME, PMODS_STATUS): str,
(PMODS_TABLENAME, PMODS_EVENTMSGTIMES): float,
(PMODS_TABLENAME, PMODS_EVENTMSGIDS): str,
(PMODS_TABLENAME, PMODS_EVENTMSGVALS): str,
(PVER_TABLENAME, PVER_STATUS): str,
(PVER_TABLENAME, PVER_EVENTMSGTIMES): float,
(PVER_TABLENAME, PVER_EVENTMSGIDS): str,
(PVER_TABLENAME, PVER_EVENTMSGVALS): str}
# a dictionary mapping each table to auxiliary lines necessary in its
# construction:
TABLENAME_TO_AUX = {
DBA_TABLENAME:
",".join(["UNIQUE (%s, %s, %s)"
% (DBA_NUMRECORDS, DBA_RECORDSIZE, DBA_WHERECLAUSE),
"UNIQUE (%s)" % DBA_AQID]),
DBF_TABLENAME: "UNIQUE (%s)" % DBF_FQID,
DBP_TABLENAME:
",".join(["FOREIGN KEY (%s) REFERENCES %s (%s)" %
(DBP_FQID, DBF_TABLENAME, DBF_FQID),
"UNIQUE (%s, %s)" %
(DBP_SENDTIME, DBP_RESULTSTIME)]),
F2A_TABLENAME:
"".join(["FOREIGN KEY (%s) REFERENCES %s (ROWID), " %
(F2A_FQID, DBF_TABLENAME),
"FOREIGN KEY (%s) REFERENCES %s (ROWID)" %
(F2A_AQID, DBA_TABLENAME)]),
PMODS_TABLENAME: "UNIQUE (%s, %s)" % (PMODS_SENDTIME, PMODS_RESULTSTIME),
PVER_TABLENAME: "UNIQUE (%s, %s)" % (PVER_SENDTIME, PVER_RESULTSTIME)}
PERFORMER_TABLENAMES = set(
[DBP_TABLENAME, PMODS_TABLENAME, PVER_TABLENAME])
# a list of the non-performer tables, in order from most to least likely to be
# the primary table.
OTHER_TABLENAMES_HEIRARCHY = [
DBA_TABLENAME, DBF_TABLENAME, MODQUERIES_TABLENAME, MODS_TABLENAME]
# map of primary table to necessary joins (denoted tuples of the
# form (source_table, source_field, target_table, target_field,
# target_table_alias (None if not applicable))):
TABLENAME_TO_JOINS = {}
for tablename in TABLENAME_TO_FIELDTOTYPE.keys():
TABLENAME_TO_JOINS[tablename] = []
TABLENAME_TO_JOINS[MODQUERIES_TABLENAME] = [
(MODQUERIES_TABLENAME,
MODQUERIES_MID,
MODS_TABLENAME,
MODS_MID,
None)]
TABLENAME_TO_JOINS[PMODS_TABLENAME] = [
(PMODS_TABLENAME,
PMODS_MID,
MODS_TABLENAME,
MODS_MID,
None)]
TABLENAME_TO_JOINS[DBP_TABLENAME] = [
(DBP_TABLENAME,
DBP_FQID,
DBF_TABLENAME,
DBF_FQID,
None)]
# the DBF_TABLENAME value in TABLENAME_TO_JOINS should be overridden if we
# care to connect to atomic or other full queries
| [
2,
41906,
17174,
9,
198,
2,
220,
15069,
2211,
17168,
12406,
18643,
220,
220,
198,
2,
220,
4935,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
6226,
1503,
198,
2,
220,
46665,
25,
220,
220,
220,
220,
220,
220,
220,
22... | 1.920606 | 9,774 |
from abc import abstractmethod
from typing import List, Any
| [
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
7343,
11,
4377,
628,
628,
628,
628
] | 3.941176 | 17 |
# Copyright 2019-2020 Typo. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at or by Typo (https://www.typo.ai/).
from io import StringIO
import json
import unittest
from unittest.mock import patch
import target_typo.__init__ as init
from target_typo.typo import TypoTarget
TYPO_1 = TypoTarget(config=generate_config())
TYPO_1.token = '123'
DATASET = 'dataset'
DATA = {
'date': '2019-06-23',
'user': 'testuser'
}
if __name__ == '__main__':
unittest.main()
| [
2,
15069,
13130,
12,
42334,
17134,
78,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
2... | 3.217391 | 322 |
#Menu for the circuit configurations (returns circuit string)
# ------------------------------------------------------
# Copyright (C) 2020 Gustavo Rodriguez Gutierrez
# Licensed under the MIT license, see LICENSE.
# ------------------------------------------------------
from tkinter import *
from PIL import ImageTk,Image
loopc = Tk()
#Images for circuits
c_image1 = ImageTk.PhotoImage(file="images/c_ladder.png")
c_image2 = ImageTk.PhotoImage(file="images/c_randles.png")
c_image3 = ImageTk.PhotoImage(file="images/c_voigt2.png")
c_image4 = ImageTk.PhotoImage(file="images/c_voigt3.png")
#Circuit Parameters
circStr='1'
param='1'
LB='1'
HB='1'
#(a, b, c, d)=fscircuit()
#print(a, b, c, d)
| [
2,
23381,
329,
262,
10349,
25412,
357,
7783,
82,
10349,
4731,
8,
201,
198,
2,
20368,
19351,
438,
201,
198,
2,
15069,
357,
34,
8,
12131,
43715,
78,
19391,
48283,
201,
198,
2,
49962,
739,
262,
17168,
5964,
11,
766,
38559,
24290,
13,
... | 2.787645 | 259 |
from discord.ext import commands
import arrow
import sqlite3
from libraries import emoji_literals
SQLDATABASE = "data/database.db"
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
15452,
198,
11748,
44161,
578,
18,
198,
6738,
12782,
1330,
44805,
62,
17201,
874,
628,
198,
17861,
35,
1404,
6242,
11159,
796,
366,
7890,
14,
48806,
13,
9945,
1,
628,
628
] | 3.487179 | 39 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['CloudProviderSnapshotArgs', 'CloudProviderSnapshot']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.464567 | 127 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'枚举类的练习'
__author__ = 'Jacklee'
# 导入模块
#import types
# 月份常量
JAN = 1
FEB = 2
MAR = 3
# 枚举类
from enum import Enum, unique
## 第一种定义方式
@unique
## 第二种定义方式
WeekDay = Enum('WeekDay', ('Mon', 'Tue', 'Wed', 'Tru', 'Fri', 'Sat', 'Sun'))
## 类的组成,JAN ... 是一个类成员
print('Month类的成员: ', dir(Month))
m = Month(0)
print(m.name, m.value)
print('Month对象实例的成员: ', dir(m))
m = Month(1)
print(m.name, m.value)
m = Month(2)
print(m.name, m.value)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6,
162,
252,
248,
10310,
122,
163,
109,
119,
21410,
163,
119,
225,
20046,
254,
6,
198,
198,
834,
9800,
834... | 1.525316 | 316 |
#!/usr/bin/env python
# Copyright 2019 Jon Azpiazu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gitlab
import os
import datetime
from gitlab_helper import GitlabHelper
import click
import logging
@click.command()
@click.option(
'--skip-archived/--no-skip-archived', default=True, help='Skip archived projects'
)
@click.argument('gitlab-url')
@click.argument('private-token')
@click.option('--group-name', required=True, help='Group name to process')
@click.option(
'--max-days', default=10, help='Max number of days to consider a pipeline fresh'
)
@click.option(
'--dry-run/--no-dry-run)',
default=False,
help='Do not actually launch the pipelines',
)
@click.option('--log-level', default='ERROR', help='Log level')
if __name__ == '__main__':
bot_pipelines()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
220,
220,
220,
15069,
13130,
5966,
7578,
79,
17890,
84,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 3.095794 | 428 |
'''
Helper classes for handling SAT Formulas
''' | [
7061,
6,
198,
47429,
6097,
329,
9041,
29020,
5178,
25283,
198,
7061,
6
] | 3.692308 | 13 |
#!/usr/bin/env python
# X86 architectures
# Author: Matej Kastak
from . import Architecture
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
1395,
4521,
45619,
198,
2,
6434,
25,
24787,
73,
509,
459,
461,
198,
198,
6738,
764,
1330,
29778,
628,
198
] | 3.166667 | 30 |
# https://www.hackerrank.com/challenges/cut-the-sticks
n = int(input().strip())
arr = list(map(int, input().strip().split(' ')))
while len(arr) > 0:
arr2 = []
for e in arr:
e -= min(arr)
if e > 0:
arr2.append(e)
print(len(arr))
arr = arr2
| [
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
8968,
12,
1169,
12,
34810,
628,
198,
77,
796,
493,
7,
15414,
22446,
36311,
28955,
198,
3258,
796,
1351,
7,
8899,
7,
600,
11,
5128,
22446,
36311,
22446,
35312,... | 2.072464 | 138 |
import pytest
from betamax import Betamax
from currencycloud import Client, Config
from currencycloud.errors import NotFoundError
from currencycloud.resources import *
| [
11748,
12972,
9288,
198,
6738,
731,
321,
897,
1330,
5147,
321,
897,
198,
198,
6738,
7395,
17721,
1330,
20985,
11,
17056,
198,
6738,
7395,
17721,
13,
48277,
1330,
1892,
21077,
12331,
198,
6738,
7395,
17721,
13,
37540,
1330,
1635,
628
] | 4.25 | 40 |
from django.conf.urls import include, url
from django.contrib import admin
import project.views
admin.autodiscover()
urlpatterns = [
url(r'^$', project.views.home),
url(r'^catalog/', include('catalog.urls')),
url(r'^tenders/', include('tenders.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^articles/$', project.views.articles),
url(r'^article/(?P<article_id>[0-9]+)/$', project.views.article),
url(r'^content/ajax/get-article/$', project.views.ajaxGetArticle),
url(r'^content/ajax/save-article/$', project.views.ajaxSaveArticle),
url(r'^content/categories/$', project.views.editCategories),
url(r'^content/ajax/add-category/$', project.views.ajaxAddCategory),
url(r'^content/ajax/save-category/$', project.views.ajaxSaveCategory),
url(r'^content/ajax/switch-category-state/$', project.views.ajaxSwitchCategoryState),
url(r'^content/ajax/trash-category/$', project.views.ajaxTrashCategory),
url(r'^logs/$', project.views.logs),
url(r'^ajax/login/$', project.views.ajax_login),
url(r'^logout/$', project.views.logout_view),
url(r'^ajax/create-username/$', project.views.ajax_create_username),
url(r'^ajax/register/$', project.views.ajax_register),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
11748,
1628,
13,
33571,
198,
198,
28482,
13,
2306,
375,
29392,
3419,
198,
198,
6371,
33279,
82,
796,
68... | 2.617582 | 455 |
import itertools
#
from .... import global_tools, global_var
def incremental_programs(ax,
df_programs,
diff_init = False,
smoother = None,
):
"""
Draws in a subplot the expected availability programs
of a set of production assets.
:param ax: The ax to fill
:param df: The expected availability programs
:param diff_init: Boolean to plot relative differences
with the initial date
:param smoother: Boolean to draw oblique instead of vertical steps
:type ax: matplotlib.axes._subplots.AxesSubplot
:type df: pd.DataFrame
:type diff_init: bool
:type smoother: bool
:return: None
:rtype: None
"""
### Plot init
if diff_init:
df_programs = df_programs - df_programs.iloc[:,[0]].values
dd = df_programs.columns[0]
ds_program = df_programs.loc[:,dd]
X, Y = global_tools.piecewise_constant_interpolators(ds_program.index,
ds_program,
smoother = smoother,
)
ax.plot(X,
Y,
label = global_tools.format_latex('init - {0}'.format(dd.strftime(format = '%Y-%m-%d %H:%M %Z'))),
color = 'k',
ls = ':',
)
### Plot programs
for ii, (dd, ds_program) in itertools.islice(enumerate(df_programs.items()), int(diff_init), None):
X, Y = global_tools.piecewise_constant_interpolators(ds_program.index,
ds_program,
smoother = smoother,
)
ax.plot(X,
Y,
label = global_tools.format_latex(dd.strftime(format = '%Y-%m-%d %H:%M %Z')),
color = global_var.colors[ii],
)
### Plot nameplate capacity
if not diff_init:
ax.plot([df_programs.index.min(), df_programs.index.max()],
[df_programs.values.max() for kk in range(2)],
ls = ':',
linewidth = 0.5,
color = 'k',
label = 'nameplate capacity',
)
| [
198,
198,
11748,
340,
861,
10141,
198,
2,
198,
6738,
19424,
1330,
3298,
62,
31391,
11,
3298,
62,
7785,
628,
198,
198,
4299,
29497,
62,
23065,
82,
7,
897,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.698722 | 1,487 |
#!/usr/bin/env python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from {{cookiecutter.project_name}} import skeleton
if __name__ == "__main__":
from unittest import TestLoader, TextTestRunner
suite = TestLoader().loadTestsFromTestCase(SkeletonTestCase)
TextTestRunner(verbosity=2).run(suite)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
6738,
22935,
44453,
8968,
353,
13,
... | 2.771186 | 118 |
import pygame
from settings import *
from player import Player
from map import world_map
'''
pygame.draw.line(screen, RED, player.pos, (player.x + RENDER_DIST * math.cos(player.angle)
, player.y + RENDER_DIST * math.sin(player.angle)))
pygame.draw.line(screen, PURPLE, player.pos, (player.x + RENDER_DIST * math.cos(player.angle + FOV/2)
, player.y + RENDER_DIST * math.sin(player.angle + FOV/2)))
pygame.draw.line(screen, PURPLE, player.pos, (player.x + RENDER_DIST * math.cos(player.angle - FOV/2)
, player.y + RENDER_DIST * math.sin(player.angle - FOV/2)))
'''
'''
def Raycasting(screen, player):
x0,y0 = player.pos
for ray_n in range(N_RAYS):
ray_angle = player.angle + ray_n * D_ANGLE - FOV/2
sin_a = math.sin(ray_angle)
cos_a = math.cos(ray_angle)
for dist in range(20,RENDER_DIST,2):
x = x0 + dist * cos_a
y = y0 + dist * sin_a
if(x//TILE*TILE,y//TILE*TILE) in world_map:
dist*= math.cos(player.angle - ray_angle)
proj_height = WALL_SCALE*PROJ_COEFF/dist
c = 255/ (1+dist*dist*0.0001)
color = (c,c/2,c/2)
pygame.draw.rect(screen,color,(ray_n*SCALE,HEIGHT/2-proj_height//2,SCALE,proj_height))
break
#pygame.draw.line(screen, DARKGRAY, player.pos, (x,y), 2 )
# Guide lines
#pygame.draw.line(screen, RED, player.pos, (player.x + RENDER_DIST * math.cos(player.angle)
# , player.y + RENDER_DIST * math.sin(player.angle)))
#pygame.draw.line(screen, PURPLE, player.pos, (player.x + RENDER_DIST * math.cos(player.angle + FOV/2)
# , player.y + RENDER_DIST * math.sin(player.angle + FOV/2)))
#pygame.draw.line(screen, PURPLE, player.pos, (player.x + RENDER_DIST * math.cos(player.angle - FOV/2)
# , player.y + RENDER_DIST * math.sin(player.angle - FOV/2)))
''' | [
11748,
12972,
6057,
198,
6738,
6460,
1330,
1635,
198,
6738,
2137,
1330,
7853,
198,
6738,
3975,
1330,
995,
62,
8899,
628,
198,
7061,
6,
198,
220,
220,
220,
12972,
6057,
13,
19334,
13,
1370,
7,
9612,
11,
23848,
11,
2137,
13,
1930,
11,... | 1.841432 | 1,173 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test show-and-tell model is TPU compatible."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
# Standard Imports
import numpy as np
import tensorflow as tf
import configuration
import show_and_tell_model
tpu = tf.contrib.tpu
@contextlib.contextmanager
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.test.main()
| [
2,
15069,
2864,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.752508 | 299 |
#!/usr/bin/env python
# Copyright (c) 2019: Jianyu Chen (jianyuchen@berkeley.edu).
#
# This file is modified from <https://github.com/carla-simulator/carla>:
# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from enum import Enum
from collections import deque
import random
import numpy as np
import carla
from gym_carla.envs.misc import distance_vehicle, is_within_distance_ahead, compute_magnitude_angle
class RoadOption(Enum):
"""
RoadOption represents the possible topological configurations when moving from a segment of lane to other.
"""
VOID = -1
LEFT = 1
RIGHT = 2
STRAIGHT = 3
LANEFOLLOW = 4
def retrieve_options(list_waypoints, current_waypoint):
"""
Compute the type of connection between the current active waypoint and the multiple waypoints present in
list_waypoints. The result is encoded as a list of RoadOption enums.
:param list_waypoints: list with the possible target waypoints in case of multiple options
:param current_waypoint: current active waypoint
:return: list of RoadOption enums representing the type of connection from the active waypoint to each
candidate in list_waypoints
"""
options = []
for next_waypoint in list_waypoints:
# this is needed because something we are linking to
# the beggining of an intersection, therefore the
# variation in angle is small
next_next_waypoint = next_waypoint.next(3.0)[0]
link = compute_connection(current_waypoint, next_next_waypoint)
options.append(link)
return options
def compute_connection(current_waypoint, next_waypoint):
"""
Compute the type of topological connection between an active waypoint (current_waypoint) and a target waypoint
(next_waypoint).
:param current_waypoint: active waypoint
:param next_waypoint: target waypoint
:return: the type of topological connection encoded as a RoadOption enum:
RoadOption.STRAIGHT
RoadOption.LEFT
RoadOption.RIGHT
"""
n = next_waypoint.transform.rotation.yaw
n = n % 360.0
c = current_waypoint.transform.rotation.yaw
c = c % 360.0
diff_angle = (n - c) % 180.0
if diff_angle < 1.0:
return RoadOption.STRAIGHT
elif diff_angle > 90.0:
return RoadOption.LEFT
else:
return RoadOption.RIGHT
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
357,
66,
8,
13130,
25,
40922,
24767,
12555,
357,
73,
666,
88,
1229,
831,
31,
527,
13490,
13,
15532,
737,
198,
2,
198,
2,
770,
2393,
318,
9518,
422,
1279,
5450,
1378,... | 3.170635 | 756 |
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import time
from typing import Optional, Dict
import numpy as np
from jina.executors.decorators import batching, as_ndarray
from jina.executors.devices import TorchDevice
from jina.executors.encoders import BaseEncoder
if False:
import torch
HTTP_SERVICE_UNAVAILABLE = 503
class TransformerTorchEncoder(TorchDevice, BaseEncoder):
"""
Wraps the pytorch version of transformers from huggingface.
:param pretrained_model_name_or_path: Either:
- a string, the `model id` of a pretrained model hosted
inside a model repo on huggingface.co, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g.:
``./my_model_directory/``.
:param base_tokenizer_model: The name of the base model to use for creating
the tokenizer. If None, will be equal to `pretrained_model_name_or_path`.
:param pooling_strategy: the strategy to merge the word embeddings into the
chunk embedding. Supported strategies include 'cls', 'mean', 'max', 'min'.
:param layer_index: index of the transformer layer that is used to create
encodings. Layer 0 corresponds to the embeddings layer
:param max_length: the max length to truncate the tokenized sequences to.
:param acceleration: The method to accelerate encoding. The available options are:
- ``'amp'``, which uses `automatic mixed precision
`<https://pytorch.org/docs/stable/amp.html>`_ autocasting.
This option is only available on GPUs that support it
(architecture newer than or equal to NVIDIA Volatire).
- ``'quant'``, which uses dynamic quantization on the transformer model.
See `this tutorial
<https://pytorch.org/tutorials/intermediate/dynamic_quantization_bert_tutorial.html>`_
for more information. This option is currently not supported on GPUs.
:param embedding_fn_name: name of the function to be called from the `model` to do the embedding. `__call__` by default.
Other possible values would `embed_questions` for `RetriBert` based models
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
..note::
While acceleration methods can significantly speed up the encoding,
they result in loss of precision. Make sure that the tradeoff is
worthwhile for your use case.
"""
def post_init(self):
"""Load the transformer model and encoder"""
import torch
from transformers import AutoModel, AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.base_tokenizer_model)
if self.api_token is None:
self.model = AutoModel.from_pretrained(
self.pretrained_model_name_or_path, output_hidden_states=True
)
self.to_device(self.model)
if self.acceleration == 'quant' and not self.on_gpu:
self.model = torch.quantization.quantize_dynamic(
self.model, {torch.nn.Linear}, dtype=torch.qint8
)
else:
self._api_call('Scotty, please warmup.')
def amp_accelerate(self):
"""Check acceleration method """
import torch
from contextlib import nullcontext
if self.acceleration == 'amp':
return torch.cuda.amp.autocast()
else:
return nullcontext()
@batching
@as_ndarray
def encode(self, data: 'np.ndarray', *args, **kwargs) -> 'np.ndarray':
"""
Encode an array of string in size `B` into an ndarray in size `B x D`,
where `B` is the batch size and `D` is the dimensionality of the encoding.
:param data: a 1d array of string type in size `B`
:return: an ndarray in size `B x D` with the embeddings
"""
import torch
with torch.no_grad():
if not self.tokenizer.pad_token:
self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
self.model.resize_token_embeddings(len(self.tokenizer.vocab))
input_tokens = self.tokenizer(
list(data),
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
if self.api_token is not None:
outputs = self._api_call(list(data))
hidden_states = torch.tensor([outputs])
else:
with self.amp_accelerate():
outputs = getattr(self.model, self.embedding_fn_name)(
**input_tokens
)
if isinstance(outputs, torch.Tensor):
return outputs.cpu().numpy()
hidden_states = outputs.hidden_states
return self._compute_embedding(hidden_states, input_tokens)
| [
834,
22163,
4766,
834,
796,
366,
15269,
357,
66,
8,
33448,
449,
1437,
9552,
15302,
13,
1439,
2489,
10395,
526,
198,
834,
43085,
834,
796,
366,
25189,
4891,
12,
17,
13,
15,
1,
198,
198,
11748,
640,
198,
198,
6738,
19720,
1330,
32233,... | 2.380559 | 2,181 |
tagcomponent = "disagg"
| [
12985,
42895,
796,
366,
6381,
9460,
1,
198
] | 3 | 8 |
# stdlib
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft absolute
import syft as sy
# relative
from ..... import lib
from .....logger import traceback_and_raise
from .....proto.core.node.common.action.run_function_or_constructor_pb2 import (
RunFunctionOrConstructorAction as RunFunctionOrConstructorAction_PB,
)
from .....util import inherit_tags
from ....common.serde.serializable import serializable
from ....common.uid import UID
from ....io.address import Address
from ....pointer.pointer import Pointer
from ....store.storeable_object import StorableObject
from ...abstract.node import AbstractNode
from ..util import check_send_to_blob_storage
from ..util import listify
from ..util import upload_result_to_s3
from .common import ImmediateActionWithoutReply
from .greenlets_switch import retrieve_object
@serializable()
class RunFunctionOrConstructorAction(ImmediateActionWithoutReply):
"""
When executing a RunFunctionOrConstructorAction, a :class:`Node` will run
a function defined by the action's path attribute and keep the returned value
in its store.
Attributes:
path: the dotted path to the function to call
args: args to pass to the function. They should be pointers to objects
located on the :class:`Node` that will execute the action.
kwargs: kwargs to pass to the function. They should be pointers to objects
located on the :class:`Node` that will execute the action.
"""
@staticmethod
def _object2proto(self) -> RunFunctionOrConstructorAction_PB:
"""Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: RunFunctionOrConstructorAction_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
"""
return RunFunctionOrConstructorAction_PB(
path=self.path,
args=[sy.serialize(x, to_bytes=True) for x in self.args],
kwargs={k: sy.serialize(v, to_bytes=True) for k, v in self.kwargs.items()},
id_at_location=sy.serialize(self.id_at_location),
address=sy.serialize(self.address),
msg_id=sy.serialize(self.id),
)
@staticmethod
def _proto2object(
proto: RunFunctionOrConstructorAction_PB,
) -> "RunFunctionOrConstructorAction":
"""Creates a ObjectWithID from a protobuf
As a requirement of all objects which inherit from Serializable,
this method transforms a protobuf object into an instance of this class.
:return: returns an instance of RunFunctionOrConstructorAction
:rtype: RunFunctionOrConstructorAction
.. note::
This method is purely an internal method. Please use deserialize()
if you wish to deserialize an object.
"""
return RunFunctionOrConstructorAction(
path=proto.path,
args=tuple(sy.deserialize(blob=x, from_bytes=True) for x in proto.args),
kwargs={
k: sy.deserialize(blob=v, from_bytes=True)
for k, v in proto.kwargs.items()
},
id_at_location=sy.deserialize(blob=proto.id_at_location),
address=sy.deserialize(blob=proto.address),
msg_id=sy.deserialize(blob=proto.msg_id),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
"""Return the type of protobuf object which stores a class of this type
As a part of serialization and deserialization, we need the ability to
lookup the protobuf object type directly from the object type. This
static method allows us to do this.
Importantly, this method is also used to create the reverse lookup ability within
the metaclass of Serializable. In the metaclass, it calls this method and then
it takes whatever type is returned from this method and adds an attribute to it
with the type of this class attached to it. See the MetaSerializable class for details.
:return: the type of protobuf object which corresponds to this class.
:rtype: GeneratedProtocolMessageType
"""
return RunFunctionOrConstructorAction_PB
def remap_input(self, current_input: Any, new_input: Any) -> None:
"""Redefines some of the arguments of the function"""
for i, arg in enumerate(self.args):
if arg.id_at_location == current_input.id_at_location:
self.args[i] = new_input
for k, v in self.kwargs.items():
if v.id_at_location == current_input.id_at_location:
self.kwargs[k] = new_input
| [
2,
14367,
8019,
198,
6738,
19720,
1330,
4377,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
7343,
198,
6738,
19720,
1330,
32233,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
19720,
1330,
4479,
198,
198,
2,
2368,
2151,
198,... | 2.742782 | 1,905 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################################################################################################################################################
######################################################################################################## PRE-DEFINED IMPORTS #######################################################################################################
####################################################################################################################################################################################################################################
# Imports that are necessary for the program architecture to work properly
# Do not edit this code
import ast
import sys
import os
####################################################################################################################################################################################################################################
########################################################################################################### YOUR IMPORTS ###########################################################################################################
####################################################################################################################################################################################################################################
# [YOUR CODE HERE]
from queue import Queue
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "Your name here"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR CONSTANTS ##########################################################################################################
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
route = []
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
def search(mazeMap, start, search_method="width"):
    """Return the routing table from the origin location to all reachable
    locations, as a dictionary mapping each location to its predecessor.

    search_method "width" gives a breadth-first traversal (FIFO queue);
    any other value gives a depth-first traversal (LIFO stack).
    """
    # StackQueue(True) behaves as a stack (DFS), StackQueue(False) as a queue (BFS).
    waiting = StackQueue(search_method != "width")
    # Mark nodes as discovered when they are enqueued, not when popped:
    # the previous version could enqueue the same node several times (and
    # overwrite its predecessor), wasting work on dense maps. A set also
    # makes membership tests O(1) instead of O(n) on a list.
    discovered = {start}
    routing = {}
    waiting.append(start)
    while not len(waiting) == 0:
        current_node = waiting.pop()
        # mazeMap entries are (neighbour_location, weight) pairs; the
        # weights are ignored by this unweighted traversal.
        for neighbour in mazeMap[current_node]:
            if neighbour[0] not in discovered:
                discovered.add(neighbour[0])
                routing[neighbour[0]] = current_node
                waiting.append(neighbour[0])
    return routing
def way(routing, start, end):
    """Return the route from start to end as a list of locations.

    The list excludes start and includes end; it is rebuilt by walking the
    routing table backwards from end and reversing the result.
    """
    path = []
    node = end
    while node != start:
        path.append(node)
        node = routing[node]
    path.reverse()
    return path
def direction(old, new):
    """Return the move direction leading from the old location to the new one.

    Locations are (line, column) tuples; vertical displacement wins over
    horizontal, and RIGHT is the fallback when no other direction matches.
    """
    d_line = new[0] - old[0]
    d_col = new[1] - old[1]
    if d_line < 0:
        return UP
    if d_line > 0:
        return DOWN
    return LEFT if d_col < 0 else RIGHT
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time it is allowed for computing, the players locations in the maze and the remaining coins locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the times allowed to the various steps, and the players and coins locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
    # We send the team name
    writeToPipe(TEAM_NAME + "\n")
    # We process the initial information and have a delay to compute things using it
    (mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
    initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
    # We decide how to move and wait for the next step
    while not gameIsOver :
        (playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
        if gameIsOver :
            break
        # Ask the strategy for a decision, then report it to the maze application
        nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
        writeToPipe(nextMove)
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
29113,
29113,
29113,
29113,
29113,
4242,
198,
29113,
29113,
29113,
7804,
22814,
12,
7206,
20032,
1961,
... | 6.862275 | 1,837 |
import os
import json
# Runtime configuration; the values are empty placeholders here and are
# presumably populated elsewhere before the endpoint is used -- TODO confirm.
config = {
    "secret": "",
    "uuid": "",
    "endpointURL": ""
}
def is_file_empty(file_path):
    """Return True if the file exists and its size is 0 bytes.

    Args:
        file_path: Path of the file to check.

    Returns:
        bool: True only when the path exists and has zero size.
    """
    # os.path.getsize() returns os.stat().st_size, so the original triple
    # check (exists + stat + getsize) collapses to a single size test.
    return os.path.exists(file_path) and os.path.getsize(file_path) == 0
| [
11748,
28686,
198,
11748,
33918,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
366,
21078,
1298,
366,
1600,
198,
220,
220,
220,
366,
12303,
312,
1298,
366,
1600,
198,
220,
220,
220,
366,
437,
4122,
21886,
1298,
13538,
198,
92,
628,
... | 2.631579 | 133 |
import copy
from pathlib import Path
import tempfile
from allennlp.common.testing import ModelTestCase
from allennlp.common.params import Params
from allennlp.data import Batch
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.nn import InitializerApplicator, Initializer
from flaky import flaky
from allennlp.common.checks import ConfigurationError
import pytest
import numpy
import target_extraction
from .util import loss_weights
| [
11748,
4866,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
20218,
7753,
198,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
33407,
1330,
9104,
14402,
20448,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
37266,
1330,
2547,
4105,
198,
6738,
... | 3.674242 | 132 |
import datetime
import enum
import json
import os
from pathlib import Path, PureWindowsPath
class SubfileMissingError(Exception):
    """Raised when a referenced subfile cannot be found."""
@enum.unique
@enum.unique
if __name__ == '__main__':
    # Build the per-part JSON database; create_json_for_parts is defined
    # elsewhere in this module.
    create_json_for_parts('ldraw_studcount.json')
| [
11748,
4818,
8079,
198,
11748,
33829,
198,
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
3108,
8019,
1330,
10644,
11,
17129,
11209,
15235,
628,
198,
4871,
3834,
7753,
43730,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
7004,
7753,... | 3.052083 | 96 |
from marshmallow import fields, Schema
| [
6738,
22397,
42725,
1330,
7032,
11,
10011,
2611,
628,
628
] | 4.2 | 10 |
import frappe
from frappe.utils import today
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist() | [
11748,
5306,
27768,
198,
6738,
5306,
27768,
13,
26791,
1330,
1909,
628,
198,
31,
69,
430,
27768,
13,
1929,
270,
46331,
3419,
198,
198,
31,
69,
430,
27768,
13,
1929,
270,
46331,
3419,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,... | 2.082474 | 97 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""Парсер курса доллара и евро за текущую дату от сайта центробанка России."""
if __name__ == '__main__':
    from datetime import date

    # The CBR daily page expects the date as DD.MM.YYYY.
    date_req = date.today().strftime('%d.%m.%Y')
    url = 'https://www.cbr.ru/currency_base/daily.aspx?date_req=' + date_req

    # pip install robobrowser
    from robobrowser import RoboBrowser

    browser = RoboBrowser(
        user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0',
        parser='lxml'
    )
    browser.open(url)

    # Bail out with the HTTP status if the page could not be fetched.
    if not browser.response.ok:
        print(browser.response.status_code, browser.response.reason)
        quit()

    # The rates table has one <tr> per currency; column 1 is the code and
    # column 4 the rate.
    wanted = ('USD', 'EUR')
    for row in browser.select('.data tr'):
        cells = row.select('td')
        if cells and cells[1].text in wanted:
            print(cells[1].text, cells[4].text)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
541,
21879,
1077,
6,
628,
198,
37811,
140,
253,
16142,
21169,
21727,
16843,
21169,
... | 1.978448 | 464 |
# -*- coding: utf-8 -*-
import os
import re
from sphinx_testing import with_app
import unittest
# Regex fragment matching a line break with an optional carriage return.
CR = '\r?\n'

# Font from the ipafont package, used when rendering diagrams to PDF.
rackdiag_fontpath = '/usr/share/fonts/truetype/ipafont/ipagp.ttf'

# Fixture: build the rackdiag test docs with the LaTeX builder using the
# default image format.
with_png_app = with_app(srcdir='tests/docs/rackdiag',
                        buildername='latex',
                        write_docstring=True,
                        confoverrides={
                            'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                        })

# Fixture: same builder, forcing PDF output via 'rackdiag_latex_image_format'.
with_pdf_app = with_app(srcdir='tests/docs/rackdiag',
                        buildername='latex',
                        write_docstring=True,
                        confoverrides={
                            'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                            'rackdiag_latex_image_format': 'PDF',
                            'rackdiag_fontpath': rackdiag_fontpath,
                        })

# Fixture: as above but with the older 'rackdiag_tex_image_format' option
# spelling (hence the "oldpdf" name).
with_oldpdf_app = with_app(srcdir='tests/docs/rackdiag',
                           buildername='latex',
                           write_docstring=True,
                           confoverrides={
                               'latex_documents': [('index', 'test.tex', '', 'test', 'manual')],
                               'rackdiag_tex_image_format': 'PDF',
                               'rackdiag_fontpath': rackdiag_fontpath,
                           })
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
599,
20079,
87,
62,
33407,
1330,
351,
62,
1324,
198,
198,
11748,
555,
715,
395,
198,
198,
9419,
796,
705,
59,
81,
30,
... | 1.636792 | 848 |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum, unique, auto
from tf_nndct.graph import OpTypes
from tf_nndct.graph import base_op
from tf_nndct.graph import dtypes
from tf_nndct.graph import ops
class TFGeneric(ops.Operation):
    """A generic op that can represent any keras layer.

    NOTE(review): no attributes are defined here -- behavior comes entirely
    from the ops.Operation base class; confirm against OpTypes usage.
    """
@unique
#TODO(yuwang): Use _define_attr to define attr.
| [
2,
15069,
13130,
1395,
346,
28413,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2... | 3.506944 | 288 |
import os
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import SigmaClip
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import skycoord_to_pixel
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import units as u
from photutils import source_properties
from photutils import detect_sources
from photutils import Background2D, MedianBackground
import huntsman_dust.util_plot as util_plot
def image_load(image_path):
    """Load a FITS file and return its image data, header and WCS.

    Args:
        image_path(str, required): Path to the FITS file.

    Returns:
        image(array): The image data
        header(table): The header object
        wcs: World Coordinate System object built from the header
    """
    # The HDU list is intentionally left open: the data may be lazily
    # loaded and is consumed by the caller.
    hdu_list = fits.open(image_path)
    primary = hdu_list[0]
    return primary.data, primary.header, WCS(primary.header)
def background_2D(image,
                  sigma,
                  iters,
                  box_size,
                  filter_size,
                  plt_grid):
    """Estimate the 2D background and its RMS over a grid of boxes.

    The image is divided into a grid of ``box_size`` boxes and a
    sigma-clipped median background is estimated per box. Zero-valued
    pixels are excluded via a mask.

    Args:
        image(array, required): This is the image data
        sigma(float, required): Sigma level for the clipping
        iters(int, required): Number of clipping iterations
        box_size(int, required): Defines the box dimensions, in pixels
        filter_size(int, required): Defines the filter reach in pixels
        plt_grid(boolean): Overplot the box grid on the image

    Returns:
        bkg(Background2D): 2D background estimate
        bkgrms(array): RMS of the background
    """
    clipper = SigmaClip(sigma=sigma, iters=iters)
    zero_mask = (image == 0)
    bkg = Background2D(image,
                       box_size=box_size,
                       filter_size=filter_size,
                       sigma_clip=clipper,
                       bkg_estimator=MedianBackground(),
                       mask=zero_mask,
                       edge_method=u'pad')
    if plt_grid is True:
        plt.imshow(bkg.background, origin='lower', cmap='Greys')
        bkg.plot_meshes(outlines=True, color='#1f77b4')
    return bkg, bkg.background_rms
return bkg, bkgrms
def find_objects(image,
                 threshold,
                 FWHM,
                 npixels):
    """Detect sources in an image via segmentation.

    A source is detected where the (kernel-smoothed) image exceeds the
    threshold over at least ``npixels`` connected pixels.

    Args:
        image(array, required): This is the image data
        threshold(array, required): Detection threshold
        FWHM(int, required): Full Width Half Maximum, in pixels, of the
            2D circular Gaussian kernel used to filter the image prior
            to thresholding
        npixels(int, required): Minimum number of connected pixels that
            defines a source

    Returns:
        segm: The segmentation image
    """
    kernel_sigma = FWHM * gaussian_fwhm_to_sigma
    smoothing_kernel = Gaussian2DKernel(kernel_sigma, x_size=3, y_size=3)
    smoothing_kernel.normalize()
    return detect_sources(image,
                          threshold,
                          npixels=npixels,
                          filter_kernel=smoothing_kernel)
def ds9_region(image_path,
               image,
               segm,
               wcs,
               ds9_region):
    """Create a DS9 region file for the detected sources.

    Writes one circle per segmented source (at its ICRS sky centroid,
    with its equivalent radius converted to sky units) into
    '<image basename>_ds9region.reg', in the same directory as the FITS
    file.

    Args:
        image_path(str, required): Path to the source FITS file
        image(array, required): This is the image data
        segm: The segmentation image
        wcs: World Coordinate System object
        ds9_region(boolean, opt): If true, creates the region file

    Note:
        The 'ds9_region' parameter shadows the function name inside the
        body; harmless, but worth renaming in a future cleanup.
    """
    if ds9_region is True:
        data_path = os.path.splitext(image_path)
        region_path = str(data_path[0]) + '_ds9region'
        # Pixel scale taken from the first WCS axis (degrees by convention).
        scale = proj_plane_pixel_scales(wcs)
        image_scale = scale[0]
        reg = source_properties(image, segm, wcs=wcs)
        with open(region_path+'.reg', 'w') as f:
            f.write('# Region file format: DS9 version 7.6\n\n')
            f.write('global color=#ff7733\n')
            f.write('global width=2\n')
            f.write('fk5\n\n')
            for i in range(0, len(reg.id)):
                x = reg[i].sky_centroid_icrs.ra.to(u.deg)
                y = reg[i].sky_centroid_icrs.dec
                # equivalent_radius is in pixels; scale converts it to sky units.
                r = image_scale*reg[i].equivalent_radius
                f.write('circle('+str(x.value)+','+str(y.value)+',' +
                        str(r.value)+')'+' # Source Number:' +
                        str(reg[i].id)+'\n')
def mask_galaxy(image,
                wcs,
                Ra,
                Dec,
                name,
                radius):
    """Mask a galaxy at (Ra, Dec) within a radius given in arcminutes.

    Builds a circular boolean mask centered on the target object. The
    object center is resolved by name through SESAME, which requires an
    active internet connection; if the lookup fails, the supplied Ra/Dec
    (in degrees) are used instead. The WCS provides the pixel scale used
    to convert the radius from arcminutes to pixels.

    Args:
        image(array, required): Image data
        wcs: World Coordinate System object
        Ra(str): Right Ascension in degrees (fallback if name lookup fails)
        Dec(str): Declination in degrees (fallback if name lookup fails)
        name(str, optional): Name of galaxy or object for the SESAME lookup
        radius(float, required): Radius to be masked, in arcminutes

    Returns:
        mask(boolean array): True inside the circle around the object
    """
    # Image dimensions: y = rows, x = columns
    dim = (image.shape)
    y, x = dim[0], dim[1]
    # Resolve the object center by name via SESAME (needs internet);
    # on any failure fall back to the explicit Ra/Dec arguments.
    # (The previous version also contained no-op "Ra = Ra; Dec = Dec"
    # assignments here, removed.)
    try:
        center = SkyCoord.from_name(name)
    except Exception:
        print("No active internet connection. Manually enter Ra, Dec.")
        center = SkyCoord(Ra, Dec, unit="deg")
    # Center in pixel coordinates: a = x (column), b = y (row)
    c_pix = skycoord_to_pixel(center, wcs)
    a, b = c_pix[0], c_pix[1]
    print(center)
    radius = radius*u.arcmin
    # Pixel scale from the WCS. The default units can be found by
    # unit = header['CUNIT1']; they are degrees by convention.
    # Degrees are converted to arcmins so the radius can be expressed
    # in pixels.
    scale = proj_plane_pixel_scales(wcs)
    pix_scale = scale[0]*u.deg.to(u.arcmin)
    print('Image Scale: ' + str(pix_scale)+' arcmin/pix')
    rad_pix = (radius/pix_scale).value
    # Index each pixel and flag those within rad_pix of the center.
    Y, X = np.ogrid[:y, :x]
    dist_from_center = np.sqrt((X - a)**2 + (Y - b)**2)
    mask = dist_from_center <= rad_pix
    return mask
def plt_fits(image,
             wcs,
             figure,
             title,
             cmap,
             norm):
    """Display a FITS image with axes in sky coordinates (RA/DEC).

    Args:
        image(array): Image data
        wcs: World Coordinate System object used for the axes projection
        figure(optional): Figure number
        title(str, optional): Title of the figure
        cmap(str, optional): Color map
        norm: Image normalization
    """
    util_plot.util_plot()
    axes = plt.figure(num=figure).add_subplot(1, 1, 1, projection=wcs)
    axes.imshow(image, origin='lower', cmap=cmap, norm=norm)
    axes.coords[0].set_axislabel('RA')
    axes.coords[1].set_axislabel('DEC')
    axes.set_title(title)
def plt_image(image,
              figure,
              title,
              xlabel,
              ylabel,
              cmap,
              norm):
    """Display an image with plain pixel axes and the given labels.

    Args:
        image(array): Image data
        figure(optional): Figure number
        title(str, optional): Title of the figure
        xlabel(str): Label of the x axis
        ylabel(str): Label of the y axis
        cmap(str, optional): Color map
        norm: Image normalization
    """
    util_plot.util_plot()
    plt.figure(num=figure)
    plt.imshow(image, origin='lower', norm=norm, cmap=cmap)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
def fits_write(image, header, img_path, name=None):
    """Write a 2D data array to a FITS file next to the source image.

    The new file is created in the directory of ``img_path`` with the
    supplied header attached to it.

    Args:
        image(array): The image data to be written to a fits file
        header(hdu.header): The header information to be appended
        img_path(str): Path to the source file
        name(str): Base name of the new fits file (extension is added)
    """
    hdu = fits.PrimaryHDU()
    hdu.data = image.astype(float)
    hdu.header = header
    # NOTE(review): the name=None default would raise a TypeError below;
    # presumably callers always pass a name -- confirm.
    directory, _ = os.path.split(img_path)
    target_path = os.path.join(directory, name + '.fits')
    hdu.writeto(target_path, overwrite=True)
def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fracitonal pixels).

    Returns a 1D array: the mean pixel value in each integer radius bin.

    Contributed by Jessica R. Lu
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)

    # Use an explicit None test: the previous truthiness check
    # ("if not center:") raised "truth value is ambiguous" for an
    # ndarray center and wrongly replaced a legitimate [0, 0] center.
    if center is None:
        center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])

    r = np.hypot(x - center[1], y - center[0])

    # Get sorted radii
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of radius bin

    # Cumulative sum to figure out sums for each radius bin
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    radial_prof = tbin / nr

    return radial_prof
def p_spec(image):
    """Compute the 2D FFT power spectrum of an image and average it radially."""
    shifted_power = np.abs(fftpack.fftshift(fftpack.fft2(image.astype(float)))) ** 2
    return azimuthalAverage(shifted_power)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
277,
701,
8002,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
198,
6738,
6468,
28338,
13,
1... | 2.073749 | 5,817 |
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from .context import Context
from ..utils import maybe_coro
__all__ = ("Command",)
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
19720,
355,
256,
198,
198,
361,
256,
13,
25216,
62,
50084,
2751,
25,
198,
220,
220,
220,
422,
764,
22866,
1330,
30532,
198,
198,
6738,
11485,
26791,
1330,
3863,
62,
10215,
78,
198... | 3.150943 | 53 |
from runners.python import Submission
| [
6738,
19323,
13,
29412,
1330,
42641,
628
] | 5.571429 | 7 |
import settings
import handlers.base_handler
import csv
| [
11748,
6460,
198,
11748,
32847,
13,
8692,
62,
30281,
198,
11748,
269,
21370,
198
] | 4 | 14 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
@paddle.no_grad()
@paddle.no_grad()
| [
2,
15069,
357,
66,
8,
33448,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845... | 3.63388 | 183 |
from con_reader import CONreaderVM
from dicom_reader import DCMreaderVM
from utils import get_logger
from domain.patient_data import PatientData
import numpy as np
import pickle
import os
import sys
import cv2 as cv
logger = get_logger(__name__)

# Command-line usage: <script> IN_DIR OUT_DIR
in_dir = sys.argv[1]
out_dir = sys.argv[2]

if not os.path.isdir(in_dir):
    logger.error("Invalid input directory: {}".format(in_dir))
else:
    # One subfolder per patient; process them in sorted (deterministic) order.
    # create_pickle_for_patient is defined elsewhere in this module.
    patient_folders = sorted(os.listdir(in_dir))
    for patient_folder in patient_folders:
        create_pickle_for_patient(os.path.join(in_dir, patient_folder), out_dir)
| [
6738,
369,
62,
46862,
1330,
7102,
46862,
15996,
198,
6738,
288,
291,
296,
62,
46862,
1330,
6257,
44,
46862,
15996,
198,
6738,
3384,
4487,
1330,
651,
62,
6404,
1362,
198,
6738,
7386,
13,
26029,
62,
7890,
1330,
35550,
6601,
198,
11748,
... | 2.685714 | 210 |
from day_data import DayData
import datetime
# Print the plans for each day of the week -- offset relative to the current day.
if __name__ == '__main__':
    wd = WeekData()
    for day_offset in range(7):
        print(wd.get_plan_day(day_offset))
6738,
1110,
62,
7890,
1330,
3596,
6601,
198,
11748,
4818,
8079,
628,
220,
220,
220,
1303,
12466,
123,
25443,
119,
35072,
141,
229,
18849,
20375,
45367,
12466,
123,
30143,
16142,
22177,
45035,
12466,
121,
16142,
220,
35072,
31583,
16142,
1... | 1.725322 | 233 |
import matplotlib.pyplot as plt
import numpy as np
import math
import os
np.set_printoptions(threshold=np.inf)

# prepare some coordinates
# [3, 4, 13, 14, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 154, 155, 156, 157, 158]
# 2 2 2
# Example of a per-run point-cloud file produced by the agent:
# /home/jianrenw/prox/tslam/data/local/agent/20210506/resetnormal/gtsample/voxelFalse_rwFalse_obj4_orienup_[-1.57, 0, 0]_[0, -0.14, 0.22]_[-1.57, 0, 0]_[0, -0.7, 0.17]/cf0_knn0_vr1_lstd0.5_knnk5_vconf['3d', 0, 0.01, False]_sensorFalse/agent/run_0/2dnewpointcloud/obj4_step_299.npz
# Alternative ground-truth sources, kept for reference:
# gtdata = np.load("/home/jianrenw/prox/tslam/data/local/agent/gt_pcloud/groundtruth_obj4.npz")['pcd']
# uniform_gt_data = np.load("/home/jianrenw/prox/tslam/test_o3d.npz")['pcd']
# uniform_gt_data = np.load("/home/jianrenw/prox/tslam/uniform_glass_o3d.npz")['pcd']

# Uniformly sampled ground-truth point cloud of the object under evaluation.
uniform_gt_data = np.load("/home/jianrenw/prox/tslam/uniform_donut_o3d.npz")['pcd']

# Per-object scale factors noted by the author: obj4:0.0008 obj1:0.015 obj2:0.01
data_scale = uniform_gt_data * 0.01

# Optional axis swap/rotation stage (currently disabled).
data_rotate = data_scale.copy()
# x = data_rotate[:, 0].copy()
# y = data_rotate[:, 1].copy()
# z = data_rotate[:, 2].copy()
# data_rotate[:, 0] = x
# data_rotate[:, 1] = z
# data_rotate[:, 2] = -y

# Fixed translation applied to the ground-truth cloud.
data_trans = data_rotate.copy()
data_trans[:, 0] += 0
data_trans[:, 1] -= 0.24
data_trans[:, 2] += 0.23
uniform_gt_data = data_trans.copy()
# Walk the results tree and run the voxel-coverage pipeline on each matching
# run directory.
# NOTE(review): the original indentation was lost in this dump; the pipeline
# below is assumed to run once per matching directory -- confirm.
for root, dirs, files in os.walk("/home/jianrenw/prox/tslam/data/local/train_adroit/20210516/resetnormal/gtsample/"):
    if "pointcloud_573.npz" in files and "obj2" in root:
        print(root)
        # NOTE(review): the condition checks pointcloud_573.npz but the code
        # loads pointcloud_597.npz -- confirm this is intended.
        data = np.load(os.path.join(root, "pointcloud_597.npz"))['pcd']
        # for step in [49]:#, 99, 149, 249, 299]:
        # data = np.load("/home/jianrenw/prox/tslam/voxel/2dnewpointcloud/obj4_orien__step_{}.npz".format(step))['pcd']
        # Voxel grid: 1 cm resolution over a 0.25 x 0.225 x 0.1 m volume.
        resolution = 0.01
        sep_x = math.ceil(0.25 / resolution)
        sep_y = math.ceil(0.225 / resolution)
        sep_z = math.ceil(0.1 / resolution)
        x, y, z = np.indices((sep_x, sep_y, sep_z))
        # Start from all-False occupancy grids (the x<0 condition is never true).
        cube1 = (x<0) & (y <1) & (z<1)
        gtcube = (x<0) & (y <1) & (z<1)
        voxels = cube1
        gt_voxels = gtcube
        # draw cuboids in the top left and bottom right corners, and a link between them
        # Mark the voxel containing each explored point; map_list dedupes voxels.
        map_list = []
        for idx,val in enumerate(data):
            idx_x = math.floor((val[0] + 0.125) / resolution)
            idx_y = math.floor((val[1] + 0.25) / resolution)
            idx_z = math.floor((val[2] - 0.16) / resolution)
            if idx_z > 6:
                continue
            name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
            if name not in map_list:
                map_list.append(name)
            cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
            # combine the objects into a single boolean array
            voxels += cube
        # draw gt: same voxelization applied to the ground-truth cloud.
        gt_map_list = []
        for idx,val in enumerate(uniform_gt_data):
            idx_x = math.floor((val[0] + 0.125) / resolution)
            idx_y = math.floor((val[1] + 0.25) / resolution)
            idx_z = math.floor((val[2] - 0.16) / resolution)
            if idx_z > 6:
                continue
            name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
            if name not in gt_map_list:
                gt_map_list.append(name)
            cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
            # combine the objects into a single boolean array
            gt_voxels += cube
        # gt_obj4:668
        # Coverage ratio: explored voxels / ground-truth voxels.
        print(len(map_list) / len(gt_map_list))
        # print(len(map_list) / sep_x / sep_y / sep_z )
        obj_name = "donut"
        # set the colors of each object
        vis_voxel = gt_voxels | voxels
        colors = np.empty(vis_voxel.shape, dtype=object)
        colors[gt_voxels] = 'white'
        colors[voxels] = 'cyan'
        # and plot everything: overlap, ground truth only, and explored only.
        ax = plt.figure().add_subplot(projection='3d')
        ax.set_zlim(1,20)
        ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
        # plt.savefig('uniform_gtbox_{}.png'.format(step))
        plt.savefig('{}-overlap.png'.format(obj_name))
        plt.close()
        ax = plt.figure().add_subplot(projection='3d')
        ax.set_zlim(1,20)
        ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
        # plt.savefig('uniform_gtbox_{}.png'.format(step))
        plt.savefig('{}-gt.png'.format(obj_name))
        plt.close()
        ax = plt.figure().add_subplot(projection='3d')
        ax.set_zlim(1,20)
        ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
        # plt.savefig('uniform_gtbox_{}.png'.format(step))
        plt.savefig('{}-exp.png'.format(obj_name))
        plt.close()
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
28686,
198,
198,
37659,
13,
2617,
62,
4798,
25811,
7,
400,
10126,
28,
37659,
13,
10745,
8,
198,
198,
2,
8335,
... | 2.11996 | 2,009 |
from PIL import Image,ImageDraw,ImageFont
import os
# (average brightness, character) samples for every glyph rendered below.
grayscale=[]
# Unicode codepoint ranges (inclusive bounds) to sample glyphs from.
unicodeblocks = [(32,126),(174,846),(910,1366),(1421,1479),(5761,5788),(6656,6678),(7680,7957),(8448,8587),(9312,9472),(9696,10495),(10854,10956),(12032,12245),(11200,11208)]

# Render each candidate glyph on a 7x13 grayscale canvas (91 pixels) and
# record its average brightness.
# NOTE(review): the ImageFont.truetype call is loop-invariant and could be
# hoisted out of both loops.
for block in unicodeblocks:
    for i in range(block[0],block[1]+1):
        total=0
        img = Image.new('L', (7,13), (255))
        fnt=ImageFont.truetype("/Library/Fonts/Courier New.ttf", 11)
        d = ImageDraw.Draw(img)
        d.text((0, 0), chr(i),fill=(0),font=fnt)
        for color in img.getdata():
            total+=color
        grayscale.append((total/91,chr(i)))

# Sort glyphs from darkest to brightest.
grayscale.sort(key=lambda x:x[0])
smol=grayscale[0][0]        # darkest average brightness
dif=grayscale[-1][0]-smol   # brightness range for normalization

# Lookup table: for each scaled brightness index (a/50 covers 0..255.02),
# pick the glyph whose normalized brightness is closest.
grey=[None]*12751
for a in range(0,12751):
    grey[a]=grayscale[min(range(len(grayscale)), key=lambda i: abs(((grayscale[i][0]-smol)/dif)*255-a/50))][1]

# Write the table next to this script, joined by chr(166) ('¦'), UTF-8 encoded.
fpath = os.path.dirname(os.path.abspath(__file__))
with open(fpath+"/char.txt", "wb") as file:
    file.write(bytes(chr(166).join(list(x for x in grey)), 'UTF-8'))
| [
6738,
350,
4146,
1330,
7412,
11,
5159,
25302,
11,
5159,
23252,
198,
11748,
28686,
198,
198,
2164,
592,
38765,
28,
21737,
198,
46903,
1098,
27372,
796,
47527,
2624,
11,
19420,
828,
7,
22985,
11,
23,
3510,
828,
7,
43234,
11,
1485,
2791,... | 2.035573 | 506 |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pkgutil import get_data
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.fold_constants import FoldConstants
from finn.transformation.general import GiveReadableTensorNames, GiveUniqueNodeNames
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.infer_shapes import InferShapes
| [
2,
15069,
357,
66,
8,
12131,
11,
1395,
346,
28413,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
1708,
3... | 3.56367 | 534 |
# -*- coding: utf-8 -*-
"""
Herman Sanghera
November 2, 2019
CodingBat Solutions (Python)
This is a python file containing my own solutions to the
different Python List-1 exercises on codingbat.com
"""
"""
List-1 > first_last6:
Given an array of ints, return True if 6 appears as
either the first or last element in the array. The array
will be length 1 or more.
"""
"""
List-1 > same_first_last
Given an array of ints, return True if the array is length
1 or more, and the first element and the last element are equal.
"""
"""
List-1 > make_pi
Return an int array length 3 containing the first 3
digits of pi, {3, 1, 4}.
"""
"""
List-1 > common_end
Given 2 arrays of ints, a and b, return True if they have the
same first element or they have the same last element. Both
arrays will be length 1 or more.
"""
"""
List-1 > Sum3
Given an array of ints length 3, return the sum of all the elements.
"""
"""
List-1 > rotate_left3
Given an array of ints length 3, return an array with the
elements "rotated left" so {1, 2, 3} yields {2, 3, 1}.
"""
"""
List-1 > reverse3
Given an array of ints length 3, return a new array with the
elements in reverse order, so {1, 2, 3} becomes {3, 2, 1}.
"""
"""
List-1 > max_end3
Given an array of ints length 3, figure out which is larger,
the first or last element in the array, and set all the other
elements to be that value. Return the changed array.
"""
"""
List-1 > sum2
Given an array of ints, return the sum of the first 2 elements
in the array. If the array length is less than 2, just sum up
the elements that exist, returning 0 if the array is length 0.
"""
"""
List-1 > middle_way
Given 2 int arrays, a and b, each length 3, return a new array
length 2 containing their middle elements.
"""
"""
List-1 > make_ends
Given an array of ints, return a new array length 2 containing
the first and last elements from the original array. The original
array will be length 1 or more.
"""
"""
List-1 > has23
Given an int array length 2, return True if it contains a 2 or a 3.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
39,
2224,
30043,
372,
64,
201,
198,
21159,
362,
11,
13130,
201,
198,
34,
7656,
24541,
23555,
357,
37906,
8,
201,
198,
201,
198,
1212,
318,
257,
... | 2.910326 | 736 |
# -*- coding: utf-8 -*-
from app import (ChocolateApp, DatabaseNotEnabledError,
InvalidAuthSettingsError, InvalidPyDALParameterError)
from server import ChocolateServer, NonChocolateAppError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
598,
1330,
357,
1925,
9140,
4677,
11,
24047,
3673,
20491,
12331,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.985714 | 70 |
import os
| [
11748,
28686,
628,
220,
220,
220,
220
] | 2.142857 | 7 |
import shutil
import simplejson
import sys
import time
import traceback
from django.core.management.base import BaseCommand
from src.settings import *
from src.util.stats import *
| [
11748,
4423,
346,
198,
11748,
2829,
17752,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
198,
6738,
12351,
13,
33692,
1330,
1635,
198,
6... | 3.68 | 50 |
#!/usr/bin/env python3.8
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Lex Bot Conversation Runner"""
import boto3
from botocore.config import Config as BotoCoreConfig
CLIENT_CONFIG = BotoCoreConfig(
retries={"mode": "adaptive", "max_attempts": 5},
)
CLIENT = boto3.client("lexv2-runtime", config=CLIENT_CONFIG)
def run_conversation_test(bot_args, conversation, session_id="test"):
"""Runs Lex Conversation Test"""
responses = []
for interaction in conversation:
api_function = getattr(CLIENT, interaction["operation"])
response = api_function(**bot_args, sessionId=session_id, **interaction["args"])
responses.append(response)
return responses
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
23,
198,
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
12,
15,
198,
37811,
45117,
... | 2.92607 | 257 |
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.fpn import FPN, LastLevelP6P7, LastLevelMaxPool
from detectron2.layers import ShapeSpec
__all__ = [
"ResT",
"build_rest_backbone",
"build_rest_fpn_backbone",
"build_retinanet_rest_fpn_backbone"]
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
@BACKBONE_REGISTRY.register()
def build_rest_backbone(cfg, input_shape):
"""
Create a ResT instance from config.
Returns:
ResT: a :class:`ResT` instance.
"""
name = cfg.MODEL.REST.NAME
out_features = cfg.MODEL.REST.OUT_FEATURES
depths = {"rest_lite": [2, 2, 2, 2], "rest_small": [2, 2, 6, 2],
"rest_base": [2, 2, 6, 2], "rest_large": [2, 2, 18, 2]}[name]
embed_dims = {"rest_lite": [64, 128, 256, 512], "rest_small": [64, 128, 256, 512],
"rest_base": [96, 192, 384, 768], "rest_large": [96, 192, 384, 768]}[name]
drop_path_rate = {"rest_lite": 0.1, "rest_small": 0.1, "rest_base": 0.2, "rest_large": 0.2}[name]
feature_names = ['stage1', 'stage2', 'stage3', 'stage4']
out_feature_channels = dict(zip(feature_names, embed_dims))
out_feature_strides = {"stage1": 4, "stage2": 8, "stage3": 16, "stage4": 32}
model = ResT(cfg, in_ch=3, embed_dims=embed_dims, qkv_bias=True, drop_path_rate=drop_path_rate,
depths=depths, apply_transform=True, out_features=out_features)
model._out_feature_channels = out_feature_channels
model._out_feature_strides = out_feature_strides
return model
@BACKBONE_REGISTRY.register()
def build_rest_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_rest_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_retinanet_rest_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_rest_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
in_channels_p6p7 = bottom_up.output_shape()["stage4"].channels
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelP6P7(in_channels_p6p7, out_channels, in_feature="stage4"),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
4628,
76,
13,
27530,
13,
75,
6962,
1330,
14258,
15235,
11,
284,
62,
17,
83,
29291,
11,
40122,
62,
11265,
62,
198,
6738,
4886,
1313,
17,
13,
4666,
10809,
13,
... | 2.270774 | 1,396 |
# encoding: utf-8
# Copyright 2009 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Run EDRN Public Portal tests
import subprocess, os, os.path, sys, ConfigParser
_executable = os.path.join('bin', 'instance-debug')
_logDir = os.path.join('var', 'testlogs')
def forciblyClose(f):
'''Force file-like object ``f`` to close, ignoring any failure of if it's None.'''
try:
f.close()
except (IOError, AttributeError):
pass
def runTests(package):
'''Run tests in the named package.'''
outlog = errlog = None
try:
outlog = open(os.path.join(_logDir, '%s.out.log' % package), 'w')
errlog = open(os.path.join(_logDir, '%s.err.log' % package), 'w')
proc = subprocess.Popen(
('instance-debug', 'test', '-s', package),
executable=_executable,
stdout=outlog,
stderr=errlog
)
return proc.wait() == 0
finally:
forciblyClose(outlog)
forciblyClose(errlog)
def main():
'''Run all the tests.'''
if not os.path.isdir(_logDir):
os.makedirs(_logDir)
success = True
parser = ConfigParser.SafeConfigParser()
parser.read(['sources.cfg'])
for package in [i[0] for i in parser.items('sources')]:
sys.stderr.write('Running tests in "%s" ... ' % package)
sys.stderr.flush()
rc = runTests(package)
success &= rc
sys.stderr.write('%s\n' % (rc and 'pass' or 'FAIL'))
sys.stderr.flush()
sys.exit(success and 0 or 1)
if __name__ == '__main__':
main() | [
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
15069,
3717,
3442,
5136,
286,
8987,
13,
11096,
371,
34874,
198,
2,
15731,
1137,
53,
1961,
13,
471,
13,
50,
13,
5070,
18972,
11094,
10810,
13,
198,
2,
198,
2,
5660,
412,
7707,
45,
5094,
256... | 2.299572 | 701 |
from .sqlstore import SQLTable, SQLView
K_STR=1 # a single string value
K_INT=2 # a numeric field
K_CSV=3 #
class Settings(object):
"""Application Settings backed by a database
provides a basic key/value store for Strings, Integers, and List-Of-Strings
"""
__instance = None
@staticmethod
@staticmethod
def getMulti(self,*keys):
"""
returns a dictionary
"""
out = {}
with self.sqlstore.conn:
c = self.sqlstore.conn.cursor()
for key in keys:
out[key] = self._get_main(c,key)
return out
def setDefault(self,key,value):
""" set the value iff it does not exist"""
with self.sqlstore.conn:
c = self.sqlstore.conn.cursor()
self._set_main(c,key,value,False)
def setMulti(self,data,overwrite=True):
""" overwrite, if false and key exists, value will not be updated """
with self.sqlstore.conn:
c = self.sqlstore.conn.cursor()
for key,value in data.items():
self._set_main(c,key,value,overwrite)
def keys(self):
""" generator function returns all settings keys """
with self.sqlstore.conn:
c = self.sqlstore.conn.cursor()
c.execute("SELECT key FROM settings")
results = c.fetchmany()
while results:
for item in results:
yield item[0]
results = c.fetchmany()
def items(self):
""" generator function returns all settings keys, values """
with self.sqlstore.conn:
c = self.sqlstore.conn.cursor()
c2 = self.sqlstore.conn.cursor()
c.execute("SELECT uid,key,kind FROM settings")
results = c.fetchmany()
while results:
for item in results:
uid,key,kind = item
value = None
if kind == K_STR:
value = self._get(c2,"setstr",uid)
elif kind == K_INT:
value = self._get(c2,"setint",uid)
elif kind == K_CSV:
value = self._get_list(c2,uid)
yield (key,value)
results = c.fetchmany()
| [
198,
198,
6738,
764,
25410,
8095,
1330,
16363,
10962,
11,
16363,
7680,
198,
198,
42,
62,
18601,
28,
16,
1303,
257,
2060,
4731,
1988,
198,
42,
62,
12394,
28,
17,
1303,
257,
35575,
2214,
198,
42,
62,
7902,
53,
28,
18,
1303,
198,
198... | 2.000864 | 1,158 |
from discord.ext import commands
| [
6738,
36446,
13,
2302,
1330,
9729,
198
] | 4.714286 | 7 |
from Ensemble.Ensemble import Ensemble
from log import logger
class BottomTrack:
"""
Ensemble Data DataSet.
Integer values that give details about the ensemble.
"""
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the velocities.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
self.FirstPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 0, Ensemble().BytesInFloat, data)
self.LastPingTime = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 1, Ensemble().BytesInFloat, data)
self.Heading = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 2, Ensemble().BytesInFloat, data)
self.Pitch = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 3, Ensemble().BytesInFloat, data)
self.Roll = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 4, Ensemble().BytesInFloat, data)
self.WaterTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 5, Ensemble().BytesInFloat, data)
self.SystemTemp = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 6, Ensemble().BytesInFloat, data)
self.Salinity = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 7, Ensemble().BytesInFloat, data)
self.Pressure = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 8, Ensemble().BytesInFloat, data)
self.TransducerDepth = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 9, Ensemble().BytesInFloat, data)
self.SpeedOfSound = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 10, Ensemble().BytesInFloat, data)
self.Status = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 11, Ensemble().BytesInFloat, data)
self.NumBeams = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 12, Ensemble().BytesInFloat, data)
self.ActualPingCount = Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * 13, Ensemble().BytesInFloat, data)
index = 14
numBeam = int(self.NumBeams)
for beams in range(numBeam):
self.Range.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.SNR.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Amplitude.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Correlation.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.BeamVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.BeamGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.InstrumentVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.InstrumentGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.EarthVelocity.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.EarthGood.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
if self.num_elements > 54:
for beams in range(numBeam):
self.SNR_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Amp_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Vel_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Noise_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
for beams in range(numBeam):
self.Corr_PulseCoherent.append(Ensemble.GetFloat(packet_pointer + Ensemble().BytesInFloat * index, Ensemble().BytesInFloat, data))
index += 1
else:
# Fill in with 0.0
for beams in range(numBeam):
self.SNR_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Amp_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Vel_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Noise_PulseCoherent.append(0.0)
for beams in range(numBeam):
self.Corr_PulseCoherent.append(0.0)
logger.debug(self.FirstPingTime)
logger.debug(self.LastPingTime)
logger.debug(self.Heading)
logger.debug(self.Pitch)
logger.debug(self.Roll)
logger.debug(self.Salinity)
logger.debug(self.SpeedOfSound)
logger.debug(self.EarthVelocity)
| [
6738,
2039,
15140,
13,
4834,
15140,
1330,
2039,
15140,
198,
6738,
2604,
1330,
49706,
628,
198,
4871,
24530,
24802,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2039,
15140,
6060,
6060,
7248,
13,
198,
220,
220,
220,
34142,
3815,
... | 2.397158 | 2,533 |
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
# DBSCAN
# ----------------------------------------------------------------------------------------------------------------
# ================================================================================================================
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
import matplotlib.pyplot as plt
import queue
import pandas as pd
# Find all neighbour points at epsilon distance
# Fit the data into the DBSCAN model
# Visualize the clusters
if __name__ == "__main__":
main()
| [
2,
38093,
10052,
25609,
18604,
198,
2,
16529,
47232,
198,
2,
197,
197,
197,
197,
197,
197,
197,
197,
197,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
360,
4462,
44565,
198,
2,
16529,
47232,
198,
2,
38093,
10052,
25609,
... | 5.411392 | 158 |
"""
Page Order
==========
Adds a `page_order` attribute to all pages if one is not defined.
"""
from pelican import signals
| [
37811,
198,
9876,
8284,
198,
2559,
855,
198,
198,
46245,
257,
4600,
7700,
62,
2875,
63,
11688,
284,
477,
5468,
611,
530,
318,
407,
5447,
13,
198,
37811,
198,
198,
6738,
16176,
7490,
1330,
10425,
198
] | 3.5 | 36 |
import collections.abc
import functools
from ..properties import lazy
from .linkedlist import List, Nil
def on_parts(func):
"""
Distribute methods on left and right parts.
"""
@functools.wraps(func)
return method
def as_list(func):
"""
Apply method as if queue was a list and return queue with only the right
side.
"""
@functools.wraps(func)
return method
class Queue(collections.abc.Sequence):
"""
A queue is a particular kind of collection in which the entities in the
collection are kept in order and the principal operations on the collection
are the addition of entities to the rear terminal position, known as
push_right, and removal of entities from the front terminal position,
known as pop_left.
Queue data structure description on Wikipedia:
[1] http://en.wikipedia.org/wiki/Queue_(abstract_data_type)
Implementation based on two linked lists (left and right). Enqueue operation
performs cons on right list (the end of the queue). Dequeue peeks first
element from the left list (when possible), if left list is empty we
populate left list with element from right one-by-one (in natural reverse
order). Complexity of both operations are O(1). Such implementation is also
known as "Banker's Queue" in different papers, i.e. in Chris Okasaki,
"Purely Functional Data Structures"
Usage:
>>> q = Queue()
>>> q1 = q.extend_right([1, 2, 3, 4])
>>> q2 = q1.push_right(5)
>>> q2.pop_left()
(1, Queue([2, 3, 4, 5]))
[1] http://en.wikipedia.org/wiki/Double-ended_queue
Implementation details are described here:
"Confluently Persistent Deques via Data Structural Bootstrapping"
[2] https://cs.uwaterloo.ca/~imunro/cs840/p155-buchsbaum.pdf
xxx: TBD
"""
__slots__ = ("_left", "_right", "__dict__")
_left: List
_right: List
_left_size = lazy(lambda x: len(x._left))
_right_size = lazy(lambda x: len(x._right))
def push_right(self, value):
"""
Puts element in the end of queue and return a new queue.
"""
return self.__class__(self._left, self._right.cons(value))
def push_left(self, value):
"""
Puts element in the begining of queue and return a new queue.
"""
return self.__class__(self._left.cons(value), self._right)
def extend_right(self, seq):
"""
Extend queue to the right.
"""
right = self._right
i = 0
for i, x in enumerate(seq):
right = right.cons(x)
if i:
new = self.__class__(self._left, right)
new._right_size += i
return new
else:
return self
def extend_left(self, seq):
"""
Extend queue to the left.
"""
left = self._left
i = 0
for i, x in enumerate(seq):
left = left.cons(x)
if i:
new = self.__class__(self._left, left)
new._left_size += i
return new
else:
return self
def pop_left(self):
"""
Remove first element and return (value, queue).
If queue is empty, raises ValueError.
"""
if self._left is not Nil:
value, left = self._left.uncons
right = self._right
elif self._right is not Nil:
value, left = self._right.reversed().uncons
right = Nil
else:
raise ValueError("Queue is empty")
return value, Queue(left, right)
def pop_right(self):
"""
Remove last element and return (value, queue).
If queue is empty, raises ValueError.
"""
if self._right is not Nil:
value, right = self._right.parts
left = self._left
elif self._left is not Nil:
value, right = self._left.reversed().parts
left = Nil
else:
raise ValueError("Queue is empty")
return value, Queue(left, right)
def reversed(self):
"""
Reversed copy of queue.
"""
return self.__class__(self._right.reversed(), self._left.reversed())
def is_empty(self):
"""
Return True if queue is empty.
"""
return not self
map = on_parts(List.map)
__lt__ = as_list(List.__lt__)
__gt__ = as_list(List.__gt__)
__le__ = as_list(List.__le__)
__ge__ = as_list(List.__ge__)
__eq__ = as_list(List.__eq__)
| [
11748,
17268,
13,
39305,
198,
11748,
1257,
310,
10141,
198,
198,
6738,
11485,
48310,
1330,
16931,
198,
6738,
764,
25614,
4868,
1330,
7343,
11,
29213,
628,
198,
4299,
319,
62,
42632,
7,
20786,
2599,
198,
220,
220,
220,
37227,
198,
220,
... | 2.359562 | 1,919 |
#!/usr/bin/env pypy
import dpkt
import gzip
import zlib
import struct
import sys
from collections import defaultdict
from cStringIO import StringIO
from tcpsession import TCPSession, tcp_flags
| [
2,
48443,
14629,
14,
8800,
14,
24330,
279,
4464,
88,
198,
198,
11748,
288,
79,
21841,
198,
11748,
308,
13344,
198,
11748,
1976,
8019,
198,
11748,
2878,
198,
11748,
25064,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
269,
10100,
9399,... | 3.3 | 60 |
import os
import pickle
import nltk
from discopy.confusion_matrix import *
from discopy.conn_head_mapper import *
from discopy.utils import discourse_adverbial, coordinating_connective, subordinating_connective
def extract_ps_arguments(clauses, conn_head, indices, ptree, arg2):
"""
If Arg1 is in the previous sentence relative to Arg2, a majority classifier in Lin et al. gave the full previous
sentence as Arg1, which already gives good results in argument extraction
"""
feature_set = []
clause_features = extract_clause_features(clauses, conn_head, indices, ptree)
for f in clause_features:
if f[1] in arg2:
label = 'Arg2'
else:
label = 'NULL'
feature_set.append((f[0], label))
return feature_set
if __name__ == "__main__":
trainpdtb = [json.loads(s) for s in open('../../discourse/data/conll2016/en.train/relations.json', 'r').readlines()]
trainparses = json.loads(open('../../discourse/data/conll2016/en.train/parses.json').read())
devpdtb = [json.loads(s) for s in open('../../discourse/data/conll2016/en.dev/relations.json', 'r').readlines()]
devparses = json.loads(open('../../discourse/data/conll2016/en.dev/parses.json').read())
print('....................................................................TRAINING..................')
clf = ArgumentExtractClassifier()
train_ss_data, train_ps_data = generate_pdtb_features(trainpdtb, trainparses)
clf.fit_on_features(train_ss_data, train_ps_data)
print('....................................................................ON TRAINING DATA..................')
print('ACCURACY {}'.format(nltk.classify.accuracy(clf.ss_model, train_ss_data)))
print('ACCURACY {}'.format(nltk.classify.accuracy(clf.ps_model, train_ps_data)))
print('....................................................................ON DEVELOPMENT DATA..................')
val_ss_data, val_ps_data = generate_pdtb_features(devpdtb, devparses)
print('ACCURACY {}'.format(nltk.classify.accuracy(clf.ss_model, val_ss_data)))
print('ACCURACY {}'.format(nltk.classify.accuracy(clf.ps_model, val_ps_data)))
| [
11748,
28686,
198,
11748,
2298,
293,
198,
198,
11748,
299,
2528,
74,
198,
198,
6738,
1221,
11081,
13,
10414,
4241,
62,
6759,
8609,
1330,
1635,
198,
6738,
1221,
11081,
13,
37043,
62,
2256,
62,
76,
11463,
1330,
1635,
198,
6738,
1221,
11... | 2.847258 | 766 |
from object.job import Job as Job1
from object.scheduler import Scheduler as Scheduler1
import os
import copy
import pickle
import argparse
INPUT_DIRECTORY = "input"
OUTPUT_DIRECTORY = "output"
if __name__ == '__main__':
arg = arg_parser()
if arg.method == "NMC":
if "task1" in arg.input_name:
run_new_task1_sample(arg.input_name, "naiveMC", 1)
elif "task2" in arg.input_name:
run_new_task2_sample(arg.input_name, "naiveMC", 1)
else:
raise ValueError("'input_name' illegal. Currently only 'task[1|2]_.*?' supported.")
elif arg.method == "OMC":
if "task1" in arg.input_name:
run_new_task1_sample(arg.input_name, "optimizedMC", 2)
elif "task2" in arg.input_name:
run_new_task2_sample(arg.input_name, "optimizedMC", 2)
else:
raise ValueError("'input_name' illegal. Currently only 'task[1|2]_.*?' supported.")
elif arg.method == "BS":
if "task1" in arg.input_name:
run_new_task1_sample(arg.input_name, "balanced_schedule", 3)
elif "task2" in arg.input_name:
run_new_task2_sample(arg.input_name, "balanced_schedule", 3)
else:
raise ValueError("'input_name' illegal. Currently only 'task[1|2]_.*?' supported.")
elif arg.method == "SC":
if "task1" in arg.input_name:
run_new_task1_sample(arg.input_name, "single_core", 4)
elif "task2" in arg.input_name:
run_new_task2_sample(arg.input_name, "single_core", 4)
else:
raise ValueError("'input_name' illegal. Currently only 'task[1|2]_.*?' supported.")
| [
6738,
2134,
13,
21858,
1330,
15768,
355,
15768,
16,
198,
6738,
2134,
13,
1416,
704,
18173,
1330,
27774,
18173,
355,
27774,
18173,
16,
198,
11748,
28686,
198,
11748,
4866,
198,
11748,
2298,
293,
198,
11748,
1822,
29572,
198,
198,
1268,
3... | 2.167102 | 766 |
import requests
from random import choice
import dns.message
import dns.query
import dns.rdatatype
dohservers = ['https://dns.google/dns-query', 'https://cloudflare-dns.com/dns-query', 'https://doh.appliedprivacy.net/query']
server = choice(dohservers)
| [
11748,
7007,
198,
6738,
4738,
1330,
3572,
198,
198,
11748,
288,
5907,
13,
20500,
198,
11748,
288,
5907,
13,
22766,
198,
11748,
288,
5907,
13,
4372,
265,
265,
2981,
198,
198,
67,
1219,
2655,
690,
796,
37250,
5450,
1378,
67,
5907,
13,
... | 2.771739 | 92 |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
I=lambda:map(int,input().split())
a,b=I()
c,d=I()
print((max(1,abs(a-c))+max(1,abs(b-d))<<1)+4) | [
37811,
198,
1635,
198,
1635,
6434,
25,
220,
12585,
19655,
3362,
2879,
7,
19815,
567,
32937,
8,
198,
1635,
9570,
25,
7544,
19655,
13,
79,
2518,
2879,
31,
14816,
13,
785,
198,
1635,
198,
37227,
198,
40,
28,
50033,
25,
8899,
7,
600,
... | 2.021978 | 91 |
from common.block import Block, BlockHeader
from common.io_blockchain import store_blockchain_in_memory
from common.values import NUMBER_OF_LEADING_ZEROS
from node.transaction_validation.transaction_validation import Transaction
| [
6738,
2219,
13,
9967,
1330,
9726,
11,
9726,
39681,
198,
6738,
2219,
13,
952,
62,
9967,
7983,
1330,
3650,
62,
9967,
7983,
62,
259,
62,
31673,
198,
6738,
2219,
13,
27160,
1330,
36871,
13246,
62,
19238,
62,
2538,
2885,
2751,
62,
57,
11... | 3.666667 | 63 |
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, line-too-long
#
# (c) 2019-2020 Timothy Lin <timothy.gh.lin@gmail.com>, BSD 3-Clause License.
#
"""
This is the project configuration file as well the starter script for iPug.
"""
import os
# These UDK tag source trees have been build-tested:
DEFAULT_EDK2_TAG = 'edk2-stable202008'
DEFAULT_EDK2_TAG = 'edk2-stable201911'
CODETREE = {
# edk2-libc is a new edk2 repo since edk2-stable201905. StdLib resides in this repo.
'edk2-libc' : {
#'path' : os.path.join(os.path.expanduser('~'), '.cache', 'pug', 'edk2-libc'),
'path' : os.path.join(os.getcwd(), 'edk2-libc'),
'source' : {
'url' : 'https://github.com/tianocore/edk2-libc.git',
#'signature' : '6168716', # 61687168fe02ac4d933a36c9145fdd242ac424d1 @ Apr/25/2019
},
'multiworkspace': True,
'patch' : 'git apply --directory=edk2-libc edk2-libc.patch',
},
'PciUtils' : {
'path' : os.path.join(os.getcwd(), 'PciUtilsPkg', 'pciutils'),
'source' : {
'url' : 'https://github.com/pciutils/pciutils.git',
'signature' : 'v3.7.0',
},
'patch' : 'git apply --directory=PciUtilsPkg/pciutils pciutils.patch',
}
}
DEFAULT_ACTIVE_PLATFORM = 'PciUtilsPkg/PciUtilsPkg.dsc'
###################################################################################################
if __name__ == '__main__':
import sys
sys.dont_write_bytecode = True # To inhibit the creation of .pyc file
import runpy
runpy.run_module('ipug')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
11,
1627,
12,
18820,
12,
6511,
198,
2,
198,
2,
35... | 2.103491 | 802 |
#!/usr/bin/env python2
import argparse, sys, os, binascii
if len(sys.argv) != 3:
print 'pick a file to extract and a directory to put all the files in'
print ' extractfail.py myarchive.fail directory'
sys.exit()
if not os.path.isfile(sys.argv[1]):
print sys.argv[1] + ' doesn\'t exist'
sys.exit()
try:
os.makedirs(sys.argv[2])
except OSError:
if not os.path.isdir(sys.argv[2]):
raise
archive = open(sys.argv[1], "rb")
archive.seek(0x0)
if archive.read(0x4) != "FAIL":
print 'this isn\'t a "FAIL" archive'
sys.exit()
archive.seek(0x4)
version = int(binascii.hexlify(archive.read(2)), 16)
print 'archive version: ' + str(version)
if version != 1:
print 'archive version too new'
print 'this script can handle up to version 1'
sys.exit()
archive.seek(0x6)
numberOfFiles = int(binascii.hexlify(archive.read(2)), 16)
print 'number of files: ' + str(numberOfFiles)
toExtract = []
# filename, offset, size
currentOffset = 0x8 + (numberOfFiles * 0x108)
print currentOffset
for filenumber in range(0, numberOfFiles):
archive.seek(0x8 + (filenumber * 0x108))
fileheader_magic = archive.read(0x4)
if fileheader_magic != "FILE":
print 'incorrect magic found (should be "FILE")'
archive.close()
sys.exit()
fileheader_name = archive.read(0x100).rstrip('\0')
fileheader_size = int(binascii.hexlify(archive.read(0x4)), 16)
toExtract.append([fileheader_name, currentOffset, fileheader_size])
currentOffset += fileheader_size
# TODO: make this more memory efficient
for fileinfo in toExtract:
print 'writing: ' + fileinfo[0]
filehandle = open(sys.argv[2] + '/' + fileinfo[0], "wb")
archive.seek(fileinfo[1])
filedata = archive.read(fileinfo[2])
filehandle.write(filedata)
filehandle.close()
archive.close()
print 'looks like it worked'
print 'extracted '+str(len(toExtract))+' files'
sys.exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
11748,
1822,
29572,
11,
25064,
11,
28686,
11,
9874,
292,
979,
72,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
513,
25,
198,
220,
220,
220,
3601,
705,
27729,
257,
239... | 2.493506 | 770 |
#!/usr/bin/env python3.5
import raspi_io.utility as utility
from raspi_io import SoftPWM, GPIO
if __name__ == '__main__':
address = utility.scan_server()[0]
pwm20 = SoftPWM(address, GPIO.BCM, 20, 500)
pwm21 = SoftPWM(address, GPIO.BCM, 21, 1000)
pwm21.start(100)
pwm21.start(50)
pwm21.start(10)
pwm20.start(10)
pwm20.start(50)
pwm20.start(100)
pwm20.stop()
pwm21.stop()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
20,
198,
11748,
374,
5126,
72,
62,
952,
13,
315,
879,
355,
10361,
198,
6738,
374,
5126,
72,
62,
952,
1330,
8297,
47,
22117,
11,
50143,
628,
198,
361,
11593,
3672,
834,
6624,
705... | 2.074257 | 202 |
#!/usr/bin/env python
"""
Copyright (c) 2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axil
import pcie_us
module = 'pcie_us_axil_master'
testbench = 'test_%s_256' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
if __name__ == '__main__':
print("Running test...")
test_bench()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
198,
15269,
357,
66,
8,
2864,
4422,
4558,
10782,
488,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3... | 3.273756 | 442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" data.py
Functions for grabbing/manipulating/reporting on data
"""
__author__ = 'Scott Burns <scott.s.burns@vanderbilt.edu>'
__copyright__ = 'Copyright 2014 Vanderbilt University. All Rights Reserved'
import os
import pandas as pd
from redcap import Project
def get_raw():
"""Top-level function.Use to get a DataFrame from the redcap project
Callers **should** catch errors"""
return xfm_df(df_from_csv(csv_from_redcap()))
# HELPERS
def combine_project_pi(record):
"Takes a record, returns a string like PROJECT_LASTNAME"
return '{} ({})'.format(record['project'], record['pi'])
def last_launch(g):
"""Take a list-like object `g` of datetimes, sort descending
& take the first element"""
return sorted(g, reverse=True)[0]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
1366,
13,
9078,
198,
198,
24629,
2733,
329,
23256,
14,
805,
541,
8306,
14,
49914,
319,
1366,
198,
37811,
... | 2.880282 | 284 |
import wave, struct
import numpy as np
import scipy.signal as ss
import matplotlib.pyplot as plt
from tqdm import tqdm
if __name__ == '__main__':
fs = 48000 # sampling rate (Hz)
# duration = 20 # seconds
# N = int(duration * fs)
# n = np.arange(N)
# t = n / fs
# f = 21e3 # Hz
# samples = np.sin(2 * np.pi * f * t)
# savestereo('./audio/sine21k.wav', samples, np.zeros_like(samples), fs)
# # chirps
# duration = 60
# chirp_freq = 10
# fcenter = 20e3
# fradius = 300
# chirp_dur = 1 / chirp_freq
# N = int(chirp_dur * fs)
# n = np.arange(N)
# t = n / fs
# samples = ss.chirp(t, fcenter-fradius, np.max(t), fcenter+fradius, method='linear')
# samples = np.tile(samples, int(duration / chirp_dur))
# savemono('./audio/chirp20k.wav', samples, fs)
# duration = 10 # seconds
# N = int(duration * fs)
# n = np.arange(N)
# t = n / fs
# f1 = 440 #Hz
# f2 = 554.365 #Hz
# # f = 18.5e3 + 100 * ss.sawtooth(t*10, 1)
# left = np.sin(2 * np.pi * f1 * t)
# right = np.sin(2 * np.pi * f2 * t)
# savestereo('./audio/harmony.wav', left, right, fs)
# # chirps
# duration = 20
# chirp_freq = 10
# chirp_dur = 1 / chirp_freq
# N = int(chirp_dur * fs)
# n = np.arange(N)
# t = n / fs
# fcenter = 20e3
# fradius = 500
# # fcenter = 500
# # fradius = 100
# fcenterl = fcenter - 3*fradius
# fcenterr = fcenter + 3*fradius
# smoother = ss.get_window(('tukey', 0.2), N)
# leftchirp = ss.chirp(t, fcenterl-fradius, np.max(t), fcenterl+fradius, method='linear')
# leftchirp *= smoother
# left = np.tile(leftchirp, int(duration / chirp_dur))
# rightchirp = ss.chirp(t, fcenterr-fradius, np.max(t), fcenterr+fradius, method='linear')
# rightchirp *= smoother
# right = np.tile(rightchirp, int(duration / chirp_dur))
# savestereo('./audio/stereochirp.wav', left, right, fs)
duration = 20
chirp_freq = 10
chirp_dur = 1 / chirp_freq
N = int(chirp_dur * fs)
n = np.arange(N)
t = n / fs
fcenter = 21.5e3
fradius = 500
chirp = ss.chirp(t, fcenter-fradius, np.max(t), fcenter+fradius, method='linear')
chirp *= ss.get_window(('tukey', 0.2), N)
signal = np.tile(chirp, int(duration / chirp_dur))
savestereo('./audio/chirp.wav', signal, np.zeros_like(signal), fs)
np.save('chirp.npy', signal)
| [
11748,
6769,
11,
2878,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
12683,
282,
355,
37786,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
... | 2.082333 | 1,166 |
import pytest
# FIXME finish live testing
# due to the complexity of the JavaCard world we'll test the framework using a fixed and
# only example attack
LIVE_ATR = [59, 123, 24, 0, 0, 0, 49, 192, 100, 119, 227, 3, 0, 130, 144, 0]
@pytest.mark.live
@pytest.mark.live
| [
11748,
12972,
9288,
198,
198,
2,
44855,
11682,
5461,
2107,
4856,
198,
2,
2233,
284,
262,
13357,
286,
262,
7349,
16962,
995,
356,
1183,
1332,
262,
9355,
1262,
257,
5969,
290,
198,
2,
691,
1672,
1368,
628,
198,
43,
9306,
62,
1404,
49,... | 2.84375 | 96 |
from django.db import models
from django.contrib.auth.models import User
from post.models import Post
from cloudinary.models import CloudinaryField
from django.db.models.signals import post_save
from PIL import Image
from django.conf import settings
import os
# Create your models here.
post_save.connect(create_user_profile, sender=User)
post_save.connect(save_user_profile, sender=User) | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
201,
198,
6738,
1281,
13,
27530,
1330,
2947,
201,
198,
6738,
6279,
3219,
13,
27530,
1330,
10130,
3219,
15878,
201,... | 3.224 | 125 |
from typing import Any, Dict
| [
6738,
19720,
1330,
4377,
11,
360,
713,
198
] | 3.625 | 8 |
"""Vizio SmartCast API command and class for emulating remote key presses."""
from typing import List, Tuple
from pyvizio.api._protocol import ENDPOINT, KEY_ACTION
from pyvizio.api.base import CommandBase
class KeyPressEvent(object):
"""Emulated remote key press."""
def __init__(
self, key_code: Tuple[int, int], action: str = KEY_ACTION["PRESS"]
) -> None:
"""Initialize emulated remote key press."""
self.CODESET: int = key_code[0]
self.CODE: int = key_code[1]
self.ACTION: str = action
class EmulateRemoteCommand(CommandBase):
"""Command to emulate remote key press."""
def __init__(self, key_codes: List[Tuple[int, int]], device_type: str) -> None:
"""Initialize command to emulate remote key press."""
super(EmulateRemoteCommand, self).__init__(ENDPOINT[device_type]["KEY_PRESS"])
# noinspection SpellCheckingInspection
self.KEYLIST: List[KeyPressEvent] = []
for key_code in key_codes:
self.KEYLIST.append(KeyPressEvent(key_code))
| [
37811,
53,
528,
952,
10880,
19248,
7824,
3141,
290,
1398,
329,
795,
8306,
6569,
1994,
31048,
526,
15931,
198,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
6738,
12972,
85,
528,
952,
13,
15042,
13557,
11235,
4668,
1330,
12964,... | 2.627792 | 403 |
from SaveAndLoad.JSONSerializer import JSONSerializer
import os
from PyQt5 import QtCore
from PyQt5.QtWidgets import QSizePolicy, QGridLayout, QFrame, QLabel, QPushButton, QTextEdit, QSpinBox, QMessageBox, QAction, QInputDialog
from Core.DiceRoller import DiceRollerWithPresetRolls
from Interface.Dialogs.AddPresetRollDialog import AddPresetRollDialog, EditPresetRollDialog
from Interface.Widgets.DieTypeSpinBox import DieTypeSpinBox
from Interface.Widgets.PresetRollsTreeWidget import PresetRollsTreeWidget
from Interface.Windows.Window import Window
from SaveAndLoad.SaveAndOpenMixin import SaveAndOpenMixin
# Roller Methods
# File Menu Action Methods
# Log Menu Action Methods
# Display Update Methods
# Close Event
| [
6738,
12793,
1870,
8912,
13,
40386,
32634,
7509,
1330,
19449,
32634,
7509,
198,
11748,
28686,
198,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
10699,
36727,
11,... | 3.32 | 225 |
#!/usr/bin/env python
import sys
import os
import lightgbm as lgbm
from rdkit import Chem
from rdkit.Chem import Descriptors
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
1657,
70,
20475,
355,
300,
70,
20475,
198,
6738,
374,
67,
15813,
1330,
12870,
198,
6738,
374,
67,
15813,
13,
41829,
1330,
2935,
6519,
... | 2.709677 | 62 |
import board
import busio
import sdcardio
import storage
import digitalio
spi = None
sd = None
vfs = None
path = "/sd"
det = digitalio.DigitalInOut(board.SD_DET)
det.switch_to_input(pull = digitalio.Pull.UP) # det.value == False if sd card is inserted.
mount()
| [
11748,
3096,
198,
11748,
1323,
952,
198,
11748,
45647,
9517,
952,
198,
11748,
6143,
198,
11748,
4875,
952,
198,
198,
2777,
72,
796,
6045,
198,
21282,
220,
796,
6045,
198,
85,
9501,
796,
6045,
198,
198,
6978,
796,
12813,
21282,
1,
198,... | 2.8 | 95 |
from django.contrib import admin
from .models import Store
admin.site.register(Store, StoreAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
9363,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
22658,
11,
9363,
46787,
8,
198
] | 3.482759 | 29 |
"""List of supported formats
"""
from collections import namedtuple
_FORMAT = namedtuple('FormatDefinition', 'mime_type,'
'extension, schema')
_FORMATS = namedtuple('FORMATS', 'GEOJSON, JSON, SHP, GML, GEOTIFF, WCS,'
'WCS100, WCS110, WCS20, WFS, WFS100,'
'WFS110, WFS20, WMS, WMS130, WMS110,'
'WMS100')
FORMATS = _FORMATS(
_FORMAT('application/vnd.geo+json', '.geojson', None),
_FORMAT('application/json', '.json', None),
_FORMAT('application/x-zipped-shp', '.zip', None),
_FORMAT('application/gml+xml', '.gml', None),
_FORMAT('image/tiff; subtype=geotiff', '.tiff', None),
_FORMAT('application/xogc-wcs', '.xml', None),
_FORMAT('application/x-ogc-wcs; version=1.0.0', '.xml', None),
_FORMAT('application/x-ogc-wcs; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wcs; version=2.0', '.xml', None),
_FORMAT('application/x-ogc-wfs', '.xml', None),
_FORMAT('application/x-ogc-wfs; version=1.0.0', '.xml', None),
_FORMAT('application/x-ogc-wfs; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wfs; version=2.0', '.xml', None),
_FORMAT('application/x-ogc-wms', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.3.0', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.1.0', '.xml', None),
_FORMAT('application/x-ogc-wms; version=1.0.0', '.xml', None)
)
| [
37811,
8053,
286,
4855,
17519,
198,
37811,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
62,
21389,
1404,
796,
3706,
83,
29291,
10786,
26227,
36621,
3256,
705,
76,
524,
62,
4906,
4032,
198,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.116547 | 695 |
from logging import getLogger
from constance import config
from django.conf import settings
from ..base import DispatcherOptions
from ..registry import dispatcher_registry
from .email import Email
logger = getLogger(__name__)
@dispatcher_registry.register
| [
6738,
18931,
1330,
651,
11187,
1362,
198,
198,
6738,
1500,
590,
1330,
4566,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
11485,
8692,
1330,
3167,
8071,
2044,
29046,
198,
6738,
11485,
2301,
4592,
1330,
49952,
62,
2301,
... | 3.589041 | 73 |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\display_snippet_tuning.py
# Compiled at: 2020-01-29 22:49:46
# Size of source mod 2**32: 20461 bytes
from collections import namedtuple
from event_testing.resolver import InteractionResolver
from event_testing.tests import TunableTestSetWithTooltip, TunableTestSet
from filters.tunable import TunableSimFilter
from interactions import ParticipantTypeSim
from interactions.base.picker_interaction import PickerSuperInteraction
from interactions.utils.display_mixin import get_display_mixin
from interactions.utils.localization_tokens import LocalizationTokens
from interactions.utils.loot import LootActions
from interactions.utils.tunable import TunableContinuation
from interactions.utils.tunable_icon import TunableIcon
from sims.university.university_scholarship_tuning import ScholarshipMaintenaceType, ScholarshipEvaluationType, MeritEvaluation
from sims4.localization import TunableLocalizedString, TunableLocalizedStringFactory
from sims4.tuning.tunable import TunableEnumFlags, TunableList, TunableTuple, TunableReference, Tunable, TunableRange, TunableVariant, OptionalTunable, AutoFactoryInit, HasTunableSingletonFactory
from sims4.tuning.tunable_base import GroupNames, ExportModes
from sims4.utils import flexmethod
from singletons import DEFAULT
from ui.ui_dialog_picker import TunablePickerDialogVariant, ObjectPickerTuningFlags, BasePickerRow
import enum, event_testing, services, sims4.tuning
logger = sims4.log.Logger('Display Snippet', default_owner='shipark')
SnippetDisplayMixin = get_display_mixin(use_string_tokens=True, has_description=True, has_icon=True, has_tooltip=True, enabled_by_default=True, export_modes=(ExportModes.All))
snippet_override_data = namedtuple('SnippetDisplayData', ('display_name', 'display_description',
'display_tooltip', 'display_icon'))
L. 374 0 LOAD_FAST 'inst'
2 LOAD_CONST None
4 COMPARE_OP is-not
6 POP_JUMP_IF_FALSE 12 'to 12'
8 LOAD_FAST 'inst'
10 JUMP_FORWARD 14 'to 14'
12_0 COME_FROM 6 '6'
12 LOAD_FAST 'cls'
14_0 COME_FROM 10 '10'
14 STORE_FAST 'inst_or_cls'
L. 375 16 LOAD_FAST 'target'
18 LOAD_GLOBAL DEFAULT
20 COMPARE_OP is-not
22 POP_JUMP_IF_FALSE 28 'to 28'
24 LOAD_FAST 'target'
26 JUMP_FORWARD 32 'to 32'
28_0 COME_FROM 22 '22'
28 LOAD_FAST 'inst'
30 LOAD_ATTR target
32_0 COME_FROM 26 '26'
32 STORE_FAST 'target'
L. 376 34 LOAD_FAST 'context'
36 LOAD_GLOBAL DEFAULT
38 COMPARE_OP is-not
40 POP_JUMP_IF_FALSE 46 'to 46'
42 LOAD_FAST 'context'
44 JUMP_FORWARD 50 'to 50'
46_0 COME_FROM 40 '40'
46 LOAD_FAST 'inst'
48 LOAD_ATTR context
50_0 COME_FROM 44 '44'
50 STORE_FAST 'context'
L. 377 52 LOAD_GLOBAL InteractionResolver
54 LOAD_FAST 'cls'
56 LOAD_FAST 'inst'
58 LOAD_FAST 'target'
60 LOAD_FAST 'context'
62 LOAD_CONST ('target', 'context')
64 CALL_FUNCTION_KW_4 4 '4 total positional and keyword args'
66 STORE_FAST 'resolver'
L. 379 68 LOAD_FAST 'inst_or_cls'
70 LOAD_ATTR display_snippet_text_tokens
72 LOAD_METHOD get_tokens
74 LOAD_FAST 'resolver'
76 CALL_METHOD_1 1 '1 positional argument'
78 STORE_FAST 'general_tokens'
L. 380 80 LOAD_FAST 'inst_or_cls'
82 LOAD_ATTR display_snippet_text_overrides
84 STORE_FAST 'overrides'
L. 382 86 LOAD_CONST 0
88 STORE_FAST 'index'
L. 383 90_92 SETUP_LOOP 410 'to 410'
94 LOAD_FAST 'inst_or_cls'
96 LOAD_ATTR display_snippets
98 GET_ITER
100_102 FOR_ITER 408 'to 408'
104 STORE_FAST 'display_snippet_data'
L. 384 106 LOAD_FAST 'display_snippet_data'
108 LOAD_ATTR display_snippet
110 STORE_FAST 'display_snippet'
L. 387 112 LOAD_GLOBAL InteractionResolver
114 LOAD_FAST 'cls'
116 LOAD_FAST 'inst'
118 LOAD_FAST 'target'
120 LOAD_FAST 'context'
122 LOAD_FAST 'display_snippet'
124 LOAD_ATTR guid64
126 BUILD_SET_1 1
128 LOAD_CONST ('target', 'context', 'picked_item_ids')
130 CALL_FUNCTION_KW_5 5 '5 total positional and keyword args'
132 STORE_FAST 'resolver'
L. 388 134 LOAD_FAST 'display_snippet_data'
136 LOAD_METHOD test
138 LOAD_FAST 'resolver'
140 CALL_METHOD_1 1 '1 positional argument'
142 STORE_FAST 'test_result'
L. 389 144 LOAD_FAST 'test_result'
146 LOAD_ATTR result
148 STORE_FAST 'is_enable'
L. 390 150 LOAD_FAST 'is_enable'
152 POP_JUMP_IF_TRUE 166 'to 166'
154 LOAD_FAST 'test_result'
156 LOAD_ATTR tooltip
158 LOAD_CONST None
160 COMPARE_OP is-not
162_164 POP_JUMP_IF_FALSE 398 'to 398'
166_0 COME_FROM 152 '152'
L. 394 166 LOAD_FAST 'display_snippet'
168 LOAD_ATTR display_name
170 LOAD_CONST None
172 COMPARE_OP is-not
174 POP_JUMP_IF_FALSE 186 'to 186'
176 LOAD_FAST 'display_snippet'
178 LOAD_ATTR display_name
180 LOAD_FAST 'general_tokens'
182 CALL_FUNCTION_EX 0 'positional arguments only'
184 JUMP_FORWARD 188 'to 188'
186_0 COME_FROM 174 '174'
186 LOAD_CONST None
188_0 COME_FROM 184 '184'
L. 396 188 LOAD_FAST 'display_snippet'
190 LOAD_ATTR display_description
192 LOAD_CONST None
194 COMPARE_OP is-not
196 POP_JUMP_IF_FALSE 208 'to 208'
198 LOAD_FAST 'display_snippet'
200 LOAD_ATTR display_description
202 LOAD_FAST 'general_tokens'
204 CALL_FUNCTION_EX 0 'positional arguments only'
206 JUMP_FORWARD 210 'to 210'
208_0 COME_FROM 196 '196'
208 LOAD_CONST None
210_0 COME_FROM 206 '206'
L. 398 210 LOAD_FAST 'display_snippet'
212 LOAD_ATTR display_tooltip
214 LOAD_CONST None
216 COMPARE_OP is-not
218 POP_JUMP_IF_FALSE 230 'to 230'
220 LOAD_FAST 'display_snippet'
222 LOAD_ATTR display_tooltip
224 LOAD_FAST 'general_tokens'
226 CALL_FUNCTION_EX 0 'positional arguments only'
228 JUMP_FORWARD 232 'to 232'
230_0 COME_FROM 218 '218'
230 LOAD_CONST None
232_0 COME_FROM 228 '228'
232 BUILD_TUPLE_3 3
234 STORE_FAST 'snippet_default_tokens'
L. 400 236 LOAD_FAST 'display_snippet_data'
238 LOAD_ATTR display_snippet_text_tokens
240 LOAD_METHOD get_tokens
242 LOAD_FAST 'resolver'
244 CALL_METHOD_1 1 '1 positional argument'
246 STORE_FAST 'snippet_additional_tokens'
L. 401 248 LOAD_FAST 'general_tokens'
250 LOAD_FAST 'snippet_default_tokens'
252 BINARY_ADD
254 LOAD_FAST 'snippet_additional_tokens'
256 BINARY_ADD
258 STORE_DEREF 'tokens'
L. 404 260 LOAD_FAST 'overrides'
262 LOAD_CONST None
264 COMPARE_OP is-not
266_268 POP_JUMP_IF_FALSE 280 'to 280'
L. 405 270 LOAD_FAST 'overrides'
272 LOAD_FAST 'display_snippet_data'
274 LOAD_ATTR display_snippet
276 CALL_FUNCTION_1 1 '1 positional argument'
278 STORE_FAST 'display_snippet'
280_0 COME_FROM 266 '266'
L. 408 280 LOAD_FAST 'test_result'
282 LOAD_ATTR tooltip
284 LOAD_CONST None
286 COMPARE_OP is
288_290 POP_JUMP_IF_FALSE 296 'to 296'
292 LOAD_CONST None
294 JUMP_FORWARD 314 'to 314'
296_0 COME_FROM 288 '288'
296 LOAD_FAST 'test_result'
298 LOAD_ATTR tooltip
300 LOAD_CONST ('tooltip',)
302 BUILD_CONST_KEY_MAP_1 1
304 LOAD_CLOSURE 'tokens'
306 BUILD_TUPLE_1 1
308 LOAD_LAMBDA '<code_object <lambda>>'
310 LOAD_STR 'DisplaySnippetPickerSuperInteraction.picker_rows_gen.<locals>.<lambda>'
312 MAKE_FUNCTION_10 'keyword-only, closure'
314_0 COME_FROM 294 '294'
314 STORE_FAST 'tooltip'
L. 409 316 LOAD_FAST 'tooltip'
318_320 POP_JUMP_IF_TRUE 358 'to 358'
L. 410 322 LOAD_FAST 'display_snippet'
324 LOAD_ATTR display_tooltip
326 LOAD_CONST None
328 COMPARE_OP is
330_332 POP_JUMP_IF_FALSE 338 'to 338'
334 LOAD_CONST None
336 JUMP_FORWARD 356 'to 356'
338_0 COME_FROM 330 '330'
338 LOAD_FAST 'display_snippet'
340 LOAD_ATTR display_tooltip
342 LOAD_CONST ('tooltip',)
344 BUILD_CONST_KEY_MAP_1 1
346 LOAD_CLOSURE 'tokens'
348 BUILD_TUPLE_1 1
350 LOAD_LAMBDA '<code_object <lambda>>'
352 LOAD_STR 'DisplaySnippetPickerSuperInteraction.picker_rows_gen.<locals>.<lambda>'
354 MAKE_FUNCTION_10 'keyword-only, closure'
356_0 COME_FROM 336 '336'
356 STORE_FAST 'tooltip'
358_0 COME_FROM 318 '318'
L. 411 358 LOAD_GLOBAL BasePickerRow
360 LOAD_FAST 'is_enable'
L. 412 362 LOAD_FAST 'display_snippet'
364 LOAD_ATTR display_name
366 LOAD_DEREF 'tokens'
368 CALL_FUNCTION_EX 0 'positional arguments only'
L. 413 370 LOAD_FAST 'display_snippet'
372 LOAD_ATTR display_icon
L. 414 374 LOAD_FAST 'index'
L. 415 376 LOAD_FAST 'display_snippet'
378 LOAD_ATTR display_description
380 LOAD_DEREF 'tokens'
382 CALL_FUNCTION_EX 0 'positional arguments only'
L. 416 384 LOAD_FAST 'tooltip'
386 LOAD_CONST ('is_enable', 'name', 'icon', 'tag', 'row_description', 'row_tooltip')
388 CALL_FUNCTION_KW_6 6 '6 total positional and keyword args'
390 STORE_FAST 'row'
L. 417 392 LOAD_FAST 'row'
394 YIELD_VALUE
396 POP_TOP
398_0 COME_FROM 162 '162'
L. 418 398 LOAD_FAST 'index'
400 LOAD_CONST 1
402 INPLACE_ADD
404 STORE_FAST 'index'
406 JUMP_BACK 100 'to 100'
408 POP_BLOCK
410_0 COME_FROM_LOOP 90 '90'
Parse error at or near `COME_FROM' instruction at offset 314_0 | [
2,
34318,
2349,
21,
2196,
513,
13,
22,
13,
19,
198,
2,
11361,
18022,
8189,
513,
13,
22,
357,
2091,
5824,
8,
198,
2,
4280,
3361,
3902,
422,
25,
11361,
513,
13,
22,
13,
24,
357,
31499,
14,
85,
18,
13,
22,
13,
24,
25,
1485,
66,... | 1.601367 | 9,071 |
from twisted.internet.defer import succeed
| [
6738,
19074,
13,
37675,
13,
4299,
263,
1330,
6758,
628
] | 4.4 | 10 |