content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import math
from mathutils import Matrix
import appleseed as asr
from ..translator import Translator
from ...logger import get_logger
from ...utils import util
logger = get_logger()
class InteractiveCameraTranslator(Translator):
"""
This translator is responsible for translating the Blender camera into an appleseed
camera object for final rendering. This includes support for stereoscopic rendering.
"""
@property
# Internal methods.
| [
2,
198,
2,
770,
2723,
2393,
318,
636,
286,
22514,
2308,
13,
198,
2,
16440,
2638,
1378,
1324,
829,
2308,
71,
80,
13,
3262,
14,
329,
3224,
1321,
290,
4133,
13,
198,
2,
198,
2,
770,
3788,
318,
2716,
739,
262,
17168,
5964,
13,
198,
... | 3.820346 | 462 |
# https://stackoverflow.com/questions/18007995/recursive-method-for-parentheses-balancing-python
# Ad-hoc driver: prints the balance-check result for a handful of sample
# parenthesis strings (balanced, unbalanced, reversed, and empty cases).
# NOTE(review): verify_parentheses() is not defined in this fragment --
# it must be defined/imported elsewhere for these calls to resolve.
a = "(()()"
print(verify_parentheses(a))
b = "(()"
print(verify_parentheses(b))
c = "(())"
print(verify_parentheses(c))
d = ")(()"
print(verify_parentheses(d))
e = "())(())("
print(verify_parentheses(e))
f = "()((("
print(verify_parentheses(f))
g = ""
print(verify_parentheses(g))
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
1507,
25816,
33438,
14,
8344,
30753,
12,
24396,
12,
1640,
12,
8000,
39815,
12,
6893,
5077,
12,
29412,
198,
198,
2,
8160,
262,
2163,
11,
4731,
299,
351,
29994,
220,
198,
... | 2.575419 | 179 |
import json
import pickle
import re
from app import emitter, definitions, values
from six.moves import cStringIO
import os
from pysmt.smtlib.parser import SmtLibParser
def collect_symbolic_expression(log_path):
    """
    Parse a klee concolic-execution log and collect symbolic expressions
    for the variables of interest.

    Lines tagged '[klee:expr]' carry entries of the form
    ``[program-var] <name> : <expr>`` or ``[angelic-var] <name> : <expr>``;
    an angelic entry is paired (nested) with the entry that preceded it.

    :param log_path: path to the klee output log
    :return: de-duplicated list of (name, expr) tuples / nested pairs
    """
    # emitter.normal("\textracting symbolic expressions")
    collected = list()
    if not os.path.exists(log_path):
        return collected
    with open(log_path, 'r') as log_file:
        pair = None
        for log_line in log_file:
            if '[klee:expr]' in log_line:
                payload = log_line.split("[klee:expr] ")[-1]
                name, expr = payload.split(" : ")
                expr = expr.replace("\n", "")
                if "[program-var]" in name:
                    pair = (name.replace("[program-var] ", ""), expr)
                elif "[angelic-var]" in name:
                    pair = (pair, (name.replace("[angelic-var] ", ""), expr))
            # the membership test runs for every line (matching the original
            # control flow), so the current pair is recorded at most once
            if pair not in collected:
                collected.append(pair)
    return collected
def collect_symbolic_path_prefix(log_path, project_path):
    """
    Parse a klee concolic-execution log and extract the prefix of the
    partial path condition that should be omitted during path generation.

    Conditions are accumulated until the first '[path:ppc]' entry that
    originates from inside *project_path*; the last complete condition
    seen before that point becomes the prefix.

    :param log_path: path to the klee output log
    :param project_path: absolute path of the project sources
    :return: the prefix path condition (empty string if none was found)
    """
    emitter.normal("\textracting prefix of path condition")
    prefix_ppc = ""
    if not os.path.exists(log_path):
        return prefix_ppc
    current_source = ""
    condition = ""
    with open(log_path, 'r') as log_file:
        for log_line in log_file:
            if '[path:ppc]' in log_line:
                # stop at the first condition emitted from project code
                if project_path in log_line:
                    break
                stripped = str(log_line.replace("[path:ppc]", ''))
                current_source = os.path.abspath(stripped.split(" : ")[0].strip())
                condition = stripped.split(" : ")[1]
                continue
            if current_source:
                if "(exit)" in log_line:
                    # condition is complete; remember it as the prefix so far
                    prefix_ppc = condition
                    current_source = ""
                    condition = ""
                else:
                    condition = condition + log_line
    return prefix_ppc
def collect_symbolic_path(log_path, project_path):
    """
    This function will read the output log of a klee concolic execution and
    extract the partial path conditions

    Only '[path:ppc]' entries originating from *project_path* or from the
    configured library directory are kept.  The final symbolic path seen
    in the log is additionally parsed into a pysmt formula.

    :param log_path: path to the klee output log
    :param project_path: absolute path of the project sources
    :return: (ppc_list, formula) where ppc_list is a list of
        (source_path, path_condition) pairs and formula is the pysmt
        formula of the last symbolic path
    """
    emitter.normal("\textracting path conditions")
    ppc_list = list()
    last_sym_path = ""
    if os.path.exists(log_path):
        source_path = ""
        path_condition = ""
        with open(log_path, 'r') as trace_file:
            for line in trace_file:
                if '[path:ppc]' in line:
                    if project_path in line or definitions.DIRECTORY_LIB in line:
                        # header format: "[path:ppc] <source path> : <condition start>"
                        source_path = str(line.replace("[path:ppc]", '')).split(" : ")[0]
                        source_path = source_path.strip()
                        source_path = os.path.abspath(source_path)
                        path_condition = str(line.replace("[path:ppc]", '')).split(" : ")[1]
                        continue
                if source_path:
                    # condition body continues until an "(exit)" marker line
                    if "(exit)" not in line:
                        path_condition = path_condition + line
                    else:
                        ppc_list.append((source_path, path_condition))
                        last_sym_path = path_condition
                        source_path = ""
                        path_condition = ""
    # constraints['last-sym-path'] = last_sym_path
    # print(constraints.keys())
    # NOTE(review): if the log is missing or contains no conditions,
    # last_sym_path is "" and the SMT-LIB parser receives an empty
    # stream -- confirm this is the intended behavior.
    parser = SmtLibParser()
    script = parser.get_script(cStringIO(last_sym_path))
    formula = script.get_last_formula()
    return ppc_list, formula
def collect_trace(file_path, project_path):
    """
    This function will read the output log of a klee concolic execution and
    extract the instruction trace

    Only '[klee:trace]' lines mentioning *project_path* are kept, and
    consecutive duplicate locations are collapsed.  As a side effect the
    global hit counters in `values` are updated for the patch, fault and
    crash locations, and values.IS_CRASH is set from the klee message log.

    :param file_path: path to the klee trace log
    :param project_path: filter; only lines mentioning this path are kept
    :return: list of "abs_source_path:line_number" strings
    """
    emitter.normal("\textracting instruction trace")
    list_trace = list()
    if os.path.exists(file_path):
        with open(file_path, 'r') as trace_file:
            for line in trace_file:
                if '[klee:trace]' in line:
                    if project_path in line:
                        trace_line = str(line.replace("[klee:trace] ", ''))
                        trace_line = trace_line.strip()
                        source_path, line_number = trace_line.split(":")
                        source_path = os.path.abspath(source_path)
                        trace_line = source_path + ":" + str(line_number)
                        # collapse *consecutive* duplicates only; a location
                        # may still appear multiple times in the trace
                        if (not list_trace) or (list_trace[-1] != trace_line):
                            list_trace.append(trace_line)
    if values.CONF_LOC_PATCH:
        if values.CONF_LOC_PATCH in list_trace:
            emitter.note("\t\t[note] patch location detected in trace")
            values.COUNT_HIT_PATCH_LOC = values.COUNT_HIT_PATCH_LOC + 1
    if values.CONF_LOC_BUG:
        if values.CONF_LOC_BUG in list_trace:
            emitter.note("\t\t[note] fault location detected in trace")
            # NOTE(review): counter is named COUNT_HIT_BUG_LOG -- possibly a
            # typo for COUNT_HIT_BUG_LOC; confirm against the values module
            values.COUNT_HIT_BUG_LOG = values.COUNT_HIT_BUG_LOG + 1
    if values.CONF_LOC_LIST_CRASH:
        if not set(values.CONF_LOC_LIST_CRASH).isdisjoint(list_trace):
            emitter.note("\t\t[note] a crash location detected in trace")
            values.COUNT_HIT_CRASH_LOC = values.COUNT_HIT_CRASH_LOC + 1
    is_crash = collect_crash_point(values.FILE_MESSAGE_LOG)
    if is_crash:
        values.IS_CRASH = True
        values.COUNT_HIT_CRASH = values.COUNT_HIT_CRASH + 1
        emitter.note("\t\t[note] program crashed")
    else:
        values.IS_CRASH = False
        emitter.note("\t\t[note] program did not crash")
    return list_trace
def collect_symbolic_path_loc(log_path, project_path):
    """
    Parse a klee concolic-execution log and extract the control locations
    (absolute source paths) at which partial path conditions were inserted.

    :param log_path: path to the klee output log
    :param project_path: absolute path of the project sources
    :return: list of absolute source paths, one per matching '[path:ppc]' entry
    """
    emitter.normal("\textracting path conditions")
    location_list = list()
    if not os.path.exists(log_path):
        return location_list
    with open(log_path, 'r') as log_file:
        for log_line in log_file:
            if '[path:ppc]' not in log_line:
                continue
            if project_path in log_line or definitions.DIRECTORY_LIB in log_line:
                location = str(log_line.replace("[path:ppc]", '')).split(" : ")[0]
                location_list.append(os.path.abspath(location.strip()))
    return location_list
def collect_crash_point(trace_file_path):
    """
    Parse a klee message log and extract the source location of the
    crashing instruction.

    :param trace_file_path: path to the klee message log
    :return: crash location string (empty string if no crash was reported)
    """
    crash_location = ""
    if os.path.exists(trace_file_path):
        with open(trace_file_path, 'r') as message_file:
            for message_line in message_file:
                if "KLEE: ERROR:" not in message_line:
                    continue
                # the location precedes the first ": " after the marker
                remainder = message_line.replace("KLEE: ERROR: ", "")
                crash_location = remainder.split(": ")[0]
                break
    return crash_location
def collect_exploit_return_code(output_file_path):
    """
    Parse a captured program-execution log and extract the exit code.

    :param output_file_path: path to the captured output log
    :return: exit code as int, or "" when no 'RETURN CODE:' line exists
    """
    return_code = ""
    if os.path.exists(output_file_path):
        with open(output_file_path, 'r') as output_file:
            for output_line in output_file.readlines():
                if "RETURN CODE:" in output_line:
                    # int() tolerates the trailing newline
                    return_code = int(output_line.replace("RETURN CODE: ", ""))
                    break
    return return_code
def collect_exploit_output(output_file_path):
    """
    Read back the captured output of a program execution.

    :param output_file_path: path to the captured output log
    :return: list of output lines, or "" when the file does not exist
            (quirk preserved from the original interface)
    """
    if not os.path.exists(output_file_path):
        return ""
    with open(output_file_path, 'r') as output_file:
        return output_file.readlines()
def collect_stack_info(trace_file_path):
    """
    Parse a klee message log and extract any stack-trace information
    emitted for error exits.

    :param trace_file_path: path to the klee message log
    :return: mapping of source path -> {function name -> line number}
    """
    stack_map = dict()
    if not os.path.exists(trace_file_path):
        return stack_map
    with open(trace_file_path, 'r') as message_file:
        inside_stack = False
        for message_line in message_file:
            # frame lines look like: "#N <addr> in <func> (...) at <file>:<line>"
            if inside_stack and '#' in message_line:
                if " at " in message_line:
                    frame, location = str(message_line).split(" at ")
                    source_path, line_number = location.split(":")
                    function_name = str(frame.split(" in ")[1]).split(" (")[0]
                    stack_map.setdefault(source_path, dict())
                    stack_map[source_path][function_name] = line_number.strip()
            if "Stack:" in message_line:
                inside_stack = True
                continue
    return stack_map
| [
11748,
33918,
198,
11748,
2298,
293,
198,
11748,
302,
198,
6738,
598,
1330,
795,
1967,
11,
17336,
11,
3815,
198,
6738,
2237,
13,
76,
5241,
1330,
269,
10100,
9399,
198,
11748,
28686,
198,
6738,
279,
893,
16762,
13,
5796,
83,
8019,
13,
... | 2.039174 | 4,697 |
import cv2
import numpy as np
from keras.models import load_model
from model.image_classification_class import ImageClassification
# NOTE(review): hard-coded absolute Windows path to the trained weights --
# must be made configurable before running on another machine.
export_path_keras = "C:\\Users\\catal\\Facultate\\an4\\licenta\\modele antrenate\\modelBinaryClassificationFoodOrNotfood-VGG16.h5"
# Binary food / not-food classifier (VGG16-based .h5), loaded once at import time.
model = load_model(export_path_keras)
# Maps the model's class indices (as strings) to human-readable labels.
classtolabel = {'0':'food','1':'notfood'}
| [
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
201,
198,
6738,
2746,
13,
9060,
62,
4871,
2649,
62,
4871,
1330,
7412,
9487,
2649,
201,
198,
201,
198,
39344,
... | 2.734375 | 128 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:/Code/MyQT/typing/typing.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
68,
14079,
10669,
14,
3666,
48,
51,
14,
774,
13886,
14,
774,
13886,
13,
9019,
6,
201,
198,
2,
20... | 2.486239 | 109 |
# Reads N integers from stdin and reports how many fall inside the
# closed interval [10, 20] ("in") and how many fall outside ("out").
N = int(input())
soma = 0  # count of values inside [10, 20]
for k in range(N):
    X = int(input())
    if X >= 10 and X <= 20: soma+=1
print("%d in" % (soma))
# N - soma is never negative here, so abs() is effectively a no-op
print("%d out" % (abs(N-soma)))
| [
45,
796,
493,
7,
15414,
28955,
198,
198,
82,
6086,
796,
657,
198,
198,
1640,
479,
287,
2837,
7,
45,
2599,
198,
220,
220,
220,
1395,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
611,
1395,
18189,
838,
290,
1395,
19841,
1160,
25,
... | 2.064103 | 78 |
"""Python wrapper for the object extraction algorithm found in
https://github.com/assamite/CmCode
"""
import os
import tempfile
from ctypes import cdll, c_char_p, c_bool
import cv2
from .utils import file_cache
def _saliency(image_path, saliency_map_path, saliency_mask_path):
    """Python wrapper for running the saliency detection.
    :param image_path: path to the image file
    :type image_path: str
    :param saliency_map_path: path to save the generated saliency map
    :type saliency_map_path: str
    :param saliency_mask_path: path to save the binarized saliency map
    :type saliency_mask_path: str
    :returns: bool -- whether the saliency detection was succesful
    """
    from .. import get_data
    # the shared-library location can be overridden via the environment
    saliency_lib = os.getenv('SALIENCY_SO_PATH')
    if saliency_lib is None:
        saliency_lib = get_data("object_extraction/saliency.so")
    if not os.path.isfile(saliency_lib):
        raise IOError("invalid file path for saliency.so: %s" % saliency_lib)
    SaliencyDetector = cdll.LoadLibrary(saliency_lib)
    # declare the C function's return type so ctypes converts it to bool
    SaliencyDetector.saliency.restype = c_bool
    # NOTE(review): c_char_p expects bytes on Python 3 -- these calls look
    # written for Python 2 str; confirm/encode before running under py3
    cimage_path = c_char_p(image_path)
    csaliency_map_path = c_char_p(saliency_map_path)
    csaliency_mask_path = c_char_p(saliency_mask_path)
    return SaliencyDetector.saliency(cimage_path, csaliency_map_path,
                                     csaliency_mask_path)
def _run_object_extraction(image_path):
    """Runs an object extraction algorithm on an image and returns
    paths to the resulting full and binarized saliency maps.

    :param image_path: path to the image file
    :type image_path: str
    :returns: tuple -- paths to the resulting images, or None if the
        saliency detection failed
    """
    def _mktemp():
        # mkstemp returns an *open* file descriptor alongside the path;
        # close it immediately so descriptors are not leaked (the original
        # lambda discarded the fd without closing it)
        fd, path = tempfile.mkstemp(suffix=".jpg")
        os.close(fd)
        return path

    temp1, temp2 = _mktemp(), _mktemp()
    if not _saliency(image_path, temp1, temp2):
        return None
    return temp1, temp2
@file_cache
def extract_object(image_path):
    """Runs an object extraction algorithm on an image and returns
    the resulting full and binarized saliency maps as numpy matrices.

    :param image_path: path to the image file
    :type image_path: str
    :returns: tuple -- the resulting saliency maps, or None if the
        saliency detection failed
    :raises TypeError: if image_path is not a string
    """
    # original checked "isinstance(..., str) or isinstance(..., str)" --
    # a py2 str/unicode remnant; one check suffices on py3
    if not isinstance(image_path, str):
        raise TypeError("image_path should be a string, not %s" %
                        image_path)
    extracted = _run_object_extraction(image_path)
    if extracted is None:
        # saliency detection failed; previously this crashed with a
        # TypeError when unpacking None -- propagate the failure instead
        return None
    full, binarized = extracted
    # NOTE(review): CV_LOAD_IMAGE_GRAYSCALE is the legacy OpenCV constant
    # (cv2.IMREAD_GRAYSCALE in OpenCV >= 3) -- confirm the targeted version
    data = (cv2.imread(full, cv2.CV_LOAD_IMAGE_GRAYSCALE),
            cv2.imread(binarized, cv2.CV_LOAD_IMAGE_GRAYSCALE))
    # clean up the temporary files created by the extraction step
    # (original called undefined __remove_file, a guaranteed NameError)
    os.remove(full)
    os.remove(binarized)
    return data
| [
37811,
37906,
29908,
329,
262,
2134,
22236,
11862,
1043,
287,
198,
5450,
1378,
12567,
13,
785,
14,
562,
321,
578,
14,
34,
76,
10669,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
6738,
269,
19199,
1330,
269,
12736,
... | 2.576923 | 1,014 |
"""Util module."""
import six
from django.db import models
from django.db.models.fields.related import ForeignObjectRel as RelatedObject
from django.forms.forms import pretty_name
from django.utils.encoding import force_str
from django.utils.encoding import force_text
def label_for_field(name, model, return_attr=False):
    """Returns a sensible label for a field name.
    The name can be a callable, property (but not created with @property decorator)
    or the name of an object's attribute, as well as a genuine fields.
    If return_attr is True, the resolved attribute (which could be a callable)
    is also returned. This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        # NOTE(review): Model._meta.get_field_by_name() was removed in
        # Django 1.10 (replaced by get_field()) -- confirm target version
        field = model._meta.get_field_by_name(name)[0]
        if isinstance(field, RelatedObject):
            # reverse relations expose verbose_name via their .opts
            label = field.opts.verbose_name
        else:
            label = field.verbose_name
    except models.FieldDoesNotExist:
        # not a model field: resolve via dunder names, callables, attributes
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            # NOTE(review): attr = bytes mirrors Django's py2-era handling
            # where __str__ returned bytes -- confirm still intended on py3
            attr = bytes
        else:
            if callable(name):
                attr = name
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (
                    name,
                    model._meta.object_name,
                )
                raise AttributeError(message)
            # admin-style override takes precedence over derived names
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (
                isinstance(attr, property)
                and hasattr(attr, "fget")
                and hasattr(attr.fget, "short_description")
            ):
                label = attr.fget.short_description
            elif callable(attr):
                # anonymous callables get a placeholder label
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
| [
37811,
18274,
346,
8265,
526,
15931,
198,
11748,
2237,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
25747,
13,
5363,
1330,
8708,
10267,
6892,
355,
19809,
10267,
198,
6738,
42625,
14208,
... | 2.093985 | 1,064 |
# CLI entry point: squares the first command-line argument via Calculator.
# NOTE(review): Calculator is not defined in this fragment -- presumably
# imported/defined elsewhere in the full file; the second argv value is
# passed to the constructor but unused by square().
if __name__ == '__main__':
    from sys import argv
    cal = Calculator(int(argv[1]), int(argv[2]))
    print(cal.square())
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
422,
25064,
1330,
1822,
85,
628,
220,
220,
220,
2386,
796,
43597,
7,
600,
7,
853,
85,
58,
16,
46570,
493,
7,
853,
85,
58,
17,
60,
4008,
198,... | 2.345455 | 55 |
# Time: O(n)
# Space: O(1)
| [
2,
3862,
25,
220,
440,
7,
77,
8,
198,
2,
4687,
25,
440,
7,
16,
8,
198
] | 1.647059 | 17 |
from apscheduler.schedulers.background import BackgroundScheduler
from pyfcm import FCMNotification
from flask import Flask, render_template
from bs4 import BeautifulSoup
import requests
from urllib import parse
import pyrebase
from config import config
from keys import api_key
from collections import OrderedDict
# Accumulates scraped notices between polling runs.
notices = []
# Firebase Cloud Messaging client used to push notifications to devices.
push_service = FCMNotification(api_key=api_key)
print("Initialised Push Service")
firebase = pyrebase.initialize_app(config)
db = firebase.database()
print("Initialised Pyrebase")
# Poll job runs every 20 seconds in a daemon background thread.
# NOTE(review): sensor() is not defined in this fragment -- it must be
# defined elsewhere for this job registration to resolve.
sched = BackgroundScheduler(daemon=True)
sched.add_job(sensor, 'interval', seconds=20)
sched.start()
print("Started Scheduler")
app = Flask(__name__)
@app.route("/")
# NOTE(review): a decorator must be followed by a def/class; the view
# function for this route appears to have been lost in extraction, which
# makes this fragment a SyntaxError as-is.
if __name__ == "__main__":
    app.run(use_reloader=False)
| [
6738,
257,
862,
1740,
18173,
13,
1416,
704,
377,
364,
13,
25249,
1330,
25353,
50,
1740,
18173,
198,
6738,
12972,
69,
11215,
1330,
10029,
44,
3673,
2649,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
6738,
275,
82,
19,
1330,... | 3.069672 | 244 |
import sys
from .module import register_module, ModuleImporter
# On Python 3, install a meta-path finder so submodule imports of this
# package are routed through ModuleImporter.
if sys.version_info >= (3, 0):
    sys.meta_path.append(ModuleImporter(__name__))
# Register this package itself with the module registry (all versions).
register_module(__name__)
| [
11748,
25064,
198,
198,
6738,
764,
21412,
1330,
7881,
62,
21412,
11,
19937,
3546,
26634,
628,
198,
361,
25064,
13,
9641,
62,
10951,
18189,
357,
18,
11,
657,
2599,
198,
220,
220,
220,
25064,
13,
28961,
62,
6978,
13,
33295,
7,
26796,
... | 2.916667 | 60 |
import numpy as np
from SharedProcessors.const import RAW_DATA_PATH, TRIAL_NAMES, MOCAP_SAMPLE_RATE
import matplotlib.pyplot as plt
import scipy.interpolate as interpo
from ViconReader import ViconReader
| [
11748,
299,
32152,
355,
45941,
198,
6738,
39403,
18709,
669,
13,
9979,
1330,
33782,
62,
26947,
62,
34219,
11,
37679,
1847,
62,
45,
29559,
11,
337,
4503,
2969,
62,
49302,
16437,
62,
49,
6158,
198,
11748,
2603,
29487,
8019,
13,
9078,
29... | 3.059701 | 67 |
"""
A word replacer to make a dumb sentence sound verysmart.
Leonardo Martinho
2017
"""
import API
import argparse
import re
from nltk import pos_tag
import sys
from pathlib2 import Path
def sanitize_for_url(word):
    """
    Sanitizing of a word with a regex search string - everything that is not
    an ASCII letter, whitespace or a colon is substituted by an empty set.

    Args:
        word (str): Word to sanitize

    Returns:
        str: Sanitized string
    """
    # raw string avoids the invalid "\s" escape sequence that a plain
    # string literal produces (DeprecationWarning on py3.6+)
    return re.sub(r'[^a-zA-Z\s:]', '', word)
def remove_escapes(word):
    """
    Strip the escape backslashes that various security mechanisms introduce.

    Args:
        word (str): Word to sanitize

    Returns:
        str: Sanitized string without backslashes
    """
    # plain string replace is equivalent to re.sub(r'\\', '', word)
    return word.replace('\\', '')
def fetch_words(url):
    """
    Retrieve a JSON result set from the API module.

    An API object is instantiated for the given URL and its getr()
    method returns the decoded JSON payload.

    Args:
        url (str): URL string to instantiate the API object

    Returns:
        dict: JSON data as python dictionary
    """
    return API.API(url, '').getr()
def find_max_len(text):
    """
    Linear search for the longest string in a sequence.

    On ties the first string with the greatest length wins (matching
    the behavior of max(text, key=len)).

    Args:
        text (arr[str]): array of strings that are compared

    Returns:
        str: Word with the biggest length
    """
    longest = None
    for candidate in text:
        if longest is None or len(candidate) > len(longest):
            longest = candidate
    if longest is None:
        # preserve max()'s exception type for an empty sequence
        raise ValueError("max() arg is an empty sequence")
    return longest
def find_new_word(words, word_type):
    """
    Pick a replacement word of the given type from an API result set.

    The word type is looked up in the result dict; within it the first
    known word category ("sim", then "syn") is used, and the longest
    candidate from that category is returned.

    Args:
        words (dict): A json result set as dict
        word_type (str): The specific word type - used as the key into the
            json result set dict

    Raises:
        API.requests.exceptions.HTTPError: if the word type or none of the
            known categories exist in the result set (simplifies the logic
            in the run function)

    Returns:
        str: New word
    """
    candidates = words.get(word_type, "")
    for category in ("sim", "syn"):
        if category in candidates:
            return find_max_len(candidates.get(category))
    raise API.requests.exceptions.HTTPError
def run(text):
    """
    Main function that brings everything together - the first part of the URL is used as a parameter for the instantiation
    of the API object. The string (that may be multiple sentences) is then replaced by calling other functions.
    First the string is assigned to an array of strings calling splice_words(str). Then a tuple is assigned by
    calling NLTK.pos_tag(arr[str]). A loop to the length of the text array is then started - checking if the particular word
    is a word in the standard list - check_standard(tuple[str, str]). If not, the sanitization method clean_word[str] is called
    and the URL build. The new word is then appended to the result array. If an exception was raised, all operations are skipped
    and the unchanged word is added to the result array.
    If the API comes to a halt (due to processing limits of the API key), an empty file is set to ensure stopping
    and not spamming the server for the time being.
    Args:
        baseurl (str): URL to instantiate the API object
        text (str): String to replace the words from
    Returns:
        Result string if no ValueError has been found, error message if otherwise
    """
    # NOTE(review): the thesaurus API key is hard-coded in the URL --
    # consider moving it to configuration before publishing
    baseurl = "http://words.bighugelabs.com/api/2/0311fc4c609183416bf8bae6780fb886/{}/json"
    if len(text) <= 500:
        try:
            compare = pos_tag(text.split())
            result = []
            for word, tag in compare:
                if check_standard_word(tag):
                    # grammatical words are kept verbatim
                    result.append(word)
                else:
                    url_word = sanitize_for_url(word)
                    if not url_word: continue
                    url = baseurl.format(url_word)
                    try:
                        new_word = find_new_word(fetch_words(url), determine_word_type(tag))
                        # re-attach trailing punctuation stripped by the lookup
                        match = re.match('[\.,\-\?\!\(\)]', word[-1])
                        if match:
                            result.append(new_word + match.group()) # only copies over the last character plus the new word
                        else:
                            result.append(new_word)
                    except API.requests.exceptions.HTTPError:
                        result.append(word) # old, unchanged word
                        continue
            return remove_escapes(' '.join(result))
        except ValueError:
            return "Try again later. API processing limit reached."
    else: return "The text you are typing is too long to process. Sorry."
def check_standard_word(tag):
    """
    Decide whether a POS tag belongs to the excluded (not-to-be-replaced)
    grammatical word categories.

    Args:
        tag (str): Tag from nltk.pos_tag(arr[str]) function

    Returns:
        bool: True if the tag is excluded, False otherwise
    """
    excluded_tags = {
        "MD", "DT", "PRP", "$PRP", "IN", "CC", "CD", "EX",
        "NNP", "NNPS", "POS", "PDT", "RP", "WDT", "SYM", "TO",
    }
    return tag in excluded_tags
def omitted_words(words):
    """
    Checks if a newly selected word is a composition of multiple words which
    might include nonsensical grammatical words; those are substituted by an
    empty set. The first regex check ensures the new word actually has spaces.

    Args:
        words (str): Sequence of words with spaces

    Returns:
        str: The word either unchanged or with the grammatical words removed
    """
    if re.match(r'\w+\s', words):
        # NOTE(review): the original called undefined helpers
        # splice_words/clean_word/check_standard; mapped here onto the
        # module's actual helpers split/sanitize_for_url/check_standard_word
        compare = pos_tag(sanitize_for_url(words).split())
        for word, tag in compare:
            if check_standard_word(tag):
                # parenthesized print is valid on Python 2 and 3
                print(word)
                words = words.replace(word, '')
    return words
def determine_word_type(tag):
    """
    Determines the word type by checking the tag returned by the
    nltk.pos_tag(arr[str]) function.

    Each word in the array is marked with a special tag which can be used to
    find the correct type of a word. A selection is given in the dictionaries.

    Args:
        tag (str): String tag from the nltk.pos_tag(str) function that
            classified the particular word

    Returns:
        str: Word type as a string ("noun" is the fallback)
    """
    types = {
        "noun": {"NN", "NNS", "NNPS", "FW"},
        "adjective": {"JJ", "JJR", "JJS"},
        "verb": {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"},
        "adverb": {"RB", "RBR"},
    }
    # .items() (not the Python-2-only .iteritems()) keeps this portable;
    # the tag sets are disjoint, so iteration order does not matter
    for type_, tag_set in types.items():
        if tag in tag_set:
            return type_
    return "noun"
if __name__ == "__main__":
    # CLI entry point: replace the words of the given text and print it
    parser = argparse.ArgumentParser(description='A small word replacer')
    parser.add_argument(metavar='text', dest='text', action='store')
    args = parser.parse_args()
    # parenthesized print of a single value is valid on Python 2 and 3;
    # the original bare "print run(...)" is a SyntaxError under py3
    print(run(args.text))
| [
37811,
198,
32,
1573,
2186,
11736,
284,
787,
257,
13526,
6827,
2128,
845,
27004,
13,
198,
36185,
13109,
5780,
8873,
198,
5539,
198,
37811,
198,
11748,
7824,
198,
11748,
1822,
29572,
198,
11748,
302,
198,
6738,
299,
2528,
74,
1330,
1426,... | 2.99283 | 2,092 |
import pyHook,pythoncom,sys,logging
import time,datetime
import win32console
import win32gui
import os
window = win32console.GetConsoleWindow()
win32gui.ShowWindow(window,0)
root_dir = os.path.split(os.path.realpath(__file__))[0]
log_file=os.path.join("C:\\Users\\Solid\\Desktop\\key","log_file.txt")
buffer=""
pause_period=2
last_press=datetime.datetime.now()
pause_delta=datetime.timedelta(seconds=pause_period)
wait_seconds = 10
#last_press=time.datetime.now()
timeout = time.time() + wait_seconds
print("PROGRAM STARTED")
log("PROGRAM STARTED")
hm = pyHook.HookManager()
hm.KeyDown=keypress
hm.HookKeyboard()
pythoncom.PumpMessages()
| [
11748,
12972,
39,
566,
11,
29412,
785,
11,
17597,
11,
6404,
2667,
198,
11748,
640,
11,
19608,
8079,
198,
11748,
1592,
2624,
41947,
198,
11748,
1592,
2624,
48317,
198,
11748,
28686,
198,
198,
17497,
796,
1592,
2624,
41947,
13,
3855,
4758... | 2.619048 | 252 |
import nltk
import string
import requests
import json
from io import StringIO
from html.parser import HTMLParser
import os
import time
from sys import platform
# Resolve the platform-specific location of the bundled nltk data
# directory, derived from this file's own path ("test" -> "data/nltk").
path = ""
if platform == "linux" or platform == "linux2":
    path = os.path.dirname(os.path.realpath(__file__)).replace("test","data/nltk")
elif platform == "darwin":
    path = os.path.dirname(os.path.realpath(__file__)).replace("test","data/nltk")
elif platform == "win32":
    # Windows needs a backslash-separated data path
    path = os.path.dirname(os.path.realpath(__file__)).replace("test","data\\nltk")
nltk.data.path.append(path)
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this fragment -- it must be
    # defined elsewhere in the full file for this entry point to work.
    main()
11748,
299,
2528,
74,
201,
198,
11748,
4731,
201,
198,
11748,
7007,
201,
198,
11748,
33918,
201,
198,
6738,
33245,
1330,
10903,
9399,
201,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
201,
198,
11748,
28686,
201,
198,
11748,
640,
20... | 2.662281 | 228 |
from backtester.timeRule.time_rule import TimeRule
from datetime import datetime, timedelta
import pandas as pd
from pandas.tseries.offsets import CustomBusinessHour
from pandas.tseries.offsets import CustomBusinessDay
| [
6738,
736,
4879,
353,
13,
2435,
31929,
13,
2435,
62,
25135,
1330,
3862,
31929,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
13,
912,
10640,
13,
8210,
1039,
133... | 3.65 | 60 |
from selenium import webdriver
from time import sleep
# Selenium-driven Gmail auto-responder script.
driver = None
driver = webdriver.Firefox()

SENDER = '<SENDER NAME>'
# Credentials are placeholders to be filled in by the user.
GMAIL_USER = '<Your Gmail ID>'
# placeholder corrected: this field holds the account password, not the ID
GMAIL_PASSWORD = '<Your Gmail Password>'
# (duplicate MESSAGE assignment removed -- it was assigned twice with the
# identical value)
MESSAGE = 'I will get back to you soon. \n Thanks'

if __name__ == '__main__':
    # NOTE(review): login_google()/access_gmail() are not defined in this
    # fragment -- they must be defined elsewhere in the full file.
    r_log = login_google()
    if r_log:
        print('Yay')
        access_gmail()
    else:
        print('Boo!!!')
    # always release the browser before exiting
    if driver is not None:
        driver.quit()
    print('Done')
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
640,
1330,
3993,
198,
198,
26230,
796,
6045,
198,
26230,
796,
3992,
26230,
13,
13543,
12792,
3419,
198,
198,
50,
10619,
1137,
796,
705,
27,
50,
10619,
1137,
36751,
29,
6,
198,
38,
... | 2.354839 | 217 |
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, current_user
from fuck_papers.extensions import db
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
11,
2198,
62,
28712,
62,
17831,
198,
6738,
42903,
62,
38235,
1330,
11787,
35608,
259,
11,
1459,
62,
7220,
198,
198... | 3.62963 | 54 |
from storyscript.compiler.semantics.functions.Mutation import Mutation
from storyscript.compiler.semantics.types.GenericTypes import \
GenericType, ListGenericType, MapGenericType, TypeSymbol
from storyscript.compiler.semantics.types.Types import AnyType, BaseType, \
BooleanType, FloatType, IntType, NoneType, RegExpType, StringType, TimeType
def parse_type_inner(text, start_tok='[', end_tok=']'):
    """
    Returns the inner type of a generic type.
    Example: List[any] => any
    :param text: type text to parse
    :param start_tok: type opening token
    :param end_tok: type end token
    :return: Inner type
    """
    depth = 0
    inner_start = None
    for pos, ch in enumerate(text):
        if ch == start_tok:
            if depth == 0:
                # remember where the outermost bracket's content begins
                inner_start = pos + 1
            depth += 1
        elif ch == end_tok:
            assert depth > 0, 'No start ['
            depth -= 1
            if depth == 0:
                return text[inner_start:pos]
    assert 0, 'No ] found'
def parse_type(type_):
    """
    Parses a type string and returns its parsed type which can be a:
    - BaseType (e.g. `IntType`)
    - TypeSymbol (e.g. `TypeSymbol(A)`)
    - GenericType (e.g. `ListGenericType(TypeSymbol(A))`)
    """
    assert len(type_) > 0
    type_ = type_.strip()
    # base type names map directly onto their singleton instances
    base_types = {
        'boolean': BooleanType,
        'int': IntType,
        'float': FloatType,
        'string': StringType,
        'time': TimeType,
        'none': NoneType,
        'regexp': RegExpType,
        'any': AnyType,
    }
    if type_ in base_types:
        return base_types[type_].instance()
    if '[' in type_:
        # generic type: recursively parse each comma-separated inner part
        inner = [parse_type(part) for part in parse_type_inner(type_).split(',')]
        if type_.startswith('List['):
            return ListGenericType(inner)
        assert type_.startswith('Map[')
        return MapGenericType(inner)
    assert ']' not in type_
    return TypeSymbol(type_)
def split_type_arguments(text, start_tok='[', end_tok=']'):
    """
    Splits a mutation type string into its arguments.

    Tokens are separated by top-level spaces; a space directly following
    a "name:" stays attached to that argument, and spaces inside generic
    brackets never split.

    :return: Array of all arguments.
    """
    depth = 0
    tok_start = 0
    after_colon = False  # True while consuming an argument of the form "name: type"
    for pos, ch in enumerate(text):
        if ch == start_tok:
            depth += 1
        elif ch == end_tok:
            assert depth > 0, 'No start ['
            depth -= 1
        elif ch == ':':
            # the whitespace right after "name:" belongs to the argument
            after_colon = True
        elif ch == ' ':
            if depth == 0 and not after_colon:
                if tok_start == pos:
                    # collapse runs of whitespace between tokens
                    tok_start = pos + 1
                else:
                    yield text[tok_start:pos]
                    tok_start = pos + 1
        else:
            after_colon = False
    assert depth == 0
    yield text[tok_start:]
def get_symbols(t):
    """
    Returns the symbols of a type instance or an empty list.
    """
    if not isinstance(t, GenericType):
        return []
    ts = []
    for s in t.symbols:
        if isinstance(s, TypeSymbol):
            ts.append(s)
        elif isinstance(s, GenericType):
            # a nested generic contributes all of its symbols; the original
            # ts.append(*s.symbols) raised TypeError whenever the nested
            # generic had any number of symbols other than exactly one
            ts.extend(s.symbols)
        else:
            # no resolving required for base types
            assert isinstance(s, BaseType)
    return ts
def check_type_symbols(t, symbols):
    """
    Ensures that the given type only uses a set of known symbols.
    """
    for sym in get_symbols(t):
        # base types never need resolving, so they are always allowed
        if isinstance(sym, BaseType):
            continue
        assert sym in symbols, f'unknown symbol {sym} used'
def mutation_builder(type_text):
"""
Build a mutation from a plain mutation header typing.
Example:
Map[A,B] contains: A -> B
:return: the parsed Mutation
"""
in_types, out_types = [l.strip() for l in type_text.split('->')]
args = [*split_type_arguments(in_types)]
assert len(args) >= 2, f'input type and name required for {in_types}'
main_type, name, *args = args
# in:
in_type = parse_type(main_type)
symbols = get_symbols(in_type)
# arguments:
arguments = {}
for arg in args:
arg_name, arg_type = [a.strip() for a in arg.split(':')]
t = parse_type(arg_type)
# check that only symbols from the in_type are found
check_type_symbols(t, symbols)
arguments[arg_name] = t
# out:
out_type = parse_type(out_types)
# check that only symbols from the in_type are found
check_type_symbols(out_type, symbols)
return Mutation(ti=in_type, name=name, args=arguments, output=out_type)
| [
6738,
1621,
12048,
13,
5589,
5329,
13,
43616,
29320,
13,
12543,
2733,
13,
44,
7094,
1330,
337,
7094,
198,
6738,
1621,
12048,
13,
5589,
5329,
13,
43616,
29320,
13,
19199,
13,
46189,
31431,
1330,
3467,
198,
220,
220,
220,
42044,
6030,
1... | 2.205709 | 2,207 |
import json, requests
content = """**My source code:**
```
import json, requests
quote_response = requests.get("https://quotes.schollz.com/subject/friend.json")
quote = quote_response.json()[0]
if len(quote['Name']) == 0:
quote['Name'] = 'Unknown'
content = "<p>“{q[Text]}”</p><p>- <em>{q[Name]}</em></p>".format(q=quote)
payload={'content':content,'purpose':'share-text','to':['public']}
r = requests.post("http://localhost:8003/letter", data=json.dumps(payload))
print(r.json())
```
"""
payload={'content':content,'purpose':'share-text','to':['public']}
r = requests.post("http://localhost:8003/letter", data=json.dumps(payload))
quote_response = requests.get("https://quotes.schollz.com/subject/friend.json")
quote = quote_response.json()[0]
if len(quote['Name']) == 0:
quote['Name'] = 'Unknown'
content = "<p>“{q[Text]}”</p><p>- <em>{q[Name]}</em></p>".format(q=quote)
payload={'content':content,'purpose':'share-text','to':['public']}
r = requests.post("http://localhost:8003/letter", data=json.dumps(payload))
print(r.json())
| [
11748,
33918,
11,
7007,
198,
198,
11299,
796,
37227,
1174,
3666,
2723,
2438,
25,
1174,
198,
198,
15506,
63,
198,
11748,
33918,
11,
7007,
198,
22708,
62,
26209,
796,
7007,
13,
1136,
7203,
5450,
1378,
421,
6421,
13,
20601,
692,
89,
13,
... | 2.657289 | 391 |
from django.db import models
from django.contrib.auth.models import User
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198
] | 3.47619 | 21 |
# -*- coding: utf-8 -*-
"""
radish
~~~~~~
Behavior Driven Development tool for Python - the root from red to green
Copyright: MIT, Timo Furrer <tuxtimo@gmail.com>
"""
from radish import step
@step("Gegeben sei etwas")
def have_a_step(step):
"Given I have a step"
pass
@step("Wenn ich etwas mache")
def do_something(step):
"When I do something"
pass
@step("Dann erwarte ich etwas")
def expect_something(step):
"Then I expect something"
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
2511,
680,
198,
220,
220,
220,
220,
8728,
4907,
628,
220,
220,
220,
20181,
5809,
574,
7712,
2891,
329,
11361,
532,
262,
6808,
422,
2266... | 2.557292 | 192 |
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Time control starts
import time as timer
print(timer.ctime())
# Measure process time
t0p = timer.clock()
# Measure wall time
t0w = timer.time()
# Import kratos core and applications
import KratosMultiphysics
#### PARSING THE PARAMETERS ####
# Import input
parameter_file = open("material_parameters.json",'r')
ProjectParameters = KratosMultiphysics.Parameters( parameter_file.read())
# constitutive process
law_test_module = __import__(ProjectParameters["material_model"]["python_module"].GetString())
material_process = law_test_module.CreateProcess(ProjectParameters["material_model"]["parameters"])
clock_time = StartTimeMeasuring()
material_process.ExecuteInitialize()
clock_time = StartTimeMeasuring()
# time testing (loop for massive calculation):
calls = 0
for i in range(0,calls):
material_process.Execute()
material_process.ExecuteFinalize()
StopTimeMeasuring(clock_time,"integration time",True)
#### END SOLUTION ####
# Measure process time
tfp = timer.clock()
# Measure wall time
tfw = timer.time()
print("::[Material Modeling]:: [Elapsed Time = %.2f" % (tfp - t0p),"seconds] (%.2f" % (tfw - t0w),"seconds of cpu/s time)")
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
1303,
49123,
509,
10366,
418,
15205,
13323,
23154,
19528,
11670,
351,
21015,
362,
13,
21,
290,
362,
13,
22,
198,
198,
2,
3862,
1630,
4940,
198,
11748,
640,
... | 3.19802 | 404 |
import warnings
from functools import partial
import torch
from torch import nn
from .functional import cross_entropy_with_smoothing, cross_entropy_with_softlabels
| [
11748,
14601,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
198,
6738,
764,
45124,
1330,
3272,
62,
298,
28338,
62,
4480,
62,
5796,
1025,
722,
11,
3272,
62,
298,
28338,
62,
4480... | 3.520833 | 48 |
#!/usr/bin/env python
# Basic TCProtobufClient usage
import sys
from tcpb import TCProtobufClient
# Water system
atoms = ["O", "H", "H"]
geom = [
0.00000,
0.00000,
-0.06852,
0.00000,
-0.79069,
0.54370,
0.00000,
0.79069,
0.54370,
]
# Default geom is bohr, but this in angstrom
if len(sys.argv) != 3:
print("Usage: {} host port".format(sys.argv[0]))
exit(1)
# Set up client for h2o job
TC = TCProtobufClient(host=sys.argv[1], port=int(sys.argv[2]))
tc_opts = {
"atoms": atoms,
"charge": 0,
"spinmult": 1,
"closed_shell": True,
"restricted": True,
"method": "pbe0",
"basis": "6-31g",
}
TC.connect()
# Check if the server is available
avail = TC.is_available()
print("TCPB Server available: {}".format(avail))
# Energy calculation
# energy = TC.compute_energy(geom, "angstrom", **tc_opts) # Default is BOHR
# print("H2O Energy: {}".format(energy))
# Gradient calculation
# energy, gradient = TC.compute_gradient(geom, "angstrom", **tc_opts)
result = TC.compute_job_sync("gradient", geom, "angstrom", **tc_opts)
print(result)
print("H2O Gradient:\n{}".format(result["gradient"]))
# # Forces calculation (just like gradient call with -1*gradient)
# energy, forces = TC.compute_forces(geom, "angstrom", **tc_opts)
# print("H2O Forces:\n{}".format(forces))
# # General calculation
# results = TC.compute_job_sync("gradient", geom, "angstrom", **tc_opts)
# print("H2O Results:\n{}".format(results))
# # Can get information from last calculation
# print("Last H2O Energy: {}".format(TC.prev_results["energy"]))
TC.disconnect()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
14392,
17283,
19703,
672,
3046,
11792,
8748,
198,
11748,
25064,
198,
198,
6738,
48265,
65,
1330,
17283,
19703,
672,
3046,
11792,
198,
198,
2,
5638,
1080,
198,
265,
3150,
796,
14631,
... | 2.462366 | 651 |
import requests
from n26 import config
url = 'https://api.tech26.de'
# Api class can be imported as a library in order to use it within applications
# constructor accepting None to maintain backward compatibility
# TODO: this method will check if token is valid, if not it will run get_token
# IDEA: @get_token decorator
| [
11748,
7007,
198,
6738,
299,
2075,
1330,
4566,
198,
198,
6371,
796,
705,
5450,
1378,
15042,
13,
13670,
2075,
13,
2934,
6,
628,
198,
2,
5949,
72,
1398,
460,
307,
17392,
355,
257,
5888,
287,
1502,
284,
779,
340,
1626,
5479,
198,
220,
... | 3.634409 | 93 |
import requests
| [
11748,
7007,
628
] | 5.666667 | 3 |
# from watchmen.common.storage.engine.storage_engine import get_client
# from watchmen.topic.topic_relationship import TopicRelationship
#
# db = get_client()
#
# topic_relation_collections = db.get_collection('topic_relation_collections')
#
#
# def save_topic_relationship(topic_relation: TopicRelationship):
# result = load_relationship_by_source_id_and_target_id(topic_relation.sourceTopicId, topic_relation.targetTopicId)
# # print(result)
# if result:
# topic_relation_collections.update_one({"relationId": result["relationId"]}, {"$set": topic_relation.dict()})
# else:
# topic_relation_collections.insert(topic_relation.dict())
# return TopicRelationship.parse_obj(topic_relation)
#
#
# def load_relationship_by_source_id_and_target_id(source_topic_id, target_topic_id):
# result = topic_relation_collections.find_one({"sourceTopicId": source_topic_id, "targetTopicId": target_topic_id})
# return result
#
#
# def load_relationships_by_topic_ids(topic_ids):
# result = topic_relation_collections.find({"sourceTopicId": {"$in": topic_ids}})
# # print("result",result)
# return list(result)
#
#
# def load_relationships_by_topic_ids_target(topic_ids):
# result = topic_relation_collections.find({"targetTopicId": {"$in": topic_ids}})
# # print("result",result)
# return list(result)
| [
2,
422,
2342,
3653,
13,
11321,
13,
35350,
13,
18392,
13,
35350,
62,
18392,
1330,
651,
62,
16366,
198,
2,
422,
2342,
3653,
13,
26652,
13,
26652,
62,
39468,
1056,
1330,
47373,
47117,
1056,
198,
2,
198,
2,
20613,
796,
651,
62,
16366,
... | 2.784394 | 487 |
import os
import re
from importlib import import_module
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, Type, Union
import yaml
from snowshu.logger import Logger
if TYPE_CHECKING:
from snowshu.adapters.base_sql_adapter import BaseSQLAdapter
from snowshu.adapters.source_adapters.base_source_adapter import BaseSourceAdapter
from snowshu.adapters.target_adapters.base_target_adapter import BaseTargetAdapter
logger = Logger().logger
def correct_case(val: str, upper: bool = True):
""" Returns the case corrected value based on general sql identifier rules
If the value is entirely one case, made up of only word characters
and doesn't begin with a number, we can conform the case
ARGS:
- val: string that is the value to correct case for
- upper: flag to determine the case to conform to. Defaults to True (uppercase)
RETURNS:
the case corrected value
"""
if any({val.isupper(), val.islower()}) and \
re.fullmatch(r'^\w*$', val) and \
not re.fullmatch(r'^[0-9].*', val):
val = val.upper() if upper else val.lower()
return val
def case_insensitive_dict_value(dictionary, caseless_key) -> Any:
"""finds a key in a dict without case sensitivity, returns value.
Searches for the FIRST match (insensitive dict keys can have multiple matches) and returns that value.
ARGS:
- dictionary: The dictionary to traverse.
- caseless_key: The key case-insensitive search the dictionary for.
RETURNS:
the value of insensitive key. Raises KeyError if not found.
"""
lowered = {key.lower(): key for key in dictionary.keys()}
return dictionary[lowered[caseless_key.lower()]]
def key_for_value(dictionary, value):
"""finds the key for a given value in a dict."""
return list(dictionary.keys())[list(dictionary.values()).index(value)]
def fetch_adapter(name: str,
section: str) -> Union[Type['BaseSourceAdapter'], Type['BaseTargetAdapter'], Type['BaseSQLAdapter']]:
"""Locates and returns the specified adapter.
Args:
name: The name of the adapter to look up.
section: One of ('source','target','storage').
Returns:
The adapter if found, raises :class:`AdapterNotFound <snowshu.exceptions.AdapterNotFound>`.
"""
try:
return getattr(import_module(f'snowshu.adapters.{section}_adapters'),
name.capitalize() + 'Adapter')
except AttributeError as err:
logger.critical('No %s adapter found by the name of %s', section, name)
raise err
| [
11748,
28686,
198,
11748,
302,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
11,
4377,
11,
32233,
11,
8255,
9399,
11,
5994,
11,
4479,
198,
198,
11748,
... | 2.775 | 960 |
import os
import nose
from qubits import command_line as cl
# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE
def setUpModule():
"set up test fixtures"
moduleDirectory = os.path.dirname(__file__) + "/../tests"
# SETUP PATHS TO COMMONG DIRECTORIES FOR TEST DATA
global pathToOutputDir, pathToInputDir
pathToInputDir = moduleDirectory + "/input/"
pathToOutputDir = moduleDirectory + "/output/"
# SETUP THE TEST LOG FILE
global testlog
testlog = open(pathToOutputDir + "tests.log", 'w')
return None
def tearDownModule():
"tear down test fixtures"
# CLOSE THE TEST LOG FILE
testlog.close()
return None
| [
11748,
28686,
198,
11748,
9686,
198,
6738,
627,
9895,
1330,
3141,
62,
1370,
355,
537,
198,
198,
2,
25823,
8577,
5357,
13368,
9795,
14165,
44855,
51,
11335,
29397,
4177,
11053,
7473,
3336,
47353,
41736,
33893,
628,
198,
4299,
900,
4933,
... | 2.947598 | 229 |
"""Common helpers for gree test cases."""
from unittest.mock import AsyncMock, Mock
def build_device_info_mock(
name="fake-device-1", ipAddress="1.1.1.1", mac="aabbcc112233"
):
"""Build mock device info structure."""
mock = Mock(ip=ipAddress, port=7000, mac=mac)
mock.name = name
return mock
def build_device_mock(name="fake-device-1", ipAddress="1.1.1.1", mac="aabbcc112233"):
"""Build mock device object."""
mock = Mock(
device_info=build_device_info_mock(name, ipAddress, mac),
name=name,
bind=AsyncMock(),
update_state=AsyncMock(),
push_state_update=AsyncMock(),
temperature_units=0,
mode=0,
fan_speed=0,
horizontal_swing=0,
vertical_swing=0,
target_temperature=25,
power=False,
sleep=False,
quiet=False,
turbo=False,
power_save=False,
steady_heat=False,
)
return mock
| [
37811,
17227,
49385,
329,
308,
631,
1332,
2663,
526,
15931,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
1081,
13361,
44,
735,
11,
44123,
628,
198,
4299,
1382,
62,
25202,
62,
10951,
62,
76,
735,
7,
198,
220,
220,
220,
1438,
2625,
... | 2.181609 | 435 |
import re
PLAYER_NAME_AND_ID_REGEX = re.compile(r"[\s\d]\d\:[0123456]\d\:[0123456]\d\sPlayer\s(?P<player_name>(?!headlessclient).*)\s(connected)\s\(id\=(?P<steam_id>\d+)\)", re.IGNORECASE)
CONNECT_REGEX = re.compile(r"^\s?(?P<hour>[0-24]?\d)\:(?P<minute>[0123456]\d)\:(?P<second>[0123456]\d)\sPlayer\s(?P<player_name>.*)\s(connected)\s\(id\=(?P<steam_id>.*)\)\.$", re.IGNORECASE)
| [
11748,
302,
628,
198,
31519,
1137,
62,
20608,
62,
6981,
62,
2389,
62,
31553,
6369,
796,
302,
13,
5589,
576,
7,
81,
17912,
59,
82,
59,
67,
60,
59,
67,
59,
33250,
486,
1954,
29228,
60,
59,
67,
59,
33250,
486,
1954,
29228,
60,
59,
... | 1.924623 | 199 |
from .interactive_guess import InteractiveGuessSession
from .util import load, save
| [
6738,
764,
3849,
5275,
62,
5162,
408,
1330,
21365,
8205,
408,
36044,
198,
6738,
764,
22602,
1330,
3440,
11,
3613,
198
] | 4 | 21 |
#Python program to display calender for particular month
import calendar
if __name__=='__main__':
main()
| [
2,
37906,
1430,
284,
3359,
2386,
2194,
329,
1948,
1227,
198,
11748,
11845,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
197,
12417,
3419,
198
] | 3.655172 | 29 |
"""TODO(jietan): DO NOT SUBMIT without one-line documentation for train_ars.
blaze build -c opt //experimental/users/jietan/ARS:train_ars
blaze-bin/experimental/users/jietan/ARS/train_ars \
--logdir=/cns/ij-d/home/jietan/experiment/ARS/test1 \
--config_name=MINITAUR_GYM_CONFIG
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import ars
import config_ars
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', None, 'The directory to write the log file.')
flags.DEFINE_string('config_name', None, 'The name of the config dictionary')
if __name__ == '__main__':
flags.mark_flag_as_required('logdir')
flags.mark_flag_as_required('config_name')
app.run(main)
| [
37811,
51,
3727,
46,
7,
73,
1155,
272,
2599,
8410,
5626,
28932,
36393,
1231,
530,
12,
1370,
10314,
329,
4512,
62,
945,
13,
198,
198,
2436,
6201,
1382,
532,
66,
2172,
3373,
23100,
9134,
14,
18417,
14,
73,
1155,
272,
14,
27415,
25,
... | 2.918216 | 269 |
#!/usr/bin/env python3
'''
This file tests student written cachelab traces. It tests them by running
traces on csim-ref and comparing the hits/misses/evictions with the expected
hits/misses/evictions. It also checks if the trace is well formatted.
If run with the -f [tracefile] it only judges whether tracefile is well written
or not.
author: Jeremy Dropkin
'''
import subprocess
import re
import os
import argparse
# Trace files used for grading
path_to_traces = "traces/traces/"
traces = [
os.path.join(path_to_traces, "tr1.trace"),
os.path.join(path_to_traces, "tr2.trace"),
os.path.join(path_to_traces, "tr3.trace"),
]
# Format: [s, E, b]
params = [[3, 1, 4], [1, 3, 4], [2, 3, 4]]
max_ops = [5, 5, 10]
# Format: [hits, misses, evictions]
expected = [[2, None, 1], [2, 2, None], [5, 4, 1]]
maxPoints = [3, 3, 4]
def is_valid_trace(trace):
"""Verify that the trace file being run is well written.
Each line of the trace must be in the format "(L|S) addr,len"
"""
try:
with open(trace) as f:
trace_data = f.read()
except:
print("Could not open {}".format(trace))
return False
# Check for empty file
if not trace_data:
print("{} is empty, trace contents expected.".format(trace))
return False
# Check each line in the file is a valid instruction
instructions = trace_data.rstrip().splitlines()
for i, instr in enumerate(instructions, start=1):
# Note: extra characters at the end of the line are probably ok
if re.match(r"[LS] [0-9a-fA-F]+,[0-9]+", instr) is None:
print("\"{}\" is not a well written instruction (line {})".format(instr, i))
return False
return True
def run_trace(trace, parameters, max_ops):
"""Return the hits, misses, and evictions from running a trace
This uses the csim-ref program to compute statistics for the given trace.
"""
# Read trace data
try:
with open(trace) as f:
trace_data = f.read()
except FileNotFoundError:
print("Could not find {}".format(trace))
return None
# Check operation count in trace is within the limit
if count_ops(trace_data) > max_ops:
print("{} contains too many instructions, use a maximum of {} for this trace".format(
trace, max_ops))
return None
# Check trace is valid
if not is_valid_trace(trace):
return None
# Remove previous simultaion results
try:
os.remove(".csim_results")
except FileNotFoundError:
pass
# Run csim-ref with the given trace
p = subprocess.run(
[
"./csim-ref",
"-s", str(parameters[0]),
"-E", str(parameters[1]),
"-b", str(parameters[2]),
"-t", trace,
],
stdout=subprocess.DEVNULL)
# Not run successfully
if p.returncode != 0:
print("Running {} on csim-ref failed!".format(trace))
return None
# Read results from simulation
try:
with open(".csim_results", "r") as f:
results = f.read()
except:
print("Could not find results file!")
return None
results = list(map(int, results.split()))
# Return hits, misses, evictions
# Only care about the first 3 values recorded
return (results[0], results[1], results[2])
def cmp_expected(real, exp):
"""Compares a value from a trace with an expected value.
If expected is -1, the result doesn't matter
"""
for real_val, exp_val in zip(real, exp):
if exp_val is not None and real_val != exp_val:
return False
return True
def fmt_expected_val(exp_val):
"""Formats an expected trace value as a string."""
if exp_val is None:
return 'any'
return '{}'.format(exp_val)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
7061,
6,
198,
1212,
2393,
5254,
3710,
3194,
269,
9636,
397,
20675,
13,
632,
5254,
606,
416,
2491,
198,
2213,
2114,
319,
269,
14323,
12,
5420,
290,
14176,
262,
7127,
14,
3927... | 2.492931 | 1,556 |
"""Util for training on SIS/backselect subsets."""
import glob
import numpy as np
import os
import torch
import torchvision
import imagenet_backselect
class BackselectSubsetsImageNetDataset(torchvision.datasets.ImageNet):
"""Dataset of backselect pixel subsets on ImageNet.
Args:
sis_dir (str): Path to directory containing SIS.
imagenet_root: Path to original images dataset root dir.
frac_to_keep (float): Fraction of each images to retain (rest masked).
fully_masked_image (array): Array containing fully masked images.
Shape should be broadcastable to images).
transform (function): Transform to apply to final images.
target_transform (function): Transform to apply to targets.
"""
def _preprocess_sis_dir(self):
"""Initializes map from image filename to backselect file."""
image_to_backselect_file = {}
bs_file_format = os.path.join(self.sis_dir, '**', '*.npz')
for image_path in glob.glob(bs_file_format, recursive=True):
image_basename = _get_basename_without_extension(image_path)
image_to_backselect_file[image_basename] = image_path
self.image_to_backselect_file = image_to_backselect_file
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = super(
BackselectSubsetsImageNetDataset, self).__getitem__(index)
# Load backselect data and mask image.
img_path = self.samples[index][0]
img_basename = _get_basename_without_extension(img_path)
backselect_filepath = self.image_to_backselect_file[img_basename]
backselect_result = imagenet_backselect.BackselectResult.from_file(
backselect_filepath)
num_iters = backselect_result.mask_order.max()
mask_after_iter = int((1 - self.frac_to_keep) * num_iters)
# print(num_iters, mask_after_iter)
mask = backselect_result.mask_order >= mask_after_iter
mask = torch.from_numpy(mask)
# TODO: use self.fully_masked_image
img = img * mask
return img, target
| [
37811,
18274,
346,
329,
3047,
319,
311,
1797,
14,
1891,
19738,
6352,
1039,
526,
15931,
198,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
198,
11748,
3590,
268,... | 2.468202 | 912 |
import sys
import os
import time
import math
from itertools import groupby
import operator as op
import numpy as np
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import fcluster
import itertools
import inspect
import collections
# for testing drun dcall
import subprocess
illab =['/', '+', '?', '*', '&', '$', '\\']
gaps = ['.','-',' ']
# abnormal AA
abaa = ['.','-','*','X','Z','U','B','O','J']
# ambiguous AA
ambaa = ['X','Z','U','B','O']
aafull = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'B', 'Z', 'X', '-',
'a', 'r', 'n', 'd', 'c', 'q', 'e', 'g', 'h', 'i', 'l', 'k', 'm', 'f', 'p', 's', 't', 'w', 'y', 'v', 'b', 'z', 'x', '.']
# AA alphabet for emboss format
aa201 = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',
'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
# AA alphabet sorted by singlet name
aas01 = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
# AA alphabet sorted by type
aat01 = ['A','I','L','V','M','F','W','G','P','C','N','Q','S','T','Y','D','E','R','H','K']
aa2a={'ARG':'R','HIS':'H','LYS':'K','ASP':'D','GLU':'E',
'SER':'S','THR':'T','ASN':'N','GLN':'Q','CYS':'C',
'SEC':'U','GLY':'G','PRO':'P','ALA':'A','VAL':'V',
'ILE':'I','LEU':'L','MET':'M','PHE':'F','TYR':'Y',
'TRP':'W'}
a2aa={'R':'ARG','H':'HIS','K':'LYS','D':'ASP','E':'GLU',
'S':'SER','T':'THR','N':'ASN','Q':'GLN','C':'CYS',
'U':'SEC','G':'GLY','P':'PRO','A':'ALA','V':'VAL',
'I':'ILE','L':'LEU','M':'MET','F':'PHE','Y':'TYR',
'W':'TRP'}
a2t={'D':'C','E':'C','H':'C','K':'C','R':'C',
'P':'P','V':'H','M':'H','I':'H','L':'H',
'F':'H','W':'H','G':'G','A':'H','C':'C',
'T':'P','Q':'P','N':'P','Y':'P','S':'P'}
smaa1 = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'B', 'Z', 'X', '*']
smaa2 = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
aablast = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'B', 'J', 'Z', 'X', '*']
aaemboss = smaa1
b62edge = np.array([
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -2., -1., 0., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -1., 0., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 3., 0., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 4., 1., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -3., -2., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 3., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 4., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -1., -2., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -3., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -4., -3., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 1., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -1., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -3., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -2., -1., -2., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -1., -1., 0., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -4., -3., -2., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -2., -1., -4.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., -3., -2., -1., -4.],
[-2., -1., 3., 4., -3., 0., 1., -1., 0., -3., -4., 0., -3.,
-3., -2., 0., -1., -4., -3., -3., 4., 1., -1., -4.],
[-1., 0., 0., 1., -3., 3., 4., -2., 0., -3., -3., 1., -1.,
-3., -1., 0., -1., -3., -2., -2., 1., 4., -1., -4.],
[ 0., -1., -1., -1., -2., -1., -1., -1., -1., -1., -1., -1., -1.,
-1., -2., 0., 0., -2., -1., -1., -1., -1., -1., -4.],
[-4., -4., -4., -4., -4., -4., -4., -4., -4., -4., -4., -4., -4.,
-4., -4., -4., -4., -4., -4., -4., -4., -4., -4., 1.]])
b80blast = np.array([
[ 5, -2, -2, -2, -1, -1, -1, 0, -2, -2, -2, -1, -1, -3, -1, 1, 0, -3, -2, 0, -2, -2, -1, -1, -6],
[-2, 6, -1, -2, -4, 1, -1, -3, 0, -3, -3, 2, -2, -4, -2, -1, -1, -4, -3, -3, -1, -3, 0, -1, -6],
[-2, -1, 6, 1, -3, 0, -1, -1, 0, -4, -4, 0, -3, -4, -3, 0, 0, -4, -3, -4, 5, -4, 0, -1, -6],
[-2, -2, 1, 6, -4, -1, 1, -2, -2, -4, -5, -1, -4, -4, -2, -1, -1, -6, -4, -4, 5, -5, 1, -1, -6],
[-1, -4, -3, -4, 9, -4, -5, -4, -4, -2, -2, -4, -2, -3, -4, -2, -1, -3, -3, -1, -4, -2, -4, -1, -6],
[-1, 1, 0, -1, -4, 6, 2, -2, 1, -3, -3, 1, 0, -4, -2, 0, -1, -3, -2, -3, 0, -3, 4, -1, -6],
[-1, -1, -1, 1, -5, 2, 6, -3, 0, -4, -4, 1, -2, -4, -2, 0, -1, -4, -3, -3, 1, -4, 5, -1, -6],
[ 0, -3, -1, -2, -4, -2, -3, 6, -3, -5, -4, -2, -4, -4, -3, -1, -2, -4, -4, -4, -1, -5, -3, -1, -6],
[-2, 0, 0, -2, -4, 1, 0, -3, 8, -4, -3, -1, -2, -2, -3, -1, -2, -3, 2, -4, -1, -4, 0, -1, -6],
[-2, -3, -4, -4, -2, -3, -4, -5, -4, 5, 1, -3, 1, -1, -4, -3, -1, -3, -2, 3, -4, 3, -4, -1, -6],
[-2, -3, -4, -5, -2, -3, -4, -4, -3, 1, 4, -3, 2, 0, -3, -3, -2, -2, -2, 1, -4, 3, -3, -1, -6],
[-1, 2, 0, -1, -4, 1, 1, -2, -1, -3, -3, 5, -2, -4, -1, -1, -1, -4, -3, -3, -1, -3, 1, -1, -6],
[-1, -2, -3, -4, -2, 0, -2, -4, -2, 1, 2, -2, 6, 0, -3, -2, -1, -2, -2, 1, -3, 2, -1, -1, -6],
[-3, -4, -4, -4, -3, -4, -4, -4, -2, -1, 0, -4, 0, 6, -4, -3, -2, 0, 3, -1, -4, 0, -4, -1, -6],
[-1, -2, -3, -2, -4, -2, -2, -3, -3, -4, -3, -1, -3, -4, 8, -1, -2, -5, -4, -3, -2, -4, -2, -1, -6],
[ 1, -1, 0, -1, -2, 0, 0, -1, -1, -3, -3, -1, -2, -3, -1, 5, 1, -4, -2, -2, 0, -3, 0, -1, -6],
[ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -2, -1, -1, -2, -2, 1, 5, -4, -2, 0, -1, -1, -1, -1, -6],
[-3, -4, -4, -6, -3, -3, -4, -4, -3, -3, -2, -4, -2, 0, -5, -4, -4, 11, 2, -3, -5, -3, -3, -1, -6],
[-2, -3, -3, -4, -3, -2, -3, -4, 2, -2, -2, -3, -2, 3, -4, -2, -2, 2, 7, -2, -3, -2, -3, -1, -6],
[ 0, -3, -4, -4, -1, -3, -3, -4, -4, 3, 1, -3, 1, -1, -3, -2, 0, -3, -2, 4, -4, 2, -3, -1, -6],
[-2, -1, 5, 5, -4, 0, 1, -1, -1, -4, -4, -1, -3, -4, -2, 0, -1, -5, -3, -4, 5, -4, 0, -1, -6],
[-2, -3, -4, -5, -2, -3, -4, -5, -4, 3, 3, -3, 2, 0, -4, -3, -1, -3, -2, 2, -4, 3, -3, -1, -6],
[-1, 0, 0, 1, -4, 4, 5, -3, 0, -4, -3, 1, -1, -4, -2, 0, -1, -3, -3, -3, 0, -3, 5, -1, -6],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -6],
[-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, 1]])
b62blast = np.array([
[ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, -1, -1, -4],
[-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, -2, 0, -1, -4],
[-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 4, -3, 0, -1, -4],
[-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, -3, 1, -1, -4],
[ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -1, -3, -1, -4],
[-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, -2, 4, -1, -4],
[-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, -3, 4, -1, -4],
[ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -4, -2, -1, -4],
[-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, -3, 0, -1, -4],
[-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, 3, -3, -1, -4],
[-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, 3, -3, -1, -4],
[-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, -3, 1, -1, -4],
[-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, 2, -1, -1, -4],
[-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, 0, -3, -1, -4],
[-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -3, -1, -1, -4],
[ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, -2, 0, -1, -4],
[ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, -1, -1, -4],
[-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -2, -2, -1, -4],
[-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -1, -2, -1, -4],
[ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, 2, -2, -1, -4],
[-2, -1, 4, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, -3, 0, -1, -4],
[-1, -2, -3, -3, -1, -2, -3, -4, -3, 3, 3, -3, 2, 0, -3, -2, -1, -2, -1, 2, -3, 3, -3, -1, -4],
[-1, 0, 0, 1, -3, 4, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -2, -2, -2, 0, -3, 4, -1, -4],
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -4],
[-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1]])
# average resi mass, van der waals volume, polarities
aadef={
'A':('Alanine',71.0788,67,9),
'R':('Arginine',156.1876,148,15),
'N':('Asparagine',114.1039,96,16),
'D':('Aspartic.acid',115.0886,91,19),
'C':('Cysteine',103.1448,86,7),
'Q':('Glutamine',128.1308,114,17),
'E':('Glutamic.acid',129.1155,109,18),
'G':('Glycine',57.0520,48,11),
'H':('Histidine',137.1412,118,10),
'I':('Isoleucine',113.1595,124,1),
'L':('Leucine',113.1595,124,3),
'K':('Lysine',128.1742,135,20),
'M':('Methionine',131.1986,124,5),
'F':('Phenylalanine',147.1766,135,2),
'P':('Proline',97.1167,90,13),
'S':('Serine',87.0782,73,14),
'T':('Threonine',101.1051,93,12),
'W':('Tryptophan',186.2133,163,6),
'Y':('Tyrosine',163.1760,141,8),
'V':('Valine',99.1326,105,4),
'U':('Cysteine',103.1448,86,7)
}
'''
Column Headings:
1) One-letter code (key)
2) Three-letter code
3) Name
4) Chou-Fasman code for helix propensity
5) Chou-Fasman code for sheet propensity
6) Chou-Fasman helix propensity values
7) Chou-Fasman sheet propensity values
8) Amino acid molecular weight
9) pKa value for free amino acid carboxylate
10) pKa value for free amino acid amine
11) pKa value for amino acid side chain
12) Number of carbon atoms in amino acid
13) Number of hydrogen atoms in amino acid zwitterion
14) Number of nitrogen atoms in amino acid
15) Number of oxygen atoms in amino acid
16) Number of sulfur atoms in amino acid
17) Area in the standard state (standard state accessibility is defined
as the average surface area that residue has in a ensemble
of Gly-X-Gly tripeptides)
18) Average accessible area in proteins
19) Average are buried upon transfer from the standard state to the folded protein.
20) Mean fractional area loss, equal to the average area buried normalized
by the standard state area
21) Residue mass
22) Monoisotopic Mass
23) Total number of heavy atom
'''
aaprop = {
'A':('ALA' , 'Alanine' , 'H', 'I' ,1.45 ,0.97 ,89.09 ,2.3 ,9.9 ,0 ,3 ,7 ,1,2,0,118.1,31.5 ,86.6 ,.74,71.08 ,71.03711 , 6),
'C':('CYS' , 'Cysteine' , 'i', 'h' ,0.77 ,1.30 ,121.16 ,1.8 ,10.8 ,8.65 ,3 ,7 ,1,2,1,146.1,13.9 ,132.3,.91,103.14,103.00919, 7),
'D':('ASP' , 'Aspartic Acid' , 'i', 'i' ,0.98 ,0.80 ,133.10 ,2.0 ,10.0 ,4.04 ,4 ,7 ,1,4,0,158.7,60.9 ,97.8 ,.62,115.09,115.02694, 9),
'E':('GLU' , 'Glutamic Acid' , 'H', 'B' ,1.53 ,0.26 ,147.13 ,2.2 ,9.7 ,4.39 ,5 ,9 ,1,4,0,186.2,72.3 ,113.9,.62,129.12,129.04259,10),
'F':('PHE' , 'Phenylalanine' , 'h', 'h' ,1.12 ,1.28 ,165.19 ,1.8 ,9.1 ,0 ,9 ,11,1,2,0,222.8,28.7 ,194.1,.88,147.18,147.06841,12),
'G':('GLY' , 'Glycine' , 'B', 'i' ,0.53 ,0.81 ,75.07 ,2.4 ,9.8 ,0 ,2 ,5 ,1,2,0,88.1 ,25.2 ,62.9 ,.72,57.05 ,57.02146 , 5),
'H':('HIS' , 'Histidine' , 'h', 'b' ,1.24 ,0.71 ,155.16 ,1.8 ,9.2 ,6.75 ,6 ,9 ,3,2,0,202.5,46.7 ,155.8,.78,137.14,137.05891,11),
'I':('ILE' , 'Isoleucine' , 'I', 'H' ,1.00 ,1.60 ,131.17 ,2.4 ,9.7 ,0 ,6 ,13,1,2,0,181 ,23 ,158 ,.88,113.16,113.08406, 9),
'K':('LYS' , 'Lysine' , 'I', 'b' ,1.07 ,0.74 ,146.19 ,2.2 ,9.2 ,11.0 ,6 ,14,2,2,0,225.8,110.3,115.5,.52,128.17,128.09496,10),
'L':('LEU' , 'Leucine' , 'H', 'h' ,1.34 ,1.22 ,131.17 ,2.4 ,9.60 ,0 ,6 ,13,1,2,0,193.1,29 ,164.1,.85,113.16,113.08406, 9),
'M':('MET' , 'Methionine' , 'h', 'H' ,1.20 ,1.67 ,149.21 ,2.3 ,9.2 ,0 ,5 ,11,1,2,1,203.4,30.5 ,172.9,.85,131.19,131.04049, 9),
'N':('ASN' , 'Asparagine' , 'b', 'b' ,0.73 ,0.65 ,132.12 ,2.0 ,8.8 ,0 ,4 ,8 ,2,3,0,165.5,62.2 ,103.3,.63,114.10,114.04293, 9),
'P':('PRO' , 'Proline' , 'B', 'b' ,0.59 ,0.62 ,115.13 ,2.0 ,10.6 ,0 ,5 ,9 ,1,2,0,146.8,53.7 ,92.9 ,.64,97.12 ,97.05276 , 8),
'Q':('GLN' , 'Glutamine' , 'h', 'h' ,1.17 ,1.23 ,146.15 ,2.2 ,9.1 ,0 ,5 ,10,2,3,0,193.2,74 ,119.2,.62,128.13,128.05858,10),
'R':('ARG' , 'Arginine' , 'i', 'i' ,0.79 ,0.90 ,174.20 ,1.8 ,9.0 ,12.5 ,6 ,14,4,2,0,256 ,93.8 ,162.2,.64,156.19,156.10111,12),
'S':('SER' , 'Serine' , 'i', 'b' ,0.79 ,0.72 ,105.09 ,2.1 ,9.2 ,0 ,3 ,7 ,1,3,0,129.8,44.2 ,85.6 ,.66,87.08 ,87.03203 , 7),
'T':('THR' , 'Threonine' , 'i', 'h' ,0.82 ,1.20 ,119.12 ,2.6 ,10.4 ,0 ,4 ,9 ,1,3,0,152.5,46 ,106.5,.70,101.11,101.04768, 8),
'V':('VAL' , 'Valine' , 'h', 'H' ,1.14 ,1.65 ,117.15 ,2.3 ,9.6 ,0 ,5 ,11,1,2,0,164.5,23.5 ,141 ,.86,99.13 ,99.06841 , 8),
'W':('TRP' , 'Tryptophan' , 'h', 'h' ,1.14 ,1.19 ,204.22 ,2.4 ,9.4 ,0 ,11,12,2,2,0,266.3,41.7 ,224.6,.85,186.21,186.07931,15),
'Y':('TYR' , 'Tyrosine' , 'b', 'h' ,0.61 ,1.29 ,181.19 ,2.20,9.1 ,9.75 ,9 ,11,1,3,0,236.8,59.1 ,177.7,.76,163.18,163.06333,13),
'.':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'-':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'O':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'Z':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'X':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'U':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'J':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 ),
'B':('gap' , 'gap' , '.' , '.' ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0,0,0,0 ,0 ,0 ,0 ,0 ,0 ,0 )
}
# MSA score
aapscore = {
'ssp': {'.':0, 'H':1, 'I':2, 'B':3, 'h':4, 'i':5, 'b':6},
'hat': {0:0, 5:1, 6:2, 7:3, 8:4, 9:5, 10:6, 11:7, 12:8, 13:9, 15:10}
}
# print repr(dict((k,cp.aapscore['ssp'][cp.aaprop[k][2]]) for k in cp.aaprop))
aascore = {
'aa' : {
'J':0, 'O':0, 'Z':0, 'U':0,'X':0,'-': 0,'.': 0,'A': 1,'C': 2,'D': 3,'E': 4,'F': 5,'G': 6,'H': 7,'I': 8,'K': 9,
'L': 10,'M': 11,'N': 12,'P': 13,'Q': 14,'R': 15,'S': 16,'T': 17,'V': 18,'W': 19,'Y': 20, 'B': 0
},
'ssp' : {
'-': 0, '.': 0, 'A': 1, 'C': 5, 'E': 1, 'D': 5, 'G': 3, 'F': 4, 'I': 2, 'H': 4, 'K': 2, 'J': 0, 'M': 4, 'B': 0,
'L': 1, 'O': 0, 'N': 6, 'Q': 4, 'P': 3, 'S': 5, 'R': 5, 'U': 0, 'T': 5, 'W': 4, 'V': 4, 'Y': 6, 'X': 0, 'Z': 0
},
'hat' : {
'-': 0, '.': 0, 'A': 2, 'C': 3, 'B': 0, 'E': 6, 'D': 5, 'G': 1, 'F': 8, 'I': 5, 'H': 7, 'K': 6, 'J': 0, 'M': 5,
'L': 5, 'O': 0, 'N': 5, 'Q': 6, 'P': 4, 'S': 3, 'R': 8, 'U': 0, 'T': 4, 'W': 10, 'V': 4, 'Y': 9, 'X': 0, 'Z': 0
},
'gthat':{
'B':0, 'J':0, 'O':0, 'Z':0, 'U':0, 'X':0, '-':0, '.':0,
'A':3, 'G':3,
'C':2, 'S':2, 'T':2, 'N':2, 'Q':2,
'D':1, 'R':1, 'E':1, 'K':1, 'H':1,
'F':5, 'Y':5, 'W':5,
'P':4, 'V':4, 'I':4, 'L':4, 'M':4
}
}
# print repr(dict((cp.aascore['aa'][k], k) for k in cp.aascore['aa'] if cp.aascore['aa'][k]!=0))
scoreaa = {
'aa' : {
0: '.', 1: 'A', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'K', 10: 'L', 11: 'M', 12: 'N',
13: 'P', 14: 'Q', 15: 'R', 16: 'S', 17: 'T', 18: 'V', 19: 'W', 20: 'Y'
}
}
scorerdict = {
0:'.',1:'A',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'K',
10:'L',11:'M',12:'N',13:'P',14:'Q',15:'R',16:'S',17:'T',18:'V',19:'W',20:'Y'
}
msaaa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M',
'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'B', 'Z', 'X', '.', '-']
# EBLOSUM matrices gaps preset
#gapb80 = [(9,2), (8,2), (7,2), (6,2), (11,1), (10,1), (9,1)]
gapb62=[(9,2), (8,2), (7,2), (6,2), (11,1), (10,1), (9,1), (11, 2), (10, 2), (13, 1), (12, 1)]
gapb45=[(13, 3),(12, 3),(11, 3),(10, 3),(16, 2),(15, 2),(14, 2),(13, 2),(12, 2),(19, 1),(18, 1),(17, 1),(16, 1)]
gapb50=[(13, 3),(12, 3),(11, 3),(10, 3),(9, 3),(16, 2),(15, 2),(14, 2),(13, 2),(12, 2),(19, 1),(18, 1),(17, 1),(16, 1),(15, 1)]
gapb80=[(25, 2),(13, 2),(9, 2),(8, 2),(7, 2),(6, 2),(11, 1),(10, 1),(9, 1)]
gapb90=[(9, 2),(8, 2),(7, 2),(6, 2),(11, 1),(10, 1),(9, 1)]
gappam30=[(7, 2),(6, 2),(5, 2),(10, 1),(9, 1),(8, 1),(15, 3),(14, 2),(14, 1),(13, 3)]
gappam70=[(8, 2),(7, 2),(6, 2),(11, 1),(10, 1),(9, 1),(11, 2),(12, 3)]
gappam250=[(15, 3),(14, 3),(13, 3),(12, 3),(11, 3),(17, 2),(16, 2),(15, 2),(14, 2),(13, 2),(21, 1),(20, 1),(19, 1),(18, 1),(17, 1)]
gapdict = {
'BLOSUM80':gapb80,
'BLOSUM62':gapb62,
'BLOSUM50':gapb50,
'BLOSUM45':gapb45,
'PAM250':gappam250,
'BLOSUM90':gapb90,
'PAM30':gappam30,
'PAM70':gappam70
}
time0 = time.time()
# return ith column of matrix (list(list))
# [...]
# [ [...] ]
# [...]
# calculate the Euclidean distance between two vectors
# mp constant definition
mp_info = 0
mp_log = 1
mp_checkin = 2
# used in utils_mprun.py
# used in utils_mprun.py
# (joint) entropy calculation
# input: a list of np.array()
# generate fasta entries from a fasta file
# return the frequency table for a given string
# X: data[:,colset].T
# 0 1 2 3 4
#[[ 1. 1. 3. 2. 4.]
# [ 1. 1. 3. 1. 1.]]
# output:
# (1.0, 1.0), [0, 1]
# (2.0, 1.0), [3]
# (3.0, 3.0), [2]
# (4.0, 1.0), [4]
# x: str list in np.array format
#$ python proc_hammingweight.py t_hamming_weight.score 0.9
#array([1, 2, 2, 3, 4, 1, 1], dtype=int32)
#defaultdict(<type 'int'>, {1: 3, 2: 2, 3: 1, 4: 1})
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 : 0.333
# 1, 1, 1, 1, 1, 1, 1, 1, 3, 3 : 0.500
# 1, 1, 1, 1, 1, 1, 1, 1, 3, 3 : 0.500
# 1, 1, 1, 1, 1, 3, 3, 3, 4, 4 : 1.000
# 1, 1, 1, 1, 1, 3, 3, 3,15,15 : 1.000
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 : 0.333
# 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 : 0.333
# jaccard distance for two sets
# calculate value of n choose r
# return a index set of n choose m
# given two strings
# normal sequence & aligned sequence
# return map 1. key=1 pos[s1] = s2; 2. key=2 pos[s2] = s1
# s1: aligned string index, s2: pdb sequence index
# given two strings
# normal sequence & aligned sequence
# return map 1. key=1 pos[s1] = s2; 2. key=2 pos[s2] = s1
# s1: aligned string index, s2: pdb sequence index
# map between two gap extened sequences with the same original sequence
# normal sequence or aligned sequence
# return map pos[s1_index] = s2_index;
# index starts from 0
# extend version of posmap_homoseq()
# map between two gap-extended sequences, with one substring mapped to another without gaps
# after pfamscan, a MSA sequence will be trimmed and insert gaps in different positions
# normal sequence or aligned sequence
# return map pos[s1_index] = s2_index;
# index starts from 0
# pairwise substitution
# return unified key
#def quad_permu(pair1, pair2):
# quad: list : ['A', 'C', 'D', 'G']
def quad_permu(quad):
    """Rotate *quad* (a list of four single-character strings) in place so
    that its smallest element comes first, then return the joined string.

    The four possible rotations keep row/column partners together:
        rank = 0   rank = 1   rank = 2   rank = 3
        A 0 C 1    C 0 A 1    D 0 G 1    G 0 D 1
        D 2 G 3    G 2 D 3    A 2 C 3    C 2 A 3
    """
    # Index permutation to apply for each possible position of the minimum.
    orders = {1: (1, 0, 3, 2), 2: (2, 3, 0, 1), 3: (3, 2, 1, 0)}
    rank = quad.index(min(quad))
    if rank in orders:
        # Slice-assign so the caller's list is mutated, as before.
        quad[:] = [quad[i] for i in orders[rank]]
    return ''.join(quad)
# return which type of pair substitution the quadstr is
# quadstr = 'ACDG'
# transfer a set of float number into ranking of [0,1]
# input a dictionary
# convert a set of float number into value of d: d = (v - mean') / std'
# a = {'a':1,'b':2,'c':3,'d':4,'e':10,'f':15}
# {'a': -0.9486, 'b': -0.6324, 'c': -0.3162, 'd': 0.0, 'e': 1.8973, 'f': 3.4785}
# given two lists of coordinates. {1,..,i,..., n} in [x,y,z] format
# return RMSD
# RMSD = sqrt( 1/n * \sum_i (|| v_i - w_i ||^2) )
# generate sm string
# called in posmap_subseq()
# return a list of tuples: [(idx_long, idx_short), (, ), ...]
# main routine. for testing
if __name__ == '__main__':
main()
| [
11748,
25064,
201,
198,
11748,
28686,
201,
198,
11748,
640,
201,
198,
11748,
10688,
201,
198,
6738,
340,
861,
10141,
1330,
1448,
1525,
201,
198,
11748,
10088,
355,
1034,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,... | 1.610293 | 14,262 |
from server.modules import accounts
import pickle
"""
Manages friend connections between users.
"""
class FriendManager:
"""
Gets a list of friends for a given user.
"""
"""
Adds a user as a friend for a given user.
"""
"""
Remove a user as a friend for a given user.
"""
"""
Saves the list of friend mappings.
"""
manager = FriendManager()
| [
6738,
4382,
13,
18170,
1330,
5504,
198,
11748,
2298,
293,
628,
198,
37811,
198,
5124,
1095,
1545,
8787,
1022,
2985,
13,
198,
37811,
198,
4871,
9182,
13511,
25,
628,
220,
220,
220,
37227,
198,
220,
220,
220,
29620,
257,
1351,
286,
2460... | 2.984962 | 133 |
import bokeh.plotting
import bokeh.models.layouts
import collections
import numpy as np
import warnings
from ilv.utils import filter_dict
from ilv.utils import list_of_dict_to_dict_of_list
from ilv.utils import moving_average_1d
# Fix the RNG so the shuffled color index tables below are reproducible
# across runs (the same experiment always gets the same color).
np.random.seed(1)
colors_10_indices = np.random.permutation(10)
colors_255_indices = np.random.permutation(255)
def get_valid_keys(args_list, black_list=('out', 'gpu')):
    """Get list of keys that are going to be used.

    We define "valid" in the following sense:
    1. There need to be variations in the values of keys
       among the experiments.
    2. The key is not included in the black_list

    Args:
        args_list (list of dict): one argument dictionary per experiment.
            Keys are taken from the first dictionary.
        black_list (sequence of str): keys ignored regardless of variation.
            A tuple default avoids the shared-mutable-default pitfall.

    Returns:
        List of strings
    """
    keys = args_list[0].keys()
    valid_keys = []
    for key in keys:
        if key in black_list:
            continue
        cur = None
        for args in args_list:
            # Check membership before indexing: the previous version read
            # args[key] first, which raised KeyError when the first seen
            # value was None and a later experiment lacked the key.
            if key not in args:
                warnings.warn('{} not in args={}'.format(key, args))
                continue
            if cur is None:
                cur = args[key]
            if cur != args[key]:
                valid_keys.append(key)
                break
    return valid_keys
def get_identifiers(args_list, valid_keys):
    """Build a human-readable identifier string per experiment (for pop-ups).

    Args:
        args_list (list of dictionary): dictionary consists of keys and values
            that uniquely identify the plot.
        valid_keys (list of strings): keys that vary among the experiments.

    Returns:
        identifiers (list of strings)
    """
    # Each identifier is a comma-terminated "key=value," sequence over the
    # valid keys that are present in that experiment's arguments.
    return [
        ''.join('{}={},'.format(key, args[key])
                for key in valid_keys if key in args)
        for args in args_list
    ]
def filter_dataframes(dfs, xs, ys, table_ys, args_list, valid_keys):
    """Process necessary information from dataframes in the Bokeh format.
    In the following explanation, N is assumed to be the number of experiments.
    For xs_dict and ys_dict:
        These are dictionary of list of list.
        To make it simple, we focus on particular `x` in `xs`. Everything is
        the same for `ys_dict`.
        `x` is usually a timescale values such as iteration or epoch.
        Here are some characteristics:
        1. xs_dict[x] is list of list
        2. len(xs_dict[x]) == N
        3. xs_dict[x][i] is list. For example, if log is recorded every
            epoch and `x` is epoch, xs_dict[x][i] == [1, 2, 3, 4, ...].
    For tables:
        This is a dictionary of list of scalars or strings.
        The keys correspond to the column keys of the data table.
        The keys are the combination of all `valid_keys` and `table_ys`.
        tables[key][i] is `key` value recorded in the i-th experiment.
        For example, if key=='main/loss', this is the minimum loss value during
        training time recorded for the i-th experiment.
    Args:
        dfs (list of pd.DataFrame)
        xs (list of strings)
        ys (list of strings)
        table_ys (dictionary)
        args_list (list of dictionaries)
        valid_keys (list of strings)
    """
    # Descs: descriptions
    # ys_dict == {string (y): List(Serial Data)}
    xs_dict = {x: [] for x in xs}
    ys_dict = {y: [] for y in ys}
    # OrderedDict keeps the data-table column order: index, args, metrics.
    tables = collections.OrderedDict(
        [(key, []) for key in ['index'] + valid_keys + list(table_ys.keys())])
    # NOTE(review): `i` is unused in the loop body; `tables['index']` is
    # rebuilt wholesale after the loop instead.
    for i, args in enumerate(args_list):
        # get df from a result
        # NOTE(review): despite the name/docstring ("list of pd.DataFrame"),
        # `dfs` is filtered with boolean masks below, so it is presumably a
        # single DataFrame holding all runs — confirm against callers.
        tmp = dfs
        for key, val in args.items():
            if val is None:
                # None args were stored as NaN; equality would never match.
                tmp = tmp[tmp[key].isnull()]
            else:
                tmp = tmp[tmp[key] == val]
        for x in xs:
            xs_dict[x].append(tmp[x].values.tolist())
        for y in ys:
            ys_dict[y].append(tmp[y].values.tolist())
        # Collapse each metric's series to a scalar per the requested rule.
        for table_y, value_type in table_ys.items():
            if value_type == 'min':
                tables[table_y].append(tmp[table_y].min())
            elif value_type == 'max':
                tables[table_y].append(tmp[table_y].max())
            else:
                raise ValueError
        for key in valid_keys:
            if key in args:
                tables[key].append(args[key])
            else:
                tables[key].append(None)
    tables['index'] = list(range(len(args_list)))
    return xs_dict, ys_dict, tables
# this code is based on bokeh/examples/app/line_on_off.py
def vis_log(dfs, xs, ys=None, table_ys=None, args_list=None,
ignore_keys=[], table_width=600):
"""Merge all results on values ys
Args:
dfs (list of pd.DataFrame)
xs (list of strings)
ys (list of strings)
table_ys (dictionary of strings): key is name of y to display on
dictionary. The value is how to turn the vector value into
scalar ({'min', 'max'}).
args_list (list of dictionary): dictionary consists of keys and values
that uniquely identify the plot.
ignore_kes (list of strings): Keys to stop showing on table.
table_width (int): Width of table. The default value is 600.
"""
# This function can be divided into five parts.
# 1. Process necessary information from given dataframes.
# 2. Initialize the components (Static part of the visualization)
# This includes setting up the figure size,
# creating data tables and buttons.
    #    3. Configure dynamic part.
# This function contains an element of user-interaction.
# User can click buttons and slides to configure what and how to
# visualize.
# 4. Add tools
# 5. Organize how different elements can be put together in a screen.
if ys is None:
ys = table_ys.keys()
ignore_keys += ['index']
# 1. prepare and preprocess dataframes
dict_args = list_of_dict_to_dict_of_list(args_list)
valid_keys = get_valid_keys(args_list)
dict_args = filter_dict(dict_args, valid_keys)
identifiers = get_identifiers(args_list, valid_keys)
xs_dict, ys_dict, tables = filter_dataframes(
dfs, xs, ys, table_ys, args_list, valid_keys)
# 2. Construct elements
p = bokeh.plotting.figure(plot_width=1800 - table_width, plot_height=825)
# build empty multi line graph
multi_l_source = bokeh.plotting.ColumnDataSource(
{'xs': [], 'ys': [], 'descs': [], 'legend': []})
multi_l = p.multi_line(
xs='xs', ys='ys', source=multi_l_source, legend='legend')
# build datatable
columns = [bokeh.models.widgets.TableColumn(field=key, title=key) for
key in tables.keys() if key not in ignore_keys]
data_table_source = bokeh.models.ColumnDataSource(tables)
data_table = bokeh.models.widgets.DataTable(
source=data_table_source,
columns=columns,
width=table_width, height=825)
# Sliders, buttons, menus, legends
window_slider = bokeh.models.Slider(
start=1, end=101, value=1, step=10,
title='window size')
xs_button = bokeh.models.widgets.RadioButtonGroup(
labels=xs, active=0, width=600)
ys_button = bokeh.models.widgets.RadioButtonGroup(
labels=ys, active=0, width=600)
menu = ['off', 'top_right', 'top_left', 'bottom_right', 'bottom_left']
legend_button = bokeh.models.widgets.RadioButtonGroup(
labels=menu, active=0, width=600)
# 3. Start configuring user-interaction
data_table_source.on_change('selected', update)
window_slider.on_change('value', update)
ys_button.on_change('active', update)
xs_button.on_change('active', update)
legend_button.on_change('active', update)
# 4. add tools
p.add_tools(bokeh.models.BoxZoomTool())
p.add_tools(bokeh.models.ResizeTool())
p.add_tools(bokeh.models.SaveTool())
p.add_tools(bokeh.models.WheelZoomTool())
p.add_tools(bokeh.models.RedoTool())
p.add_tools(bokeh.models.ResetTool())
p.add_tools(bokeh.models.UndoTool())
p.add_tools(bokeh.models.ZoomOutTool())
p.add_tools(bokeh.models.ZoomInTool())
p.add_tools(
bokeh.models.HoverTool(
tooltips=[("y", "$y"), ("label", "@legend")])
)
# 5. build layout
sliders = bokeh.layouts.widgetbox(window_slider)
xs_ys_widgets = bokeh.layouts.widgetbox(
xs_button, ys_button)
legend_widget = bokeh.layouts.widgetbox(legend_button)
layout = bokeh.layouts.gridplot(
[[data_table, p],
[sliders, xs_ys_widgets, legend_widget]], sizing_mode='fixed')
bokeh.io.curdoc().add_root(layout)
| [
11748,
1489,
365,
71,
13,
29487,
889,
198,
11748,
1489,
365,
71,
13,
27530,
13,
10724,
5269,
198,
11748,
17268,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
14601,
198,
198,
6738,
4229,
85,
13,
26791,
1330,
8106,
62,
11600,
198,
67... | 2.329001 | 3,693 |
_C=True
_B=False
_A=None
import inspect,io,itertools,os,struct,sys
from ._compat import DEFAULT_COLUMNS,get_winterm_size,isatty,strip_ansi,WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import Path
from .utils import echo
from .utils import LazyFile
visible_prompt_func=input
_ansi_colors={'black':30,'red':31,'green':32,'yellow':33,'blue':34,'magenta':35,'cyan':36,'white':37,'reset':39,'bright_black':90,'bright_red':91,'bright_green':92,'bright_yellow':93,'bright_blue':94,'bright_magenta':95,'bright_cyan':96,'bright_white':97}
_ansi_reset_all='\x1b[0m'
_getchar=_A | [
62,
34,
28,
17821,
198,
62,
33,
28,
25101,
198,
62,
32,
28,
14202,
198,
11748,
10104,
11,
952,
11,
270,
861,
10141,
11,
418,
11,
7249,
11,
17597,
198,
6738,
47540,
5589,
265,
1330,
5550,
38865,
62,
25154,
5883,
8035,
11,
1136,
62,... | 2.804781 | 251 |
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
from typing import Tuple
from disent.data.groundtruth.base import GroundTruthData
import numpy as np
# ========================================================================= #
# xy grid data #
# ========================================================================= #
class XYObjectData(GroundTruthData):
"""
Dataset that generates all possible permutations of a square placed on a square grid,
with varying scale and colour
- Does not seem to learn with a VAE when square size is equal to 1
(This property may be explained in the paper "Understanding disentanglement in Beta-VAEs")
"""
COLOR_PALETTES_1 = {
'white': [
[255],
],
'greys_halves': [
[128],
[255],
],
'greys_quarters': [
[64],
[128],
[192],
[255],
],
'colors': [
[64],
[128],
[192],
[255],
],
}
COLOR_PALETTES_3 = {
'white': [
[255, 255, 255],
],
'greys_halves': [
[128, 128, 128],
[255, 255, 255],
],
'greys_quarters': [
[64, 64, 64],
[128, 128, 128],
[192, 192, 192],
[255, 255, 255],
],
'rgb': [
[255, 000, 000],
[000, 255, 000],
[000, 000, 255],
],
'colors': [
[255, 000, 000], [000, 255, 000], [000, 000, 255],
[255, 255, 000], [000, 255, 255], [255, 000, 255],
[255, 255, 255],
],
'colors_halves': [
[128, 000, 000], [000, 128, 000], [000, 000, 128],
[128, 128, 000], [000, 128, 128], [128, 000, 128],
[128, 128, 128],
[255, 000, 000], [000, 255, 000], [000, 000, 255],
[255, 255, 000], [000, 255, 255], [255, 000, 255],
[255, 255, 255],
],
}
factor_names = ('x', 'y', 'scale', 'color')
@property
@property
# ========================================================================= #
# END #
# ========================================================================= #
# if __name__ == '__main__':
# print(len(XYScaleColorData()))
# for i in XYScaleColorData(6, 2, 2, 4, 2):
# print(i[:, :, 0])
# print(i[:, :, 1])
# print(i[:, :, 2])
# print()
| [
2,
220,
5299,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
33813,
198,
2,
220,
17168,
13789,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
33448,
18106,
23383,
122... | 2.306135 | 1,679 |
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torch.utils.data.dataloader import DataLoader
from config import cfg
import os
import shutil
def get_dataloader(path, shuffle=True):
    """
    create a Dataloader of the images in the folder
    :param path: path containing the images
    :param shuffle: True if shuffle
    :return: the created dataloader
    """
    # ImageFolder requires at least one class subfolder; if `path` holds
    # bare image files, move them all into a dummy class folder named '1'.
    for root, _dirs, filenames in os.walk(path):
        if root != path:
            continue
        dummy_dir = os.path.join(root, '1')
        os.makedirs(dummy_dir, exist_ok=True)
        for name in filenames:
            shutil.move(os.path.join(root, name),
                        os.path.join(root, '1', name))
    transform = transforms.Compose([
        transforms.Resize((cfg.input_size, 2 * cfg.input_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    return DataLoader(ImageFolder(path, transform=transform),
                      batch_size=cfg.batch_size,
                      shuffle=shuffle)
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
10178,
13,
19608,
292,
1039,
1330,
7412,
41092,
198,
6738,
28034,
13,
26791,
13,
7890,
13,
67,
10254,
1170,
263,
1330,
6060,
17401,
198,
6738,
4566,
1330,
30218,
70,
198,
11748,
28686,
... | 2.390342 | 497 |
from .SLCT import * | [
6738,
764,
8634,
4177,
1330,
1635
] | 3.166667 | 6 |
##
# Part of `SmartNodeMonitorBot`
#
# Copyright 2018 dustinface
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import logging
import telegram
import json
import time
import threading
import uuid
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError, RetryAfter)
from telegram.ext import CommandHandler,MessageHandler,Filters
from telegram.ext import Updater
from src import util
from src import messages
from src import faq as questions
from src.commandhandler import node
from src.commandhandler import user
from src.commandhandler import common
from src.smartexplorer import WebExplorer
from smartcash.rewardlist import SNReward
logger = logging.getLogger("bot")
####
# Message which gets used in the MessageQueue
####
####
# Message queue for the telegram api rate limit management: MessagingMachine
####
######
# Makes the queue printable
######
######
# Refresh the current rate limit state
######
######
# Check if the queue has messages and has not hit the rate limit yet
######
######
# Add a message to the queue
######
######
# Get the next message, remove those with 3 send attempts.
######
######
# Remove a message and decrease the ratelimit counter
######
######
# Lock the queue for a given number of seconds.
######
######
# Called when an error occurred. Give the highest rated message a shot.
######
####
# Telegram API Rate limit management. Handles all the user queues and tries
# to send messages periodically.
####
######
# Start the messaging timer
######
######
# Stop the messaging timer
######
######
# Refresh the current rate limit state
######
######
# Check if the queue has messages and has not hit the rate limit yet
######
######
# Add a message for a specific userId. If there is a queue it gets just
# added to it otherwise one will be created.
######
######
# Timer Callback. Main part of this class. Goes through all the queues, checks
# if any rate limit got hit and sends messages if its allowed to.
######
######
# Starts the bot and block until the programm will be stopped.
######
######
# Add a message to the queue
######
############################################################
# Node handler calls #
############################################################
############################################################
# User handler calls #
############################################################
############################################################
# Common handler calls #
############################################################
############################################################
# Callbacks #
############################################################
######
# Callback which get called when there is a new releases in the smartcash repo.
#
# Called by: Nothing yet, SmartGitHubUpdates later.
#
######
######
# Callback for evaluating if someone in the database had an upcoming event
# and send messages to all chats with activated notifications
#
# Called by: SmartNodeList
#
######
######
# Callback for evaluating if someone in the database has won the reward
# and send messages to all chats with activated notifications
#
# Called by: SNRewardList from python-smartcash
#
######
######
# Callback for evaluating if someone in the database has won the reward
# and send messages to all chats with activated notifications
#
# Called by: SNRewardList from python-smartcash
#
######
######
# Callback for SNRewardList errors
#
# Called by: SNRewardList from python-smartcash
#
######
######
# Callback for evaluating if someone has enabled network notifications
# and send messages to all relevant chats
######
######
# Callback which gets called from the SmartNodeList when a balance request triggered by any user
# is done. It sends the result to the related user.
#
# Called by: SmartExplorer
#
######
######
# Push the message to the admin
#
# Called by: SmartNodeList
#
######
| [
2235,
198,
2,
2142,
286,
4600,
25610,
19667,
35479,
20630,
63,
198,
2,
198,
2,
15069,
2864,
8977,
259,
2550,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
2,
286,
428... | 3.192833 | 1,758 |
"""
module docstring
"""
import os
import sys
import torch
import glob
from random import shuffle
import numpy as np
from PIL import Image
from skimage import io
from torch import nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms
import matplotlib.pyplot as plt
def load_model(state_dict_path, num_classes=8):
    """Load a fine-tuned ResNet-18 classifier onto the GPU.

    Args:
        state_dict_path: path to a saved ``state_dict`` from training.
        num_classes: size of the final fully-connected layer. Defaults to 8
            to match the original hard-coded training setup, so existing
            callers are unaffected.

    Returns:
        The model in eval mode, moved to CUDA (a CUDA device is required).
    """
    model_state_dict = torch.load(state_dict_path)
    model = models.resnet18(pretrained=True)
    num_ftrs = model.fc.in_features
    # Replace the ImageNet head with one sized for our classes *before*
    # loading the fine-tuned weights, so the shapes match.
    model.fc = nn.Linear(num_ftrs, num_classes)
    model.load_state_dict(model_state_dict)
    model.eval()
    return model.cuda()
# Module-level preprocessing steps applied by load_image(), matching the
# standard ImageNet evaluation pipeline (resize, center crop, normalize
# with ImageNet channel means/stds).
# NOTE(review): transforms.Scale is the deprecated alias of
# transforms.Resize in torchvision — consider migrating.
scale = transforms.Scale(256)
crop = transforms.CenterCrop(224)
to_tensor = transforms.ToTensor()
norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
def load_image(img_path):
    """load a single image and label"""
    # Apply the module-level preprocessing steps in order:
    # resize -> center crop -> to tensor -> ImageNet normalisation.
    transformed_image = norm(to_tensor(crop(scale(Image.open(img_path)))))
    # Unlike training there is no DataLoader batching here, so insert the
    # batch dimension ourselves (in place) to get a (1, C, H, W) tensor.
    transformed_image.unsqueeze_(0)
    # The class label is encoded as the image's parent directory name.
    label = img_path.split(os.sep)[-2]
    return [transformed_image, label]
def find_classes(dir_name):
    """Return a dictionary relating class number to the class name.

    Class names are the entries of ``dir_name`` sorted alphabetically, so
    the index assignment is stable across runs and platforms.
    """
    # dict(enumerate(...)) replaces the manual {i: classes[i] ...} loop.
    return dict(enumerate(sorted(os.listdir(dir_name))))
def predict(model, image, label_dict):
    """
    Given a model, single image and the label dictionary:
    Return the predicted class of the image.
    """
    # Variable is the legacy (pre-0.4) autograd wrapper; inference here
    # assumes a CUDA device is available (matches load_model/load_image).
    image = Variable(image).cuda()
    ans = model(image)
    # Batch of one: take the first (only) row of logits back on the CPU.
    ans = ans.data.cpu().numpy()[0]
    ans = softmax(ans)
    idx_max = np.argmax(ans)
    # label_dict maps class index -> class name (see find_classes).
    predicted = label_dict[idx_max]
    return predicted
def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    # Subtracting the max first keeps np.exp from overflowing; it cancels
    # out in the ratio, so the result is unchanged.
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def get_img_number(img_path):
    """Extract the image number (second '_'-separated field of the filename) as an int.

    Recorded alongside predictions so per-image consensus classification
    can group rows later.
    """
    filename = os.path.basename(img_path)
    return int(filename.split("_")[1])
if __name__ == "__main__":
    # Cell line whose held-out images we score, passed on the command line.
    cell_line = sys.argv[1]
    if cell_line not in ["MDA-231", "MDA-157", "MCF7", "HCC1954", "HCC1569", "SKBR3", "T47D", "KPL4"]:
        raise ValueError("{} is not a valid cell-line".format(cell_line))
    # use train dir, not used in model training and more images than test dir
    # TODO: combine train and test dir to increase the datasize
    IMG_DIR = "/exports/eddie/scratch/s1027820/chopped/nncell_data_300_{}/test".format(cell_line)
    MODEL_PATH = "/exports/igmm/eddie/Drug-Discovery/scott/pytorch_stuff/models/2018-03-29_nn_loo_models/{}_excluded_trained_model_adam.pynn".format(cell_line)
    # One entry per image: <IMG_DIR>/<class-name>/<image-file>.
    img_list = glob.glob(IMG_DIR + "/*/*")
    shuffle(img_list)
    label_dict = find_classes(IMG_DIR)
    model = load_model(MODEL_PATH)
    output_path = "/exports/igmm/eddie/Drug-Discovery/scott/pytorch_stuff/transfer_learning/predictions/2018-03-29_repeat/{}_predictions.csv"
    # Write one CSV row per image: true label, predicted label, image number.
    with open(output_path.format(cell_line), "w") as f:
        f.write("actual,predicted,img_num\n")
        for idx, image_path in enumerate(img_list):
            image, label = load_image(image_path)
            img_num = get_img_number(image_path)
            predicted = predict(model, image, label_dict)
            f.write("{},{},{}\n".format(label, predicted, img_num))
| [
37811,
198,
21412,
2205,
8841,
198,
37811,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
28034,
198,
11748,
15095,
198,
6738,
4738,
1330,
36273,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
1341,
90... | 2.578612 | 1,412 |
"""
Define an enum of all services to health check
"""
from .healthchecks.healthcheck import HealthCheck
from .healthchecks import ElasticsearchHealthCheck, DpFastTextHealthCheck
from enum import Enum
| [
37811,
198,
7469,
500,
281,
33829,
286,
477,
2594,
284,
1535,
2198,
198,
37811,
198,
6738,
764,
13948,
42116,
13,
13948,
9122,
1330,
3893,
9787,
198,
198,
6738,
764,
13948,
42116,
1330,
48567,
12947,
18081,
9787,
11,
360,
79,
22968,
820... | 4 | 51 |
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
from tornado.web import HTTPError
# Canned HTTP error payloads (status code + reason text); presumably used
# together with the tornado HTTPError import above — confirm at call sites.
status_0 = dict(status_code=405, reason='Method not allowed.')
status_1 = dict(status_code=404, reason='API not found.')
# Example-related
# Test-related
| [
2,
5145,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
33718,
13,
12384,
1330,
14626,
12331,
198,
198,
13376,
62,
15,
796,
... | 2.330189 | 106 |
# -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from datetime import date,datetime
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:s
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2142,
286,
44775,
12360,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
6738,
16298,
2238,
1330,
4981,
11,
7032,
11,
40391,
11,
4808,
... | 2.964286 | 84 |
import cv2 as cv
# Desired capture resolution.
WIDTH = 400
HEIGHT = 300
# 3 and 4 are the numeric values of cv.CAP_PROP_FRAME_WIDTH and
# cv.CAP_PROP_FRAME_HEIGHT — the named constants would be clearer.
ID_WIDTH = 3
ID_HEIGHT = 4
# Open the default camera (device index 0) and request the resolution above.
cap = cv.VideoCapture(0)
cap.set(ID_WIDTH, WIDTH)
cap.set(ID_HEIGHT, HEIGHT)
# Haar cascade detectors for faces, eyes and smiles, loaded from the
# OpenCV sample-data directory.
face_engine = cv.CascadeClassifier('data/haarcascades/haarcascade_frontalface_default.xml')
eye_engine = cv.CascadeClassifier('data/haarcascades/haarcascade_eye.xml')
smile_engine = cv.CascadeClassifier('data/haarcascades/haarcascade_smile.xml')
# NOTE(review): frames() is not defined in this file as shown — confirm it is
# provided elsewhere before running.
frames()
| [
11748,
269,
85,
17,
355,
269,
85,
198,
198,
54,
2389,
4221,
796,
7337,
198,
13909,
9947,
796,
5867,
198,
2389,
62,
54,
2389,
4221,
796,
513,
198,
2389,
62,
13909,
9947,
796,
604,
198,
11128,
796,
269,
85,
13,
10798,
49630,
7,
15,
... | 2.394118 | 170 |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import find_lane_pixels_convolution
def find_good_indexis_in_window(out_img, window_idx, nonzerox, nonzeroy, window_height, current_pos, margin=100,
                                visu=False):
    '''
    Return the indices of nonzero pixels that fall inside one sliding window.

    The window is the `window_idx`-th horizontal band counted from the bottom
    of `out_img` (bands are `window_height` tall), centred on `current_pos`
    with half-width `margin`.

    :param out_img: image whose height defines the band positions; the window
        rectangle is drawn onto it when `visu` is True
    :param window_idx: 0-based window counter, from the image bottom upward
    :param nonzerox: x coordinates of all nonzero pixels
    :param nonzeroy: y coordinates of all nonzero pixels
    :param window_height: height of one sliding window in pixels
    :param current_pos: current window centre (x)
    :param margin: half-width of the window
    :param visu: draw the sliding window onto out_img
    :return: (indices of nonzero pixels inside the window, out_img)
    '''
    img_height = out_img.shape[0]
    # Vertical band, counted upward from the bottom of the image.
    y_low = img_height - (window_idx + 1) * window_height
    y_high = img_height - window_idx * window_height
    # Horizontal band around the current centre.
    x_low = current_pos - margin
    x_high = current_pos + margin
    if visu:
        # Draw the window rectangle on the visualization image.
        cv2.rectangle(out_img, (x_low, y_low), (x_high, y_high), (0, 255, 0), 2)
    inside = ((nonzeroy >= y_low) & (nonzeroy < y_high) &
              (nonzerox >= x_low) & (nonzerox < x_high))
    return inside.nonzero()[0], out_img
def find_lane_pixels(binary_warped, visu=False):
    '''
    Locate left/right lane-line pixels in a binary warped image.

    The column histogram of the bottom half seeds the left/right lane base
    positions; a stack of sliding windows then collects nonzero pixels,
    re-centering on their mean x whenever enough pixels are found.

    :param binary_warped: 2-D binary (0/1) top-down road image
    :param visu: if True, operate on a 3-channel copy so windows can be drawn
    :return: (leftx, lefty, rightx, righty, out_img)
    '''
    # Take a histogram of the bottom half of the image
    binary_filtered, histogram = get_hist(binary_warped)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement.
    midpoint = int(histogram.shape[0] // 2)
    # Find the peak of the left and right halves of the histogram
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # HYPERPARAMETERS
    nwindows = 9    # number of sliding windows
    margin = 100    # width of the windows +/- margin
    minpix = 50     # minimum number of pixels found to recenter window
    # Set height of windows - based on nwindows above and image shape
    window_height = int(binary_filtered.shape[0] // nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_filtered.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    if visu:
        # Create an output image to draw on and visualize the result
        out_img = np.dstack((binary_warped * 255, binary_warped * 255, binary_warped * 255))
    else:
        out_img = binary_warped
    # Step through the windows one by one
    for window_idx in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        good_left_inds, out_img = find_good_indexis_in_window(out_img, window_idx, nonzerox, nonzeroy, window_height,
                                                              leftx_current, margin, visu)
        good_right_inds, out_img = find_good_indexis_in_window(out_img, window_idx, nonzerox, nonzeroy, window_height,
                                                               rightx_current, margin, visu)
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def calculate_polynomial_coefficients(binary_warped, lefty, leftx, righty, rightx, ym_per_pix=1, xm_per_pix=1):
    '''
    Fit second-order polynomials x = f(y) to the left and right lane pixels.

    Returns the fitted x values sampled at every image row, plus the
    coefficients both in pixel units and in real-world units, using the
    supplied unit-per-pixel scale factors.
    '''
    # Real-world (scaled) fits and pixel-space fits.
    left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # One y sample per image row, for plotting the fitted curves.
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    try:
        left_fitx = np.polyval(left_fit, ploty)
        right_fitx = np.polyval(right_fit, ploty)
    except TypeError:
        # Keeps the pipeline alive when a fit came back empty or invalid.
        print('The function failed to fit a line!')
        left_fitx = 1 * ploty ** 2 + 1 * ploty
        right_fitx = 1 * ploty ** 2 + 1 * ploty
    return left_fitx, right_fitx, left_fit, right_fit, left_fit_cr, right_fit_cr
def fit_polynomial(binary_warped, visu=False):
    '''
    Full lane-fit pipeline on a binary warped image: locate lane pixels,
    fit second-order polynomials, and optionally visualize the result.
    '''
    # Locate candidate lane pixels via the sliding-window search.
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped, visu)
    # Swap in the convolution-based search to compare both approaches:
    # leftx, lefty, rightx, righty, out_img = find_lane_pixels_convolution.find_lane_pixels_convolution(binary_warped)
    fit_results = calculate_polynomial_coefficients(
        binary_warped, lefty, leftx, righty, rightx)
    left_fitx, right_fitx, left_fit, right_fit, _, _ = fit_results
    if not visu:
        return left_fitx, right_fitx, left_fit, right_fit, binary_warped
    # Show the sliding-window diagnostic image, then colour the detected
    # lane pixels (left red, right blue) on a fresh 3-channel image.
    plt.imshow(out_img.astype(np.uint8))
    plt.show()
    colored = np.dstack((binary_warped * 255, binary_warped * 255, binary_warped * 255))
    colored[lefty, leftx] = [255, 0, 0]
    colored[righty, rightx] = [0, 0, 255]
    return left_fitx, right_fitx, left_fit, right_fit, colored
def visu_histogram(binary_warped):
    '''
    Show the warped binary image above its bottom-half column histogram.
    '''
    filtered, histogram = get_hist(binary_warped)
    fig, (top_ax, bottom_ax) = plt.subplots(ncols=1, nrows=2, figsize=(20, 10))
    top_ax.set_title('Binary warped image')
    top_ax.imshow(filtered)
    bottom_ax.set_title('Histogram')
    bottom_ax.plot(histogram)
    plt.show()
    return
def get_hist(binary_warped):
    '''
    Column-wise histogram of the bottom half of a binary warped image.

    Returns the (unchanged) input image together with the per-column sum of
    its lower half; the two peaks of that sum seed the lane-base search.
    '''
    rows = binary_warped.shape[0]
    lower_half = binary_warped[rows // 2:, :]
    return binary_warped, lower_half.sum(axis=0)
def transform_inverse(warped, distorted_img, left_fitx, right_fitx, mat_inv):
    '''
    Apply inverse transformation on lane pixels and draw resulting polygon on
    original image

    :param warped: binary warped (top-down) image, used only for its shape
    :param distorted_img: original camera image the lane area is drawn onto
    :param left_fitx: fitted left-lane x value for every image row
    :param right_fitx: fitted right-lane x value for every image row
    :param mat_inv: inverse perspective matrix (top-down -> camera view)
    :return: distorted_img blended with the green lane-area overlay
    '''
    img_size = (distorted_img.shape[1], distorted_img.shape[0])
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    # Right side is flipped so the polygon outline runs down one lane line
    # and back up the other, forming a closed region.
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, mat_inv, img_size, flags=cv2.INTER_LINEAR)
    # Combine the result with the original image
    result = cv2.addWeighted(distorted_img, 1, newwarp, 0.3, 0)
    return result
def find_lane_pixels_test():
    '''
    This is test function. Reads intermediate results stored at the previous step.

    Expects the warped binary test images plus their forward/inverse
    perspective matrices saved as .npy files next to the original images.
    '''
    arrays_names = glob.glob('../test_images/warped/test*.npy')
    images_names = glob.glob('../test_images/test*.jpg')
    mat_names = glob.glob('../test_images/warped/M_test*.npy')
    mat_inv_names = glob.glob('../test_images/warped/Minv_test*.npy')
    for fname, img_name, mat_name, mat_inv_name in zip(arrays_names, images_names, mat_names, mat_inv_names):
        img_bin_orig = np.load(fname)
        img_undist = cv2.imread(img_name)
        mat = np.load(mat_name)
        mat_inv = np.load(mat_inv_name)
        # Stored arrays are 3-channel 0/255; keep one plane, rescaled to 0/1.
        img_bin = img_bin_orig[:, :, 0] / 255
        left_fitx, right_fitx, left_fit, right_fit, binary_lanes = fit_polynomial(img_bin, visu=True)
        # 4. Transform back
        result = transform_inverse(img_bin, img_undist, left_fitx, right_fitx, mat_inv)
        # Plots the left and right polynomials on the lane lines
        # NOTE(review): visualize_polynomials is not defined in this file as
        # shown — confirm it is defined or imported elsewhere.
        visualize_polynomials(result, binary_lanes, left_fitx, right_fitx)
def test_histogram():
    '''
    Calculate and visualize histograms calculated on images from the previous step
    :return:
    '''
    for array_name in glob.glob('../test_images/warped/test*.npy'):
        stored = np.load(array_name)
        # First channel only, rescaled from 0/255 to 0/1.
        visu_histogram(stored[:, :, 0] / 255)
if __name__ == '__main__':
    # uncomment function below to see histograms
    # test_histogram()
    # Run the sliding-window lane-fit pipeline on the saved warped test images.
    find_lane_pixels_test()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
15095,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
1064,
62,
33533,
62,
79,
14810,
62,
42946,
2122,
628,
198,
4299,
1064,
62,
11274,
62,... | 2.452162 | 4,233 |
# Read unique integers from the user, keeping the list sorted, until the
# user answers N/n to the continue prompt.
numero = []
while True:
    numeros = int(input('Digite um número '))
    if numeros not in numero:
        numero.append(numeros)
        numero.sort()
    else:
        print('Valor duplicado! Ignorado')
    sn = str(input('Deseja continuar? [S/N]: '))
    # Re-prompt until the answer is exactly one of S/s/N/n. The previous
    # check (`sn in 'SsNn'`) accepted the empty string, and '' in 'Nn' then
    # terminated the loop as if the user had answered No.
    while len(sn) != 1 or sn not in 'SsNn':
        sn = str(input('Deseja continuar? [S/N]: '))
    if sn in 'Nn':
        break
print(numero)
| [
22510,
3529,
796,
17635,
198,
4514,
6407,
25,
198,
220,
220,
220,
5470,
418,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
705,
4008,
198,
220,
220,
220,
611,
5470,
418,
407,
287,
997,
3529,
25,
198,
220,
220,
... | 2.048128 | 187 |
# Any image input, no matter which scale it belongs to, should go through the
# following steps used for the image-segmentation task.
# Including two parts:
# 1. Segmentation training set prepare 8000->800->choose 5 label->label me
# 2. Segmentation model predict
from PIL import Image
import cv2
import os
import random
import json
import labelme
import numpy as np
#################### Required user-supplied settings ####################
out_folder = 'H:/tmp/DEEP_WATERROCK_CODE/codetest/' # output directory for generated data
label_number=5 # number of images to hand-label
if_labelme=0 # 0: do not launch labelme, 1: launch labelme
file_path = 'H:/tmp/DEEP_WATERROCK_CODE/test_image/test_1.png' # path of the image to segment
json_path='H:/tmp/json/' # directory of the labelme-converted JSON output; the path must not contain non-ASCII (Chinese) characters
#####################################################
'''
1. Segmentation training set prepare
path=train_set_create(file_path,out_folder)
label_me(if_labelme)
cmd->labelme_json_to_dataset
mask_path,Classes=json2dataset(json_path,label_number,out_folder)
2. Segmentation model predict
model_predict_set_create(file_path,out_folder)
'''
| [
2,
7149,
2939,
5128,
645,
2300,
644,
5046,
340,
14448,
11,
815,
832,
1708,
4831,
326,
973,
329,
7412,
10618,
341,
4876,
198,
198,
2,
818,
6360,
734,
3354,
25,
220,
198,
2,
352,
13,
1001,
5154,
341,
3047,
900,
8335,
38055,
3784,
74... | 2.346793 | 421 |
import cv2
import numpy as np
import torchvision
# import albumentations
from torch import Tensor
from typing import Tuple
import torchlm
if __name__ == "__main__":
    # Reports whether torchlm was built with albumentations support.
    print(torchlm.albumentations_is_available())
    # NOTE(review): none of these test_* functions are defined in this chunk —
    # confirm they are defined elsewhere in the file before running.
    test_torchlm_transforms_pipeline()
    test_torchlm_transform_mask()
    test_torchlm_transform_patches_mixup()
    test_torchlm_transform_backgrounds_mixup()
    test_torchlm_transform_center_crop()
    test_torchlm_transform_horizontal()
    test_torchlm_transform_rotate()
    test_torchlm_transform_shear()
    test_torchlm_transform_blur()
    test_torchlm_transform_translate()
    test_torchlm_transform_brightness()
    test_torchlm_transform_hsv()
    test_torchlm_transform_scale()
    test_torchlm_transform_align()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
10178,
198,
2,
1330,
435,
65,
1713,
602,
198,
6738,
28034,
1330,
309,
22854,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
28034,
75,
76,
628,
628,
628,... | 2.443077 | 325 |
"""Run and interact with a Localstack container."""
import logging
import os
import string
import time
from copy import copy
from packaging import version
from pytest_localstack import (
constants,
container,
exceptions,
plugin,
service_checks,
utils,
)
logger = logging.getLogger(__name__)
class RunningSession:
    """Connects to an already running localstack server"""
    # NOTE(review): no __init__ is visible in this chunk; self._hostname,
    # self.services and self.use_ssl are presumably assigned by a constructor
    # defined elsewhere — confirm against the full module.
    @property
    def hostname(self):
        """Return hostname of Localstack."""
        return self._hostname
    @property
    def service_aliases(self):
        """Return a full list of possible names supported."""
        # A service is reachable under its canonical name plus every alias
        # that maps to it in constants.SERVICE_ALIASES.
        services = set(self.services)
        result = set()
        for alias, service_name in constants.SERVICE_ALIASES.items():
            if service_name in services:
                result.add(service_name)
                result.add(alias)
        return result
    def start(self, timeout=60):
        """Starts Localstack if needed.

        :param float timeout: seconds to wait for all services to respond.
        """
        plugin.manager.hook.session_starting(session=self)
        # Externally managed server: nothing to launch, just wait until every
        # configured service answers its health check.
        self._check_services(timeout)
        plugin.manager.hook.session_started(session=self)
    def _check_services(self, timeout, initial_retry_delay=0.01, max_delay=1):
        """Check that all Localstack services are running and accessible.
        Does exponential backoff up to `max_delay`.
        Args:
            timeout (float): Number of seconds to wait for services to
                be available.
            initial_retry_delay (float, optional): Initial retry delay value
                in seconds. Will be multiplied by `2^n` for each retry.
                Default: 0.01
            max_delay (float, optional): Max time in seconds to wait between
                checking service availability. Default: 1
        Returns:
            None
        Raises:
            pytest_localstack.exceptions.TimeoutError: If not all services
                started before `timeout` was reached.
        """
        services = set(self.services)
        num_retries = 0
        start_time = time.time()
        while services and (time.time() - start_time) < timeout:
            for service_name in list(
                services
            ):  # list() because set may change during iteration
                try:
                    service_checks.SERVICE_CHECKS[service_name](self)
                    # Health check passed; stop polling this service.
                    services.discard(service_name)
                except exceptions.ServiceError as e:
                    if (time.time() - start_time) >= timeout:
                        raise exceptions.TimeoutError(
                            "Localstack service not started: {0}".format(service_name)
                        ) from e
            if services:
                # Exponential backoff between polling rounds, capped at max_delay.
                delay = (2 ** num_retries) * initial_retry_delay
                if delay > max_delay:
                    delay = max_delay
                time.sleep(delay)
                num_retries += 1
    def stop(self, timeout=10):
        """Stops Localstack."""
        # Externally managed server: only fire the lifecycle hooks. `timeout`
        # is kept for interface symmetry with subclasses that stop containers.
        plugin.manager.hook.session_stopping(session=self)
        plugin.manager.hook.session_stopped(session=self)
    def map_port(self, port):
        """Return host port based on Localstack port."""
        # No container port mapping for a directly reachable server.
        return port
    def service_hostname(self, service_name):
        """Get hostname and port for an AWS service."""
        # Normalise aliases to the canonical service name first.
        service_name = constants.SERVICE_ALIASES.get(service_name, service_name)
        if service_name not in self.services:
            raise exceptions.ServiceError(
                "{0!r} does not have {1} enabled".format(self, service_name)
            )
        port = self.map_port(self.services[service_name])
        return "%s:%i" % (self.hostname, port)
    def endpoint_url(self, service_name):
        """Get the URL for a service endpoint."""
        url = ("https" if self.use_ssl else "http") + "://"
        url += self.service_hostname(service_name)
        return url
class LocalstackSession(RunningSession):
    """Run a localstack Docker container.
    This class can start and stop a Localstack container, as well as capture
    its logs. It also implements a plugin system to add factories
    for the various AWS client libraries (botocore, boto3, etc).
    Can be used as a context manager:
    >>> import docker
    >>> client = docker.from_env()
    >>> with LocalstackSession(client) as session:
    ...     s3 = session.boto3.resource('s3')
    Args:
        docker_client: A docker-py Client object that will be used
            to talk to Docker.
        services (list|dict, optional): One of
            - A list of AWS service names to start in the
              Localstack container.
            - A dict of service names to the port they should run on.
            Defaults to all services. Setting this
            can reduce container startup time and therefore test time.
        region_name (str, optional): Region name to assume.
            Each Localstack container acts like a single AWS region.
            Defaults to 'us-east-1'.
        kinesis_error_probability (float, optional): Decimal value between
            0.0 (default) and 1.0 to randomly inject
            ProvisionedThroughputExceededException errors
            into Kinesis API responses.
        dynamodb_error_probability (float, optional): Decimal value
            between 0.0 (default) and 1.0 to randomly inject
            ProvisionedThroughputExceededException errors into
            DynamoDB API responses.
        container_log_level (int, optional): The logging level to use
            for Localstack container logs. Defaults to :attr:`logging.DEBUG`.
        localstack_version (str, optional): The version of the Localstack
            image to use. Defaults to `latest`.
        auto_remove (bool, optional): If True, delete the Localstack
            container when it stops.
        container_name (str, optional): The name for the Localstack
            container. Defaults to a randomly generated id.
        use_ssl (bool, optional): If True use SSL to connect to Localstack.
            Default is False.
        **kwargs: Additional kwargs will be stored in a `kwargs` attribute
            in case test resource factories want to access them.
    """
    image_name = "localstack/localstack"
    factories = []
    # NOTE(review): __init__ is not visible in this chunk; attributes such as
    # _container, docker_client, container_name, localstack_version, etc. are
    # presumably set there — confirm against the full module.
    def start(self, timeout=60):
        """Start the Localstack container.
        Args:
            timeout (float, optional): Wait at most this many seconds
                for the Localstack services to start. Default is 1 minute.
        Raises:
            pytest_localstack.exceptions.TimeoutError:
                If *timeout* was reached before all Localstack
                services were available.
            docker.errors.APIError: If the Docker daemon returns an error.
        """
        if self._container is not None:
            raise exceptions.ContainerAlreadyStartedError(self)
        logger.debug("Starting Localstack container %s", self.container_name)
        logger.debug("%r running starting hooks", self)
        plugin.manager.hook.session_starting(session=self)
        image_name = self.image_name + ":" + self.localstack_version
        if self.pull_image:
            logger.debug("Pulling docker image %r", image_name)
            self.docker_client.images.pull(image_name)
        start_time = time.time()
        # Localstack is configured entirely via environment variables.
        services = ",".join("%s:%s" % pair for pair in self.services.items())
        kinesis_error_probability = "%f" % self.kinesis_error_probability
        dynamodb_error_probability = "%f" % self.dynamodb_error_probability
        use_ssl = str(self.use_ssl).lower()
        self._container = self.docker_client.containers.run(
            image_name,
            name=self.container_name,
            detach=True,
            auto_remove=self.auto_remove,
            environment={
                "DEFAULT_REGION": self.region_name,
                "SERVICES": services,
                "KINESIS_ERROR_PROBABILITY": kinesis_error_probability,
                "DYNAMODB_ERROR_PROBABILITY": dynamodb_error_probability,
                "USE_SSL": use_ssl,
            },
            # Publish each service port on a random free host port.
            ports={port: None for port in self.services.values()},
        )
        logger.debug(
            "Started Localstack container %s (id: %s)",
            self.container_name,
            self._container.short_id,
        )
        # Tail container logs into per-stream child loggers.
        container_logger = logger.getChild("containers.%s" % self._container.short_id)
        self._stdout_tailer = container.DockerLogTailer(
            self._container,
            container_logger.getChild("stdout"),
            self.container_log_level,
            stdout=True,
            stderr=False,
        )
        self._stdout_tailer.start()
        self._stderr_tailer = container.DockerLogTailer(
            self._container,
            container_logger.getChild("stderr"),
            self.container_log_level,
            stdout=False,
            stderr=True,
        )
        self._stderr_tailer.start()
        try:
            # Spend only the time left after image pull / container launch
            # waiting for the services themselves.
            timeout_remaining = timeout - (time.time() - start_time)
            if timeout_remaining <= 0:
                raise exceptions.TimeoutError("Container took too long to start.")
            self._check_services(timeout_remaining)
            logger.debug("%r running started hooks", self)
            plugin.manager.hook.session_started(session=self)
            logger.debug("%r finished started hooks", self)
        except exceptions.TimeoutError:
            # Don't leak a half-started container on timeout.
            if self._container is not None:
                self.stop(0.1)
            raise
    def stop(self, timeout=10):
        """Stop the Localstack container.
        Args:
            timeout (float, optional): Timeout in seconds to wait for the
                container to stop before sending a SIGKILL. Default: 10
        Raises:
            docker.errors.APIError: If the Docker daemon returns an error.
        """
        if self._container is not None:
            logger.debug("Stopping %r", self)
            logger.debug("Running stopping hooks for %r", self)
            plugin.manager.hook.session_stopping(session=self)
            logger.debug("Finished stopping hooks for %r", self)
            # BUG FIX: the timeout was previously hard-coded to 10, so the
            # parameter (and __del__'s stop(0.1)) had no effect.
            self._container.stop(timeout=timeout)
            self._container = None
            self._stdout_tailer = None
            self._stderr_tailer = None
            logger.debug("Stopped %r", self)
            logger.debug("Running stopped hooks for %r", self)
            plugin.manager.hook.session_stopped(session=self)
            logger.debug("Finished stopped hooks for %r", self)
    def __del__(self):
        """Stop container on garbage collection."""
        self.stop(0.1)
    def map_port(self, port):
        """Return host port based on Localstack container port."""
        if self._container is None:
            raise exceptions.ContainerNotStartedError(self)
        result = self.docker_client.api.port(self._container.id, int(port))
        if not result:
            return None
        return int(result[0]["HostPort"])
def generate_container_name():
    """Return 'pytest-localstack-' plus six random ASCII letters drawn from os.urandom."""
    letters = set(string.ascii_letters)
    picked = []
    while len(picked) < 6:
        # Draw only as many raw bytes as we still need; keep the ones that
        # happen to decode to ASCII letters.
        raw = os.urandom(6 - len(picked))
        picked.extend(chr(b) for b in raw if chr(b) in letters)
    return "pytest-localstack-" + "".join(picked)
| [
37811,
10987,
290,
9427,
351,
257,
10714,
25558,
9290,
526,
15931,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4731,
198,
11748,
640,
198,
6738,
4866,
1330,
4866,
198,
198,
6738,
16846,
1330,
2196,
198,
198,
6738,
12972,
9288,
62,
... | 2.323146 | 4,908 |
from rdkit import Chem
from functools import partial
from multiprocessing import Pool
from fuseprop import find_clusters, extract_subgraph, get_mol, get_smiles, find_fragments
from copy import deepcopy
import numpy as np
import torch
import argparse
from private import *
from agent import sample
| [
6738,
374,
67,
15813,
1330,
12870,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
6738,
32738,
22930,
1330,
1064,
62,
565,
13654,
11,
7925,
62,
7266,
34960,
11,
651,
62,
43132,
11,
651,
62... | 3.670732 | 82 |
#!/usr/bin/env python
"""
Apply a patch that adds Rust doc comments to bindgen-generated Rust source.
To run unittests:
python -m unittest -v add_doc_comments
"""
# pylint: disable=too-few-public-methods
from __future__ import print_function
import argparse
import logging
import re
import unittest
# Captures the leading whitespace of an indented line; compiled as a bytes
# pattern because patch and source text are handled as bytes in this module.
INDENT_PAT = re.compile(r'^(\s+)\S'.encode())
LOGGER = logging.getLogger(__name__)
# Cache of per-function loggers; presumably consumed by _local_logger(),
# which is not visible in this chunk — confirm against the full file.
_LOGGER_CACHE = {}
class AddDocCommentsBaseException(Exception):
    """Root of the exception hierarchy for the add_doc_comments module."""
class InvalidPatchException(AddDocCommentsBaseException):
    """Raised when the input patch text is malformed."""
class AddDocCommentsException(AddDocCommentsBaseException):
    """Raised when doc comments cannot be added to a file."""
class DocCommentInsertIndexBaseException(AddDocCommentsBaseException):
    """Root for errors raised while locating a doc-comment insert index."""
class DocCommentsNoMatchException(DocCommentInsertIndexBaseException):
    """Raised when no insertion point matches the doc comment's context."""
class DocCommentsMultipleMatchesException(DocCommentInsertIndexBaseException):
    """Raised when several insertion points match the doc comment's context.

    :ivar Optional[List[int]] candidate_indices: candidate insert indices
    """
def patch_plus_parts(patch):
    """
    Yields a "sliding window" over the additive parts of the patch.
    :param str patch: patch/diff (str or bytes; str is encoded to bytes)
    :return: (prefix_lines, plus_lines, suffix_lines)
    :rtype: (List[bytes], List[bytes], List[bytes])
    """
    class State(object):
        """Parser state"""
        # WAIT_ADD: reading context, waiting for the next '+' group.
        # ON_ADD: currently inside a run of '+' lines.
        WAIT_ADD, ON_ADD = range(2)
    # NOTE(review): _local_logger is not defined in this chunk — presumably a
    # helper built on _LOGGER_CACHE elsewhere in the file; confirm.
    logger = _local_logger()
    # Convert unicode types to bytes
    if not isinstance(patch, bytes):
        patch = patch.encode()
    state = State.WAIT_ADD
    prefix_lines = []
    plus_lines = []
    suffix_lines = []
    seen_plus = False
    for line in patch.split(b'\n'):
        # Skip empty lines
        if not line:
            continue
        # Skip these lines (diff/hunk headers)
        splits = line.split()
        if splits and splits[0] in [b'diff', b'index', b'---', b'+++', b'@@']:
            continue
        logger.debug('state=%d, prefix=%s, plus=%s, suffix=%s, seen_plus=%s',
                     state, prefix_lines, plus_lines, suffix_lines, seen_plus)
        # We avoid using a single index [0] because Python3 returns an int
        first_char, line = line[0:1], line[1:]
        if first_char == b'+':
            if state == State.WAIT_ADD:
                state = State.ON_ADD
                # Starting a new '+' group: flush the previous window first.
                if seen_plus:
                    yield prefix_lines, plus_lines, suffix_lines
                    # Do a shallow copy to avoid weird aliasing
                    prefix_lines = suffix_lines[:]
                    plus_lines = []
                    suffix_lines = []
                seen_plus = True
            plus_lines.append(line)
        elif first_char == b'-':
            # ignore minus lines
            continue
        elif first_char == b' ':
            # Context line: before the first '+' group it is prefix context,
            # afterwards it is suffix context of the most recent group.
            if state == State.ON_ADD:
                state = State.WAIT_ADD
            context_lines = suffix_lines if seen_plus else prefix_lines
            context_lines.append(line)
        else:
            raise InvalidPatchException(
                'Invalid patch line %s; does not start with "+- "' % repr(line))
    # Flush the final window (empty plus_lines if the patch added nothing).
    yield prefix_lines, plus_lines, suffix_lines
# pylint: disable=missing-docstring
# Example: 'CS_MODE_LITTLE_ENDIAN = 0,',
# Anchored at BOTH ends so partial matches are rejected. The previous pattern
# (r'^A|B$') anchored each alternative on one side only, so e.g. b'foo-bar'
# matched via its 'foo' prefix.
RUST_IDENT_PAT = re.compile(r'^(?:[a-zA-Z][a-zA-Z0-9_]*|_[a-zA-Z0-9_]+)$'.encode())
def is_rust_ident(ident):
    """Return a truthy match object if *ident* is a valid Rust identifier, else None."""
    if not isinstance(ident, bytes):
        ident = ident.encode()
    return RUST_IDENT_PAT.match(ident)
# pylint: disable=missing-docstring
def rust_def_name(rust_expr):
    """
    Returns the Rust "identifier" defined in a Rust expression, otherwise None.
    This assumes rust_expr is "simplified" (bytes, no leading or trailing
    whitespace).
    :param bytes rust_expr: Line of Rust code that may define a type
    :return: defined Rust name, or None
    :rtype: Optional[bytes]
    """
    logger = _local_logger()
    splits = rust_expr.split()
    def find_def_with_trailing(splits_start, extract_def):
        """
        Find a definition that starts with given splits
        :type extract_def: (bytes) -> bytes
        """
        sub_split_len = len(splits_start)
        if not (len(splits) >= sub_split_len + 1 and
                splits[:sub_split_len] == splits_start):
            return None
        name_part = splits[sub_split_len]
        def_name = extract_def(name_part)
        logger.debug('Returning rust_def_name %s from %s',
                     def_name, repr(rust_expr))
        return def_name
    def extract_trailer(trailer):
        """Return a function that truncates a token at the first *trailer*."""
        def func(fn_part):
            """Truncate fn_part at trailer, or return it unchanged."""
            try:
                trailer_index = fn_part.index(trailer)
                return fn_part[:trailer_index]
            except ValueError:
                return fn_part
        return func
    # pub mod/type/enum/struct NAME ...
    two_index_tokens = [b'mod', b'type', b'enum', b'struct']
    for token in two_index_tokens:
        if len(splits) >= 3 and splits[:2] == [b'pub', token]:
            def_name = splits[2]
            logger.debug('Returning rust_def_name %s from %s',
                         def_name, repr(rust_expr))
            # BUG FIX: def_name is bytes; searching for the str '(' raised
            # TypeError (which the ValueError handler did not catch) on
            # tuple structs like b'pub struct Foo(i32);'. Search for b'('.
            try:
                select_idx = def_name.index(b'(')
            except ValueError:
                select_idx = len(def_name)
            return def_name[:select_idx]
    # pub fn NAME(...) / pub const NAME: ...
    def_with_trailing_args = [
        ([b'pub', b'fn'], extract_trailer(b'(')),
        ([b'pub', b'const'], extract_trailer(b':')),
    ]
    for args in def_with_trailing_args:
        candidate = find_def_with_trailing(*args)
        if candidate:
            return candidate
    # Example: b'CS_MODE_LITTLE_ENDIAN = 0,'
    if len(splits) == 3 and is_rust_ident(splits[0]) and splits[1] == b'=':
        return splits[0]
    return None
# pylint: disable=missing-docstring
def _simplify(str_):
"""Simplify source line"""
return str_.strip()
def doc_comment_insert_index(doc_lines, context_line, fs_path_rust_defs):
    """Find the index in *doc_lines* where a doc comment should be inserted.

    A line matches if it is textually identical to *context_line* (after
    whitespace simplification) or defines the same Rust name.

    :param list doc_lines: lines of the target file (bytes or InsertLines)
    :param context_line: first post-context line from the patch
    :param dict fs_path_rust_defs: precomputed {simplified line: rust def name}
    :return: index of the single matching line
    :raises DocCommentsNoMatchException: no line matched
    :raises DocCommentsMultipleMatchesException: more than one line matched
    """
    candidate_indices = []
    context_line_simple = _simplify(context_line)
    def_name = rust_def_name(context_line_simple)
    for idx, candidate_line in enumerate(doc_lines):
        # Skip comment lines we inserted ourselves on previous parts.
        if isinstance(candidate_line, InsertLines):
            continue
        candidate_line_simple = _simplify(candidate_line)
        # Match either on the defined Rust name or on exact simplified text.
        lines_match = bool(
            def_name and def_name == fs_path_rust_defs.get(candidate_line_simple)
        ) or context_line_simple == candidate_line_simple
        if lines_match:
            candidate_indices.append(idx)
    if not candidate_indices:
        raise DocCommentsNoMatchException()
    if len(candidate_indices) == 1:
        return candidate_indices[0]
    # BUG FIX: the 2-placeholder format string was applied to
    # candidate_indices alone (repr(context_line) was passed as a second
    # constructor argument), raising TypeError instead of this exception.
    mult_matches = DocCommentsMultipleMatchesException(
        'Found multiple insert indices %s for context line %s' %
        (candidate_indices, repr(context_line)))
    mult_matches.candidate_indices = candidate_indices
    raise mult_matches
class InsertLines(object):
    """
    Wrapper around a list of strings that shows the line was manually
    inserted

    :ivar List[str | InsertLines] inner: list of internal strings
    :ivar str indent: indent to prefix each line with
    """
    # NOTE(review): no methods are visible in this chunk, yet add_doc_comments
    # calls bytes(line) on instances and assigns .indent — presumably
    # __init__/__bytes__ are defined elsewhere in the file; confirm.
def rstrip_line(line):
    """Strip trailing whitespace from a line, preserving its newline if any.

    Works for both str and bytes. The previous ``line[-1] == '\\n'`` test
    could never be true for bytes (indexing bytes yields an int), so binary
    output lost its trailing newlines, and it raised IndexError on an empty
    line.
    """
    newline = '\n' if isinstance(line, str) else b'\n'
    if line.endswith(newline):
        return line[:-1].rstrip() + newline
    return line.rstrip()
def add_doc_comments(doc_patch, fs_path, output_path):
    """
    Add Rust doc comments to a file from a commit that added ONLY doc comments

    :param file doc_patch: patch that added doc comments
    :param str fs_path: path to file that needs doc comments added
    :param str output_path: path to file that should be written with with doc
    comments
    """
    # pylint: disable=too-many-locals
    logger = _local_logger()
    patch = doc_patch.read()
    with open(fs_path, 'rb') as fs_path_file:
        # type: List[Union[str, InsertLines]]
        doc_lines = list(fs_path_file.readlines())
    matched_parts = 0
    total_parts = 0

    # Pre-compute the line -> definition-name index once so each patch
    # hunk does not rescan the whole file.
    logger.info('Pre-computing rust_def_names')
    fs_path_rust_defs = {}
    for line in doc_lines:
        line = _simplify(line)
        line_def = rust_def_name(line)
        if line_def:
            fs_path_rust_defs[line] = line_def
    logger.info('Done pre-computing rust_def_names')

    for _, plus_lines, post_lines in patch_plus_parts(patch):
        logger.info('Plus lines: %s', plus_lines)
        logger.info('Post context: %s', post_lines)
        total_parts += 1
        if not post_lines:
            logger.debug('Skipping part, no post context lines')
            continue
        # Only look at one line of context
        context_line = post_lines[0]
        try:
            insert_index = doc_comment_insert_index(doc_lines, context_line,
                                                    fs_path_rust_defs)
            logger.info('CONTEXT: found context line for %s',
                        repr(context_line))
            doc_lines.insert(insert_index, InsertLines(plus_lines))
            matched_parts += 1
        except DocCommentsNoMatchException:
            logger.info('NO CONTEXT: found no context lines for %s',
                        repr(context_line))
        except DocCommentsMultipleMatchesException as exc:
            # Typo fix: message previously read "mound multiple indices".
            logger.info('NO CONTEXT: found multiple indices %s for %s',
                        exc.candidate_indices, repr(context_line))

    with open(output_path, 'wb') as output_file:
        for idx, line in enumerate(doc_lines):
            if isinstance(line, InsertLines):
                # Borrow the indentation of the line the comment precedes.
                try:
                    context_line = doc_lines[idx + 1]
                    indent_matches = INDENT_PAT.findall(context_line)
                    line.indent = indent_matches[0] if indent_matches else None
                except IndexError:
                    pass
            output_file.write(rstrip_line(bytes(line)))
    logger.warning('Matched %d / %d patch parts', matched_parts, total_parts)
def setup_logger(verbosity):
    """Set up module level logger."""
    levels = [logging.WARN, logging.INFO, logging.DEBUG]
    # Clamp verbosity so repeated -v flags cannot index past DEBUG.
    level_index = min(verbosity, len(levels) - 1)
    logging.basicConfig()
    LOGGER.setLevel(levels[level_index])
EPILOG = """
Example usage:
git diff e67b72b8^ e67b72b8 | \\
./scripts/add_doc_comments.py \\
--doc-patch - \\
--fs-path pre_generated/capstone.rs \\
-o pre_generated/capstone.doc.rs
"""
def main():
    """Parse the command line, configure logging, and run the tool."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, epilog=EPILOG)
    arg_parser.add_argument('--doc-patch', '-p', type=argparse.FileType('rb'),
                            required=True,
                            help='File with patch (or - for stdin)')
    arg_parser.add_argument('--fs-path', required=True,
                            help='Path to documented file in current filesystem')
    destination_group = arg_parser.add_mutually_exclusive_group(required=True)
    destination_group.add_argument('--in-place', '-i', action='store_true',
                                   help='Update fs-path in-place')
    destination_group.add_argument('--output', '-o',
                                   help='Output Rust source with doc comments added')
    arg_parser.add_argument(
        '--verbose', '-v', action='count', default=0,
        help='Log more verbosely (can be passed multiple times)')
    args = arg_parser.parse_args()
    setup_logger(args.verbose)
    LOGGER.info(
        'Set verbosity to %s',
        logging.getLevelName(logging.getLogger().level))
    # --in-place writes the result back over the input file itself.
    output_path = args.fs_path if args.in_place else args.output
    add_doc_comments(args.doc_patch, args.fs_path, output_path)
# Script entry point.
if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
44836,
257,
8529,
326,
6673,
17103,
2205,
3651,
284,
11007,
5235,
12,
27568,
17103,
2723,
13,
198,
198,
2514,
1057,
555,
715,
3558,
25,
198,
220,
220,
220,
21015,
532,... | 2.302109 | 5,263 |
# Program to clean out the filesystem cache
import numpy

big_block = numpy.arange(1000 * 100 * 125, dtype='f8')  # 100 MB of RAM
scaled_block = big_block * 3  # Another 100 MB

# Drop both references so the interpreter can release the memory.
del big_block, scaled_block

# Busy-loop to fully recharge the python interpreter.
j = 2
for step in range(1000 * 1000):
    j += step * 2
| [
2,
6118,
284,
3424,
503,
262,
29905,
12940,
198,
11748,
299,
32152,
198,
198,
64,
796,
299,
32152,
13,
283,
858,
7,
12825,
1635,
1802,
1635,
13151,
11,
288,
4906,
11639,
69,
23,
11537,
220,
1303,
1802,
10771,
286,
13931,
198,
65,
79... | 3.039216 | 102 |
#!/usr/bin/env python3
# Test code for functions that load halcon intrinsics and extrinsics
from load_camera_info import *
# Fixture directory for the extrinsics tests.  `Path` is presumably
# re-exported by load_camera_info's star import -- verify.
testDataPath = Path('data_for_unit_tests/extrinsics_for_testing')

if __name__=='__main__':
    # Run the single consistency check when executed directly.
    test_that_pose_types_are_consistent()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
6208,
2438,
329,
5499,
326,
3440,
10284,
1102,
22496,
873,
290,
22820,
1040,
873,
198,
6738,
3440,
62,
25695,
62,
10951,
1330,
1635,
198,
198,
9288,
6601,
15235,
796,
10644... | 2.877778 | 90 |
from __future__ import division
import numpy as np
from pydicom import dcmread
from pydicom.filebase import DicomBytesIO
from zipfile import ZipFile
from torch.utils.data import Dataset
from skimage.exposure import rescale_intensity
import pandas as pd
from scipy.ndimage import center_of_mass
from dataset.processing import CropResize
from dataset.processing import ResizePad
##################################################
## resize to (196, 196), then crop down
## to (128, 128), so lose 34 pixels on each side.
##################################################
def preprocess_image(image,
                     scale_size=(196,196),
                     data_size=(128,128)):
    """
    - rescale image intensity to uint8
    - crop the image to square shape
      then resize to scale_size
    - crop the edges to be data_size

    Params
    ------
    - image : 2-D greyscale array (sliced as [row, col] below)
    - scale_size : (rows, cols) after square-crop + resize
    - data_size : (rows, cols) of the final centred crop

    Return
    ------
    - image_cropped : the data_size window of the rescaled image
    - paddings : [(top, bottom), (left, right), original image.shape],
      i.e. the padding needed to undo the crop
    """
    # Map intensities onto the full uint8 range before resizing.
    image = rescale_intensity(image, out_range=np.uint8).astype(np.uint8)
    image_scaled = CropResize(scale_size).crop_resize_image(image)
    # Centre the crop window on the intensity centre of mass, clamped so
    # the window stays inside the scaled image.
    cy, cx = center_of_mass(image_scaled)
    crop = (int(min(max(cy-data_size[0]/2,0),scale_size[0]-data_size[0])),
            int(min(max(cx-data_size[1]/2,0),scale_size[1]-data_size[1])))
    image_cropped = image_scaled[crop[0]:crop[0]+data_size[0],
                                 crop[1]:crop[1]+data_size[1]]
    paddings = [(crop[0], scale_size[0]-data_size[0]-crop[0]),
                (crop[1], scale_size[1]-data_size[1]-crop[1]),
                image.shape]
    return image_cropped, paddings
##################################################
## AoDistDataset for loading Ao_Dist data
##################################################
class AoDistDataset(Dataset):
    """
    Dataset for Ao_dist MRI data
    scale to scale_size
    crop down to data_size
    """

    def __init__(self, root_dir, csv_data,
                 scale_size=(196,196),
                 data_size=(128,128),
                 seriesDescription="CINE_segmented_Ao_dist",
                 meta=["PixelSpacing", "Rows", "Columns"],
                 return_paddings=False):
        """
        Params
        ------
        - root_dir : directory containing the image data
        - csv_data : CSV file path or an already-loaded DataFrame
        - scale_size : intermediate resize target (see preprocess_image)
        - data_size : final crop size
        - seriesDescription : DICOM series to select
        - meta : DICOM attributes to carry along with each sample
        - return_paddings : whether items also yield crop paddings
        """
        # NOTE(review): mutable default for `meta` is shared across calls;
        # harmless only as long as callers never mutate it.
        self.root_dir = root_dir
        self.csv_data = csv_data
        self.scale_size = scale_size
        self.data_size = data_size
        #self.ImageCropResize = CropResize(data_size)
        # Accept either a CSV path or a pre-built DataFrame.
        self.labels = pd.read_csv(csv_data) if type(csv_data) is str else csv_data
        self.seriesDescription = seriesDescription
        self.meta = meta
        self.return_paddings = return_paddings
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
279,
5173,
291,
296,
1330,
288,
11215,
961,
198,
6738,
279,
5173,
291,
296,
13,
7753,
8692,
1330,
360,
291,
296,
45992,
9399,
198,
6738,
199... | 2.247557 | 1,228 |
import random
import socket
import time
import math

# Minimal TCP service: reads "a b" (two space-separated integers) from a
# client and replies with the hypotenuse sqrt(a^2 + b^2).
server = socket.socket()

host = '127.0.0.1'
port = 8000

server.bind((host, port))
print('Start on:', host, port)
print('URL', (host, port))
server.listen(5)

while True:
    # One request per connection.
    conn, (client_host, client_port) = server.accept()
    print('Got connection', client_host, client_port)
    data = conn.recv(1024).decode('utf-8').split(' ')
    data = list(map(int, data))
    response = math.sqrt(data[0] ** 2 + data[1] ** 2)
    # NOTE(review): the Russian reply text looks misspelled (likely meant
    # 'Ваша гипотенуза') -- left byte-identical here.
    conn.send(bytes('Ващ гипотинуза = '+ str(response), "utf-8"))
    conn.close()
| [
11748,
4738,
201,
198,
11748,
17802,
201,
198,
11748,
640,
201,
198,
11748,
10688,
201,
198,
201,
198,
15388,
796,
17802,
13,
44971,
3419,
201,
198,
4774,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
201,
198,
634,
796,
38055,
201,
... | 2.334694 | 245 |
#!/usr/bin/env python
#
# Author: Hari Sekhon
# Date: 2007-02-20 17:49:00 +0000 (Tue, 20 Feb 2007)
#
# http://github.com/harisekhon
#
# License: see accompanying LICENSE file
#
"""Nagios plugin to test the state of all 3ware raid arrays and/or drives
on all 3ware controllers on the local machine. Requires the tw_cli program
written by 3ware, which should be called tw_cli_64 if running on a 64-bit
system. May be remotely executed via any of the standard remote nagios
execution mechanisms"""
__author__ = "Hari Sekhon"
__title__ = "Nagios Plugin for 3ware RAID"
__version__ = "1.1"
# Standard Nagios return codes
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
import os
import re
import sys
try:
from subprocess import Popen, PIPE, STDOUT
except ImportError:
print "Failed to import subprocess module.",
print "Perhaps you are using a version of python older than 2.4?"
sys.exit(CRITICAL)
from optparse import OptionParser
SRCDIR = os.path.dirname(sys.argv[0])
def end(status, message, disks=False):
    """Exits the plugin with first arg as the return code and the second
    arg as the message to output"""

    # Prefix the status line with the subsystem being reported on.
    check = "RAID"
    if disks == True:
        check = "DISKS"
    if status == OK:
        print "%s OK: %s" % (check, message)
        sys.exit(OK)
    elif status == WARNING:
        print "%s WARNING: %s" % (check, message)
        sys.exit(WARNING)
    elif status == CRITICAL:
        print "%s CRITICAL: %s" % (check, message)
        sys.exit(CRITICAL)
    else:
        # Anything unrecognised maps to UNKNOWN per Nagios convention.
        print "UNKNOWN: %s" % message
        sys.exit(UNKNOWN)
# tw_cli needs raw controller access, so refuse to run unprivileged.
if os.geteuid() != 0:
    end(UNKNOWN, "You must be root to run this plugin")

# Machine architecture selects which bundled tw_cli binary to use.
ARCH = os.uname()[4]

# Path to the tw_cli binary; populated by _set_twcli_binary().
BIN = None
def _set_twcli_binary(path=None):
    """ set the path to the twcli binary"""

    global BIN
    if path:
        # An explicit path from the command line wins.
        BIN = path
    elif re.match("i[3456]86", ARCH):
        BIN = SRCDIR + "/tw_cli"
    elif ARCH == "x86_64":
        BIN = SRCDIR + "/tw_cli_64"
    else:
        end(UNKNOWN, "architecture is not x86 or x86_64, cannot run 3ware " \
                     "utility")

    if not os.path.exists(BIN):
        end(UNKNOWN, "3ware utility for this architecture '%s' cannot be " \
                     "found" % BIN)

    if not os.access(BIN, os.X_OK):
        end(UNKNOWN, "3ware utility '%s' is not executable" % BIN)
def run(cmd):
    """runs a system command and returns stripped output"""

    if cmd == "" or cmd == None:
        end(UNKNOWN, "internal python error - " \
                   + "no cmd supplied for 3ware utility")
    try:
        # tw_cli reads its command from stdin; stderr is folded into stdout.
        process = Popen(BIN, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    except OSError, error:
        error = str(error)
        if error == "No such file or directory":
            end(UNKNOWN, "Cannot find 3ware utility '%s'" % BIN)
        else:
            end(UNKNOWN, "error trying to run 3ware utility - %s" % error)

    if process.poll():
        end(UNKNOWN, "3ware utility process ended prematurely")

    try:
        stdout, stderr = process.communicate(cmd)
    except OSError, error:
        end(UNKNOWN, "unable to communicate with 3ware utility - %s" % error)

    if stdout == None or stdout == "":
        end(UNKNOWN, "No output from 3ware utility")
    output = str(stdout).split("\n")
    if output[1] == "No controller found.":
        end(UNKNOWN, "No 3ware controllers were found on this machine")
    # Drop the banner/header lines and the trailing blank lines.
    stripped_output = output[3:-2]
    if process.returncode != 0:
        # NOTE(review): stdout doubles as the error text here because
        # stderr was redirected into stdout above.
        stderr = str(stdout).replace("\n"," ")
        end(UNKNOWN, "3ware utility returned an exit code of %s - %s" \
                     % (process.returncode, stderr))
    else:
        return stripped_output
def test_all(verbosity, warn_true=False, no_summary=False, show_drives=False):
    """Calls the raid and drive testing functions"""

    array_result, array_message = test_arrays(verbosity, warn_true, no_summary)
    # Skip the drive scan when arrays already failed and the caller did
    # not ask for drive details.
    if array_result != OK and not show_drives:
        return array_result, array_message
    drive_result, drive_message = test_drives(verbosity, warn_true, no_summary)
    # Overall severity is the worse of the two checks.
    if drive_result > array_result:
        result = drive_result
    else:
        result = array_result

    if drive_result != OK:
        if array_result == OK:
            message = "Arrays OK but... " + drive_message
        else:
            message = array_message + ", " + drive_message
    else:
        if show_drives:
            message = array_message + ", " + drive_message
        else:
            message = array_message

    return result, message
def test_arrays(verbosity, warn_true=False, no_summary=False):
    """Tests all the raid arrays on all the 3ware controllers on
    the local machine"""

    lines = run("show")
    #controllers = [ line.split()[0] for line in lines ]
    # Controller ids look like "c0", "c1", ... -- keep only those lines.
    controllers = [ line.split()[0] for line in lines if line and line[0] == "c" ]
    status = OK
    message = ""
    number_arrays = 0
    arrays_not_ok = 0
    number_controllers = len(controllers)
    for controller in controllers:
        unit_lines = run("/%s show unitstatus" % controller)
        if verbosity >= 3:
            for unit_line in unit_lines:
                print unit_line
            print
        for unit_line in unit_lines:
            number_arrays += 1
            unit_line = unit_line.split()
            state = unit_line[2]
            if state == "OK":
                continue
            elif state == "REBUILDING" or \
                 state == "VERIFY-PAUSED" or \
                 state == "VERIFYING" or \
                 state == "INITIALIZING":
                # Transitional states: reported, and counted as WARNING
                # only when -w was given.
                unit = int(unit_line[0][1:])
                raid = unit_line[1]
                if state == "VERIFY-PAUSED" or \
                   state == "VERIFYING" or \
                   state == "INITIALIZING":
                    percent_complete = unit_line[4]
                else:
                    percent_complete = unit_line[3]
                message += "Array %s status is '%s'(%s on adapter %s) - " \
                           % (unit, state, raid, controller[1:])
                if state == "REBUILDING":
                    message += "Rebuild "
                elif state == "VERIFY-PAUSED" or state == "VERIFYING":
                    message += "Verify "
                elif state == "INITIALIZING":
                    message += "Initializing "
                message += "Status: %s%% complete, " % percent_complete
                if warn_true:
                    arrays_not_ok += 1
                    if status == OK:
                        status = WARNING
            else:
                # Any other state is a hard failure.
                arrays_not_ok += 1
                unit = int(unit_line[0][1:])
                raid = unit_line[1]
                message += "Array %s status is '%s'" % (unit, state)
                message += "(%s on adapter %s), " % (raid, controller[1:])
                status = CRITICAL
    message = message.rstrip(", ")
    message = add_status_summary(status, message, arrays_not_ok, "arrays")
    if not no_summary:
        message = add_checked_summary(message, \
                                      number_arrays, \
                                      number_controllers, \
                                      "arrays")
    return status, message
def test_drives(verbosity, warn_true=False, no_summary=False):
    """Tests all the drives on the all the 3ware raid controllers
    on the local machine"""

    lines = run("show")
    controllers = []
    for line in lines:
        parts = line.split()
        if len(parts):
            controllers.append(parts[0])
    status = OK
    message = ""
    number_drives = 0
    drives_not_ok = 0
    number_controllers = len(controllers)
    for controller in controllers:
        drive_lines = run("/%s show drivestatus" % controller)
        number_drives += len(drive_lines)
        if verbosity >= 3:
            for drive_line in drive_lines:
                print drive_line
            print
        for drive_line in drive_lines:
            drive_line = drive_line.split()
            state = drive_line[1]
            if state == "OK" or state == "NOT-PRESENT":
                continue
            # Transitional states are tolerated unless -w was given.
            if not warn_true and \
               state in ('VERIFYING', 'REBUILDING', 'INITIALIZING'):
                continue
            else:
                drives_not_ok += 1
                # Strip the leading type letters ("d"/"u") for display.
                drive = drive_line[0]
                if drive[0] == "d":
                    drive = drive[1:]
                array = drive_line[2]
                if array[0] == "u":
                    array = array[1:]
                message += "Status of drive in port "
                message += "%s is '%s'(Array %s on adapter %s), " \
                           % (drive, state, array, controller[1:])
                status = CRITICAL
    message = message.rstrip(", ")
    message = add_status_summary(status, message, drives_not_ok, "drives")
    if not no_summary:
        message = add_checked_summary(message, \
                                      number_drives, \
                                      number_controllers, \
                                      "drives")
    return status, message
def add_status_summary(status, message, number_failed, device):
    """Adds a status summary string to the beginning of the message
    and returns the message"""

    # Singularise the device word when exactly one device failed.
    if device == "arrays":
        if number_failed == 1:
            device = "array"
    elif device == "drives":
        if number_failed == 1:
            device = "drive"
    else:
        device = "[unknown devices, please check code]"

    if status == OK:
        if message == "":
            message = "All %s OK" % device + message
        else:
            message = "All %s OK - " % device + message
    else:
        message = "%s %s not OK - " % (number_failed, device) + message

    return message
def add_checked_summary(message, number_devices, number_controllers, device):
    """Adds a summary string of what was checked to the end of the message
    and returns the message"""

    # Singularise the device word when exactly one device was checked.
    if device == "arrays":
        if number_devices == 1:
            device = "array"
    elif device == "drives":
        if number_devices == 1:
            device = "drive"
    else:
        device = "[unknown devices, please check code]"

    if number_controllers == 1:
        controller = "controller"
    else:
        controller = "controllers"

    message += " [%s %s checked on %s %s]" % (number_devices, device, \
                                              number_controllers, controller)
    return message
def main():
    """Parses command line options and calls the function to
    test the arrays/drives"""

    parser = OptionParser()

    parser.add_option( "-a",
                       "--arrays-only",
                       action="store_true",
                       dest="arrays_only",
                       help="Only test the arrays. By default both arrays " \
                          + "and drives are checked")

    parser.add_option( "-b",
                       "--binary",
                       dest="binary",
                       help="Full path of the tw_cli binary to use.")

    parser.add_option( "-d",
                       "--drives-only",
                       action="store_true",
                       dest="drives_only",
                       help="Only test the drives. By default both arrays " \
                          + "and drives are checked")

    parser.add_option( "-n",
                       "--no-summary",
                       action="store_true",
                       dest="no_summary",
                       help="Do not display the number of arrays/drives " \
                          + "checked. By default the number of arrays and " \
                          + "drives checked are printed at the end of the " \
                          + "line. This is useful information and helps to " \
                          + "know that they are detected properly")

    parser.add_option( "-s",
                       "--show-drives",
                       action="store_true",
                       dest="show_drives",
                       help="Show drive status. By default drives are " \
                          + "checked as well as arrays, but there is no " \
                          + "output regarding them unless there is a " \
                          + "problem. Use this is you want drive details as " \
                          + "well when there is an array problem (default " \
                          + "behaviour is to only show the array problem to " \
                          + "avoid too much cluttering information), " \
                          + "or if you want to see the drive information " \
                          + "even when all drives are ok")

    parser.add_option( "-w",
                       "--warn-rebuilding",
                       action="store_true",
                       dest="warn_true",
                       help="Warn when an array is Rebuilding, Initializing " \
                          + "or Verifying. You might want to do this to keep " \
                          + "a closer eye on things. Also, these conditions " \
                          + "can affect performance so you might want to " \
                          + "know this is going on. Default is to not warn " \
                          + "during these states as they are not usually " \
                          + "problems")

    parser.add_option( "-v",
                       "--verbose",
                       action="count",
                       dest="verbosity",
                       help="Verbose mode. Good for testing plugin. By default\
only one result line is printed as per Nagios standards")

    parser.add_option( "-V",
                       "--version",
                       action="store_true",
                       dest="version",
                       help="Print version number and exit")

    (options, args) = parser.parse_args()

    # Positional arguments are not accepted.
    if args:
        parser.print_help()
        sys.exit(UNKNOWN)

    arrays_only = options.arrays_only
    binary = options.binary
    drives_only = options.drives_only
    no_summary = options.no_summary
    show_drives = options.show_drives
    warn_true = options.warn_true
    verbosity = options.verbosity
    version = options.version

    if version:
        print __version__
        sys.exit(OK)

    # Reject mutually incompatible switch combinations up front.
    if arrays_only and drives_only:
        print "You cannot use the -a and -d switches together, they are",
        print "mutually exclusive\n"
        parser.print_help()
        sys.exit(UNKNOWN)
    elif arrays_only and show_drives:
        print "You cannot use the -a and -s switches together"
        print "No drive information can be printed if you only check arrays\n"
        parser.print_help()
        sys.exit(UNKNOWN)
    elif drives_only and warn_true:
        print "You cannot use the -d and -w switches together"
        print "Array warning states are invalid when testing only drives\n"
        parser.print_help()
        sys.exit(UNKNOWN)

    _set_twcli_binary(binary)

    if arrays_only:
        result, output = test_arrays(verbosity, warn_true, no_summary)
    elif drives_only:
        result, output = test_drives(verbosity, warn_true, no_summary)
        # disks=True labels the output "DISKS" and exits here.
        end(result, output, True)
    else:
        result, output = test_all(verbosity, warn_true, no_summary, show_drives)

    end(result, output)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Treat operator interrupt as CRITICAL so Nagios notices.
        print "Caught Control-C..."
        sys.exit(CRITICAL)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
220,
6434,
25,
2113,
72,
37558,
24130,
198,
2,
220,
7536,
25,
4343,
12,
2999,
12,
1238,
1596,
25,
2920,
25,
405,
1343,
2388,
357,
41392,
11,
1160,
3158,
4343,
8,
198,
... | 2.096541 | 7,458 |
import tensorflow as tf
import model
import shutil

# Load the Keras model definition.
# NOTE(review): this rebinds the name `model` from the module to the
# instance returned by make().
model = model.make()

# Load the trained network weights.
model.load_weights("weight.hdf5")

# Get the TensorFlow session backing Keras.
sess = tf.keras.backend.get_session()

# Export as a SavedModel, wiping any previous export first
# (ignore_errors=True so a missing directory is not an error).
shutil.rmtree("saved_model/",True)
tf.saved_model.simple_save(sess,"saved_model/",
                           inputs={'input': model.inputs[0]},
                           outputs={'output': model.outputs[0]})
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2746,
198,
11748,
4423,
346,
198,
198,
2,
17337,
292,
5641,
40361,
21959,
9202,
31758,
45739,
255,
2515,
123,
164,
122,
120,
1792,
222,
198,
19849,
796,
2746,
13,
15883,
3419,
198,
2,
... | 2 | 190 |
#!/usr/bin/python3
#
# Copyright 2021 Dustin Kleckner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
An example which generates a volume with a distorted cube of points
'''
import sys
from muvi import VolumetricMovie, VolumeProperties, open_3D_movie
from muvi.distortion import get_distortion_model
import numpy as np
import time
from scipy import ndimage
import os
from scipy.spatial.transform import Rotation
import pickle
from scipy.interpolate import UnivariateSpline
# Blob radius (physical units) and movie length.
GAUSSIAN_SIZE = 2
FRAMES = 60
DT = 1 / FRAMES

output_dir = "tracking_example"
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Our sample points are a cube, which is then rotated.
cube = 5 + 20 * ((np.arange(8).reshape(-1, 1) // 2**np.arange(3)) % 2 - 0.5).astype('f')
vec = np.array([1, 2, 4], 'f')
# NOTE(review): `mag` is not defined or imported in this file as shown --
# presumably a vector-magnitude helper from muvi; verify it exists.
vec *= 2 * np.pi / mag(vec)
print(mag(vec))
rot_cube = [
    Rotation.from_rotvec(n / FRAMES * vec).apply(cube)
    for n in range(FRAMES)]

# MUVI file name
mfn = os.path.join(output_dir, "tracking.vti")

if not os.path.exists(mfn):
    print(f'Generating volumetric movie: {mfn}')
    # Generate a properties object so we can get a distortion object
    # Note that by specifying dx/dz, which are introducing distortion, which
    # is quite severe in this case. Try setting the distortion corection
    # factors to 0 in the viewer to see how warped the raw data is!
    info = VolumeProperties(
        Lx = 70,
        Ly = 90,
        Lz = 100,
        Nx = 64,
        Ny = 64,
        Nz = 128,
        dx = 75,
        dz = 100
    )

    # Create a grid of indices, which correspond to the voxel indices...
    indices = np.mgrid[:info['Nx'], :info['Ny'], :info['Nz']].T # Transposing changes shape to [Nz, Ny, Nx, 3]
    # ... and convert this to physical positions in the distorted volume.
    distortion = get_distortion_model(info)
    X = distortion.convert(indices, 'index-xyz', 'physical')

    # Each point will be a Gaussian blob
    # Generate the frames
    # NOTE(review): `gaussians` is also undefined in this file as shown.
    frames = [np.clip(gaussians(r_cube, X, GAUSSIAN_SIZE) * 200, 0, 255).astype('u1') for r_cube in rot_cube]
    # ... and save them!
    VolumetricMovie(frames, info).save(mfn)
else:
    print(f'Found volumetric movie: {mfn}')
    print('Skipping creation; delete file to regenerate...')

dfn = os.path.join(output_dir, 'tracking.pickle')
if not os.path.exists(dfn):
    print("Tracking points...")
    import trackpy as tp
    # Open the 3D movie
    vol = open_3D_movie(mfn)
    # Identify the points in the volume
    data = tp.batch(vol, 5)
    # Note: the second parameter is the size in the frame. In this case,
    # 5 is approximately right, but you would need to tweak for a different
    # data set!
    # Link the particles into tracks
    data = tp.link(data, 10, memory=3)
    print(f'Found {data.particle.max()+1} particle tracks')
    # Find the physical coordinates. These will appear as the columns "xc", "yc",
    # and "zc" in the data frame *after* you run this command, and will be in the
    # physical units of the data (i.e. L and not N)
    vol.distortion.update_data_frame(data)
    # Save to a pickle file
    with open(dfn, "wb") as f:
        pickle.dump(data, f)
else:
    print(f"Found existing track data, delete {dfn} to regenerate...")
    with open(dfn, "rb") as f:
        data = pickle.load(f)

# Let's make a movie where each point is a sphere!
pfn = os.path.join(output_dir, 'tracking_points_frame%d.ply')
from muvi.mesh import generate_glyphs
for i in range(FRAMES):
    points = np.array(data[data.frame == i][["xc", "yc", "zc"]])
    generate_glyphs(points, "sphere", a=GAUSSIAN_SIZE*1.5).save(pfn % i)

# Let's make a movie with vector arrows for each point
vfn = os.path.join(output_dir, 'tracking_vel_frame%d.ply')
# First we need to construct a trajectory for each particle track, which
# we'll do with UnivariateSplines.
# This is a lot to keep track of, so we'll define a class for this!
# Note: err is the expected error for each particle location. Setting this
# higher will result in a smoother trajectory -- note that the output
# locations from the trajectories will *not* exactly match the input positions
# (unless you set s=0, which you probably shouldn't!)
# Build the trajectory object
# NOTE(review): the `Trajectories` class described above is not defined in
# this file as shown -- verify it exists upstream.
traj = Trajectories(data, dt=DT)
# And make the frames!
for i in range(FRAMES):
    X, V = traj(i * DT)
    generate_glyphs(X, "tick", a=GAUSSIAN_SIZE*5, N=V, color=mag(V), clim=(0, 150)).save(vfn % i)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
15069,
33448,
37616,
15983,
694,
1008,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 2.774664 | 1,784 |
from datetime import datetime
from ..schema import BaseTransformer
class Transformer(BaseTransformer):
    """Transform Utah raw data for consolidation."""

    # Two-letter state postal abbreviation.
    postal_code = "UT"

    # Canonical field name -> column header in the raw Utah data.
    fields = dict(
        company="Company Name",
        location="Location",
        notice_date="Date of Notice",
        jobs="Affected Workers",
    )

    # Date formats tried in order when parsing notice dates.
    date_format = ("%m/%d/%Y", "%m/%d/%y")

    # Raw date strings that fail to parse, mapped to their intended values
    # (e.g. '09/31/10' is an impossible date, normalised to Sep 30).
    date_corrections = {
        "03/09/2020&": datetime(2020, 3, 9),
        "01/05/18/": datetime(2018, 1, 5),
        "03/05/14 Updated": datetime(2014, 3, 5),
        "09/31/10": datetime(2010, 9, 30),
        "05/2009": datetime(2009, 5, 1),
        "01/07//09": datetime(2009, 1, 7),
    }

    # Non-numeric worker counts mapped to their numeric values.
    jobs_corrections = {
        "645 Revised": 645,
    }
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
11485,
15952,
2611,
1330,
7308,
8291,
16354,
628,
198,
4871,
3602,
16354,
7,
14881,
8291,
16354,
2599,
198,
220,
220,
220,
37227,
41762,
10202,
8246,
1366,
329,
31941,
526,
15931,
628,
... | 2.189911 | 337 |
#encoding:utf8
'''
Created on 2016-4-5
@author: hadoop
'''
import os

# Entry point: convert LDA-C formatted data to the mrs-lda format.
# NOTE(review): parse_ldac_to_mrslda is not defined in this file as shown.
parse_ldac_to_mrslda()
| [
2,
12685,
7656,
25,
40477,
23,
198,
7061,
6,
198,
41972,
319,
1584,
12,
19,
12,
20,
198,
198,
31,
9800,
25,
550,
11224,
198,
7061,
6,
198,
11748,
28686,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.703704 | 81 |
# Atharv Kolhar
# Python Bytes
"""
Functions
"""
# Definition of Function
# Bug fix: add() and subtract() were called below but never defined,
# so the script raised NameError; minimal definitions restored here.
def add(first, second):
    """Return the sum of the two operands."""
    return first + second


def subtract(first, second):
    """Return the difference of the two operands (first - second)."""
    return first - second


# Use of function
c = 10
d = 8

total = add(c, d)
diff = subtract(c, d)

print(total)
print(diff)

e = 30
f = 20

ef_total = add(e, f)
ef_diff = subtract(e, f)

print(ef_total, ef_diff)

# End
| [
2,
13548,
283,
85,
25910,
9869,
198,
2,
11361,
2750,
4879,
198,
198,
37811,
198,
24629,
2733,
198,
37811,
198,
198,
2,
30396,
286,
15553,
628,
198,
2,
5765,
286,
2163,
198,
66,
796,
838,
198,
67,
796,
807,
198,
198,
23350,
796,
75... | 2.277311 | 119 |
import numpy as np
from abc import ABC
import air_hockey.vector as V
import air_hockey.phy_const as P
# updates position and velocity
# updates position and velocity | [
11748,
299,
32152,
355,
45941,
198,
6738,
450,
66,
1330,
9738,
198,
11748,
1633,
62,
71,
8337,
13,
31364,
355,
569,
198,
11748,
1633,
62,
71,
8337,
13,
6883,
62,
9979,
355,
350,
628,
220,
220,
220,
1303,
5992,
2292,
290,
15432,
628,... | 3.431373 | 51 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import hug
from typot.pull_request import PullRequest
from typot.spell_checker import SpellChecker
version = 1
@hug.get("/ping", versions=version)
@hug.local()
@hug.post("/typot", versions=version)
@hug.local()
| [
11748,
28686,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
366,
492,
30487,
4008,
198,
11748,
16225,
198,
6738,
2170,
313,
13,
31216,
62,... | 2.72973 | 111 |
from pyamg import gallery, smoothed_aggregation_solver
from numpy import ones
from pylab import *

# 2-D Poisson problem on a 100x100 grid, stored as CSR.
A = gallery.poisson( (100,100), format='csr')
# Smoothed-aggregation AMG hierarchy with 'simple' prolongation smoothing.
ml = smoothed_aggregation_solver(A, smooth='simple')
b = ones((A.shape[0],1));
res=[]
# Solve to 1e-8 tolerance, recording the per-iteration residual norms.
x = ml.solve(b, tol=1e-8, residuals=res)

# Plot the convergence history on a log scale (initial residual skipped).
semilogy(res[1:])
xlabel('iteration')
ylabel('residual norm')
show()
| [
6738,
12972,
321,
70,
1330,
15604,
11,
32746,
704,
62,
9460,
43068,
62,
82,
14375,
198,
6738,
299,
32152,
1330,
3392,
198,
6738,
279,
2645,
397,
1330,
1635,
198,
198,
32,
796,
15604,
13,
7501,
30927,
7,
357,
3064,
11,
3064,
828,
579... | 2.533333 | 135 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
from .compressor import Compressor, LayerInfo
_logger = logging.getLogger(__name__)
__all__ = ['Pruner']
class Pruner(Compressor):
"""
The abstract class for pruning algorithm. Inherit this class and implement the `_reset_tools` to customize a pruner.
"""
def _wrap_modules(self, layer: LayerInfo, config: Dict):
"""
Create a wrapper module to replace the original one.
Parameters
----------
layer
The layer to instrument the mask.
config
The configuration for generating the mask.
"""
_logger.debug("Module detected to compress : %s.", layer.name)
wrapper = PrunerModuleWrapper(layer.module, layer.name, config, self)
assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
# move newly registered buffers to the same device of weight
wrapper.to(layer.module.weight.device)
return wrapper
def load_masks(self, masks: Dict[str, Dict[str, Tensor]]):
"""
Load an exist masks on the wrapper. You can train the model with an exist masks after load the masks.
Parameters
----------
masks
The masks dict with format {'op_name': {'weight': mask, 'bias': mask}}.
"""
wrappers = self.get_modules_wrapper()
for name, layer_mask in masks.items():
assert name in wrappers, '{} is not in wrappers of this pruner, can not apply the mask.'.format(name)
if layer_mask.get('weight') is not None:
assert hasattr(wrappers[name], 'weight_mask'), 'There is no attribute weight_mask in wrapper.'
setattr(wrappers[name], 'weight_mask', layer_mask.get('weight'))
if layer_mask.get('bias') is not None:
assert hasattr(wrappers[name], 'bias_mask'), 'There is no attribute bias_mask in wrapper.'
setattr(wrappers[name], 'bias_mask', layer_mask.get('bias'))
def compress(self) -> Tuple[Module, Dict[str, Dict[str, Tensor]]]:
"""
Returns
-------
Tuple[Module, Dict]
Return the wrapped model and mask.
"""
return self.bound_model, {}
# NOTE: need refactor dim with supporting list
def show_pruned_weights(self, dim: int = 0):
"""
Log the simulated prune sparsity.
Parameters
----------
dim
The pruned dim.
"""
for _, wrapper in self.get_modules_wrapper().items():
weight_mask = wrapper.weight_mask
mask_size = weight_mask.size()
if len(mask_size) == 1:
index = torch.nonzero(weight_mask.abs() != 0, as_tuple=False).tolist()
else:
sum_idx = list(range(len(mask_size)))
sum_idx.remove(dim)
index = torch.nonzero(weight_mask.abs().sum(sum_idx) != 0, as_tuple=False).tolist()
_logger.info(f'simulated prune {wrapper.name} remain/total: {len(index)}/{weight_mask.size(dim)}')
def export_model(self, model_path: str, mask_path: Optional[str] = None):
    """
    Export pruned model weights, masks and onnx model(optional).

    The model is temporarily unwrapped, the masks are multiplied into the
    weights/biases, the state_dict (and optionally the mask dict) is saved,
    and the model is wrapped again.

    Parameters
    ----------
    model_path
        Path to save pruned model state_dict. The weight and bias have already
        multiplied the masks.
    mask_path
        Path to save mask dict.
    """
    assert self.bound_model is not None, 'The bound model reference has been cleared.'
    assert model_path is not None, 'model_path must be specified.'
    mask_dict = {}
    self._unwrap_model()
    # Re-wrap even if multiplying the masks or saving fails, so the pruner is
    # never left with a permanently unwrapped model.
    try:
        for name, wrapper in self.get_modules_wrapper().items():
            weight_mask = wrapper.weight_mask
            bias_mask = wrapper.bias_mask
            if weight_mask is not None:
                mask_sum = weight_mask.sum().item()
                mask_num = weight_mask.numel()
                _logger.debug('Layer: %s Sparsity: %.4f', name, 1 - mask_sum / mask_num)
                wrapper.module.weight.data = wrapper.module.weight.data.mul(weight_mask)
            if bias_mask is not None:
                wrapper.module.bias.data = wrapper.module.bias.data.mul(bias_mask)
            # save mask to dict
            mask_dict[name] = {"weight": weight_mask, "bias": bias_mask}
        torch.save(self.bound_model.state_dict(), model_path)
        _logger.info('Model state_dict saved to %s', model_path)
        if mask_path is not None:
            torch.save(mask_dict, mask_path)
            _logger.info('Mask dict saved to %s', mask_path)
    finally:
        self._wrap_model()
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
18931,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
309,
228... | 2.33908 | 2,088 |
# -*- coding: utf-8 -*-
"""root link layer protocol
:mod:`pcapkit.protocols.link.link` contains :class:`~pcapkit.protocols.link.link.Link`,
which is a base class for link layer protocols, e.g. :class:`~pcapkit.protocols.link.link.arp.ARP`/InARP,
:class:`~pcapkit.protocols.link.link.ethernet.Ethernet`, :class:`~pcapkit.protocols.link.link.l2tp.L2TP`,
:class:`~pcapkit.protocols.link.link.ospf.OSPF`, :class:`~pcapkit.protocols.link.link.rarp.RARP`/DRARP and etc.
"""
import collections
from typing import TYPE_CHECKING
from pcapkit.const.reg.ethertype import EtherType as RegType_EtherType
from pcapkit.protocols.protocol import Protocol
from pcapkit.utilities.exceptions import UnsupportedCall
if TYPE_CHECKING:
from typing import NoReturn
from typing_extensions import Literal
__all__ = ['Link']
class Link(Protocol):  # pylint: disable=abstract-method
    """Abstract base class for link layer protocol family.

    This class currently supports parsing of the following protocols, which are
    registered in the :attr:`self.__proto__ <pcapkit.protocols.link.Link.__proto__>`
    attribute:

    .. list-table::
       :header-rows: 1

       * - Index
         - Protocol
       * - 0x0806
         - :class:`~pcapkit.protocols.link.arp.ARP`
       * - 0x8035
         - :class:`~pcapkit.protocols.link.rarp.RARP`
       * - 0x8100
         - :class:`~pcapkit.protocols.link.vlan.VLAN`
       * - 0x0800
         - :class:`~pcapkit.protocols.internet.ipv4.IPv4`
       * - 0x86DD
         - :class:`~pcapkit.protocols.internet.ipv6.IPv6`
       * - 0x8137
         - :class:`~pcapkit.protocols.internet.ipx.IPX`

    """

    ##########################################################################
    # Defaults.
    ##########################################################################

    #: Layer of protocol.
    __layer__ = 'Link'  # type: Literal['Link']

    #: DefaultDict[int, tuple[str, str]]: Protocol index mapping for decoding next layer,
    #: c.f. :meth:`self._decode_next_layer <pcapkit.protocols.protocol.Protocol._decode_next_layer>`
    #: & :meth:`self._import_next_layer <pcapkit.protocols.link.link.Link._import_next_layer>`.
    #: Unregistered EtherType values fall back to the raw-packet protocol.
    __proto__ = collections.defaultdict(
        lambda: ('pcapkit.protocols.raw', 'Raw'),
        {
            RegType_EtherType.Address_Resolution_Protocol: ('pcapkit.protocols.link.arp', 'ARP'),
            RegType_EtherType.Reverse_Address_Resolution_Protocol: ('pcapkit.protocols.link.rarp', 'RARP'),
            RegType_EtherType.Customer_VLAN_Tag_Type: ('pcapkit.protocols.link.vlan', 'VLAN'),
            RegType_EtherType.Internet_Protocol_version_4: ('pcapkit.protocols.internet.ipv4', 'IPv4'),
            RegType_EtherType.Internet_Protocol_version_6: ('pcapkit.protocols.internet.ipv6', 'IPv6'),
            # c.f., https://en.wikipedia.org/wiki/EtherType#Values
            0x8137: ('pcapkit.protocols.internet.ipx', 'IPX'),
        },
    )

    ##########################################################################
    # Properties.
    ##########################################################################

    # protocol layer
    @property
    def layer(self) -> 'Literal["Link"]':
        """Protocol layer."""
        return self.__layer__

    ##########################################################################
    # Methods.
    ##########################################################################

    @classmethod
    def register(cls, code: 'RegType_EtherType', module: str, class_: str) -> 'None':
        """Register a new protocol class.

        Arguments:
            code: protocol code as in :class:`~pcapkit.const.reg.ethertype.EtherType`
            module: module name
            class_: class name

        Notes:
            The full qualified class name of the new protocol class
            should be as ``{module}.{class_}``.

        """
        cls.__proto__[code] = (module, class_)

    ##########################################################################
    # Data models.
    ##########################################################################

    @classmethod
    def __index__(cls) -> 'NoReturn':  # pylint: disable=invalid-index-returned
        """Numeral registry index of the protocol.

        Raises:
            UnsupportedCall: This protocol has no registry entry.

        """
        raise UnsupportedCall(f'{cls.__name__!r} object cannot be interpreted as an integer')

    ##########################################################################
    # Utilities.
    ##########################################################################

    def _read_protos(self, size: int) -> 'RegType_EtherType':
        """Read next layer protocol type.

        Arguments:
            size: buffer size (number of bytes of the EtherType field to read)

        Returns:
            Internet layer protocol enumeration.

        """
        _byte = self._read_unpack(size)
        _prot = RegType_EtherType.get(_byte)
        return _prot
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
15763,
2792,
7679,
8435,
198,
198,
25,
4666,
25,
63,
79,
11128,
15813,
13,
11235,
4668,
82,
13,
8726,
13,
8726,
63,
4909,
1058,
4871,
25,
63,
93,
79,
11128,
1... | 2.545132 | 1,972 |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vapi.metadata.
#---------------------------------------------------------------------------
"""
The :mod:`com.vmware.vapi.metadata_client` module provides metadata classes.
These are classes that provide different facets of API information. Clients can
use these classes to:
* Discover APIs available in the infrastructure.
* Fetch metadata that can be used to build presentation layers like CLI, REST,
etc.
* Fetch authentication and authorization metadata.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class SourceType(Enum):
    """
    The ``SourceType`` class defines the types of sources for API metadata. You
    specify the type of source when adding a metadata source to a metadata
    service.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    FILE = None
    """
    Indicates the metadata source is a JSON file.

    """
    REMOTE = None
    """
    Indicates the metadata source is a remote server.

    """
    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`SourceType` instance.
        """
        # NOTE(review): generated vAPI bindings call Enum.__init__ unbound with
        # the raw value; Enum here is vmware.vapi.bindings.enum.Enum (not the
        # stdlib enum) -- confirm against the bindings before changing.
        Enum.__init__(string)

# Populate the enum's allowed values and register its vAPI binding type
# (boilerplate emitted by the vAPI code generator; do not edit by hand).
SourceType._set_values([
    SourceType('FILE'),
    SourceType('REMOTE'),
])
SourceType._set_binding_type(type.EnumType(
    'com.vmware.vapi.metadata.source_type',
    SourceType))
class SourceCreateSpec(VapiStruct):
    """
    The ``SourceCreateSpec`` class contains the registration information for a
    metadata source.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    # Exactly one of 'filepath' (type FILE) or 'address' (type REMOTE) is
    # required, depending on the value of 'type'.
    _validator_list = [
        UnionValidator(
            'type',
            {
                'FILE' : [('filepath', True)],
                'REMOTE' : [('address', True)],
            }
        ),
    ]

    def __init__(self,
                 description=None,
                 type=None,
                 filepath=None,
                 address=None,
                ):
        """
        :type description: :class:`str`
        :param description: English language human readable description of the source.
        :type type: :class:`SourceType`
        :param type: Type of the metadata source.
        :type filepath: :class:`str`
        :param filepath: Absolute file path of the metamodel metadata file that has the
            metamodel information about one component element.
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`SourceType.FILE`.
        :type address: :class:`str`
        :param address: Connection information of the remote server. This should be of the
            format http(s)://IP:port/namespace.
            The remote server should contain the classes in
            :mod:`com.vmware.vapi.metadata.metamodel_client` module. It could
            expose metamodel information of one or more components.
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`SourceType.REMOTE`.
        """
        self.description = description
        self.type = type
        self.filepath = filepath
        self.address = address
        VapiStruct.__init__(self)

# Register the struct's vAPI binding type (boilerplate emitted by the vAPI
# code generator; do not edit by hand).
SourceCreateSpec._set_binding_type(type.StructType(
    'com.vmware.vapi.metadata.source_create_spec', {
        'description': type.StringType(),
        'type': type.ReferenceType(__name__, 'SourceType'),
        'filepath': type.OptionalType(type.StringType()),
        'address': type.OptionalType(type.URIType()),
    },
    SourceCreateSpec,
    False,
    None))
class SourceInfo(VapiStruct):
    """
    Metadata source info.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """

    # 'file_name' is required for type FILE; 'remote_addr' and 'msg_protocol'
    # are required for type REMOTE.
    _validator_list = [
        UnionValidator(
            'type',
            {
                'FILE' : [('file_name', True)],
                'REMOTE' : [('remote_addr', True), ('msg_protocol', True)],
            }
        ),
    ]

    def __init__(self,
                 type=None,
                 file_name=None,
                 remote_addr=None,
                 msg_protocol=None,
                ):
        """
        :type type: :class:`SourceType`
        :param type: Type of the metadata source
        :type file_name: :class:`str`
        :param file_name: Name of the metadata source file
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`SourceType.FILE`.
        :type remote_addr: :class:`str`
        :param remote_addr: Address of the remote metadata source
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`SourceType.REMOTE`.
        :type msg_protocol: :class:`str`
        :param msg_protocol: Message protocol to be used
            This attribute is optional and it is only relevant when the value
            of ``type`` is :attr:`SourceType.REMOTE`.
        """
        self.type = type
        self.file_name = file_name
        self.remote_addr = remote_addr
        self.msg_protocol = msg_protocol
        VapiStruct.__init__(self)

# Register the struct's vAPI binding type (boilerplate emitted by the vAPI
# code generator; do not edit by hand).
SourceInfo._set_binding_type(type.StructType(
    'com.vmware.vapi.metadata.source_info', {
        'type': type.ReferenceType(__name__, 'SourceType'),
        'file_name': type.OptionalType(type.StringType()),
        'remote_addr': type.OptionalType(type.StringType()),
        'msg_protocol': type.OptionalType(type.StringType()),
    },
    SourceInfo,
    False,
    None))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
10097,
32284,
198,
2,
15069,
13130,
37754,
11,
3457,
13,
220,
1439,
2489,
10395,
13,
198,
198,
2,
47044,
46,
24700,
1137,
11617,
45811,
1377,
8410,
5626,
19164,
5064,... | 2.547024 | 2,722 |
__copyright__ = "Copyright (C) 2021 Andreas Andersson"
import unittest, datetime, os
from levelmodel import LevelModel
TEST_DB = "test_pelletmeter.db"
class TestLevelModel(unittest.TestCase):
"""Unit tests for the levelmodel.py module."""
@classmethod
class TimeStamp:
    """Produce fake timestamps for which "now" is a fixed, absolute value."""

    # 2021-10-20, 22:00 (local time) -- arbitrary reference point. (Why not?)
    TS = 1634760000

    def __init__(self):
        """Create a new TimeStamp object; now() starts out at TimeStamp.TS."""
        self.reset()

    def reset(self):
        """Reset now() back to TimeStamp.TS."""
        self._now = datetime.datetime.fromtimestamp(self.TS)

    def now(self):
        """Return the fake "now" as a POSIX timestamp."""
        return self._now.timestamp()

    def timetravel(self, **kwargs):
        """Shift now() by a datetime.timedelta built from **kwargs."""
        self._now = self._now + datetime.timedelta(**kwargs)

    def get(self, **kwargs):
        """Return a timestamp at a datetime.timedelta offset from now()."""
        offset = datetime.timedelta(**kwargs)
        return (self._now + offset).timestamp()
if __name__ == '__main__':
    # Run the unit tests when this module is executed directly.
    unittest.main()
834,
22163,
4766,
834,
796,
366,
15269,
357,
34,
8,
33448,
33728,
25519,
1559,
1,
198,
198,
11748,
555,
715,
395,
11,
4818,
8079,
11,
28686,
198,
6738,
1241,
19849,
1330,
5684,
17633,
198,
198,
51,
6465,
62,
11012,
796,
366,
9288,
6... | 2.626327 | 471 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def sentences_with_commas(text):
    """Return the sentences of *text* that contain a comma.

    Sentence terminators '!' and '?' are normalized to '.', runs of dots
    (ellipses) are collapsed to a single '.', and each returned sentence
    keeps a trailing '.'.
    """
    # Normalize the sentence-ending characters.
    text = text.replace("!", ".").replace("?", ".")
    # Remove all ellipses (collapse any run of dots to a single dot).
    while ".." in text:
        text = text.replace("..", ".")
    # Split into sentences and keep only those containing a comma,
    # restoring the terminating dot.
    return [sentence + '.' for sentence in text.split(".") if "," in sentence]


if __name__ == '__main__':
    with open('text.txt', 'r', encoding="utf8") as f:
        text = f.read()
    # Print the sentences that contain commas.
    for line in sentences_with_commas(text):
        print("{}".format(line))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
351,
1280,... | 1.632184 | 435 |
from os import remove
from os.path import isfile
from tempfile import NamedTemporaryFile
from unittest import TestCase
from opwen_email_server.utils import temporary
| [
6738,
28686,
1330,
4781,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
198,
6738,
20218,
7753,
1330,
34441,
12966,
5551,
8979,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1034,
21006,
62,
12888,
62,
15388,
13,
26791,
133... | 3.840909 | 44 |
#!/usr/bin/python
#
#The MIT CorrelX Correlator
#
#https://github.com/MITHaystack/CorrelX
#Contact: correlX@haystack.mit.edu
#Project leads: Victor Pankratius, Pedro Elosegui Project developer: A.J. Vazquez Alvarez
#
#Copyright 2017 MIT Haystack Observatory
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#------------------------------
#------------------------------
#Project: CorrelX.
#File: rsvf.py.
#Author: A.J. Vazquez Alvarez (ajvazquez@haystack.mit.edu)
#Description:
"""
Reducer: performs VLBI correlation from a text file with the lines for all the stations,
sorted as defined in const_mapred.py based on the key and format defined in msvf.get_pair_str().
Parameters
----------
See lib_mapredcorr.get_reducer_params_str()
Returns
-------
See rsvf.get_lines_out_for_all() for visibilities.
See rsvf.get_lines_stats() for statistics.
Notes
-----
|
| **Reader:
|
| Expecting lines with
| See rsvf.split_input_line()
"""
#History:
#initial version: 2015.12 ajva
#MIT Haystack Observatory
from __future__ import print_function,division
import sys
import os
import base64
import numpy as np
if os.environ.get("is_legacy"):
import lib_quant
from lib_quant import *
import lib_fx_stack
from lib_fx_stack import *
# Constants for mapper and reducer
from const_mapred import *
from lib_pcal import *
# Vector quantization # VQ disabled
#from lib_vq import *
from lib_debug import *
from const_performance import *
from const_ini_files import *
else:
from cxs.app.helpers.const_debug import BYPASS_REDUCER, DEBUG_GENERAL_R, DEBUG_DELAYS, DEBUG_HSTACK, DEBUG_FRAC_OVER
from cxs.app.helpers.lib_debug import print_debug_r_delays_header, print_debug_r_hstack_header,\
print_debug_r_frac_over_header, print_key
from cxs.app.base.const_mapred import FIELD_SEP, KEY_SEP, SF_SEP, META_LEN
from cxs.app.base.const_mapred import INDEX_SHIFT_DELAY, INDEX_FRAC_DELAY, INDEX_ABS_DELAY,\
INDEX_RATE_DELAY_0, INDEX_RATE_DELAY_1, INDEX_RATE_DELAY_2, INDEX_RATE_DELAY_REF,\
INDEX_RATE_CLOCK_0, INDEX_RATE_CLOCK_1, INDEX_RATE_ZC_0, INDEX_RATE_ZC_1, INDEX_RATE_CLOCK_REF,\
INDEX_RATE_M_ONLY, INDEX_RATE_C_ONLY, INDEX_RATE_DIFF_FRAC, INDEX_NUM_SAMPLES, INDEX_FS,\
INDEX_BITS_PER_SAMPLE, INDEX_FIRST_SAMPLE, INDEX_DATA_TYPE, INDEX_NBINS_PCAL, INDEX_PCAL_FREQ,\
INDEX_CHANNEL_INDEX, INDEX_CHANNEL_FREQ, INDEX_ACC_TIME, INDEX_ENCODING, INDEX_SIDEBAND
from cxs.app.base.const_mapred import ENCODE_B64, ENCODE_B64_REDUCER_OUTPUT, B64_VIS_PREFIX, SP_VIS_PREFIX, DP_VIS_PREFIX
from cxs.config.const_ini_files import C_INI_MEDIA_C_VQ, C_INI_MEDIA_C_NO
from cxs.config.const_ini_files import C_INI_CR_WINDOW_SQUARE
from cxs.computation.base.const_performance import COMPUTE_FOR_SUB_ACC_PERIOD
from cxs.computation.quantization.lib_quant import get_samples
from cxs.computation.fx.lib_fx_stack import window_and_fft, compute_fx_for_all, normalize_mat, multiply_accumulate
from cxs.computation.pcal.lib_pcal import normalize_pcal, adjust_shift_acc_pcal, reshape_pcal, accumulate_pcal
# fix
from cxs.app.helpers.lib_vq import get_vq_decoded_samples, dictc01bit, v1_quant, v2_quant
# -Debugging-
# By default: 0
# If 1: inputs are by-passed to the output
DEBUGGING = BYPASS_REDUCER # 0
###########################################
# Input
###########################################
def split_input_line(line):
    """
    Get sub-keys from read line.

    Parameters
    ----------
    line : str
        whole line read from input (``<key><KEY_SEP><metadata and samples>``).

    Returns
    -------
    key_pair_accu : str
        part of the key with pair (station-pol A and station-pol B) and accumulation period.
    key_sample : str
        part of the key with sample number.
    key_station
        station identifier.
    vector_split
        second part of the header with information necessary for the processing of the samples.
    is_autocorr
        used in one-baseline-per-task mode, indicates that this pair is an autocorrelation and therefore
        these samples will be correlated with themselves.
    key_station_pol
        station-polarization identifier.
    char_type
        identifies the mode of operation, as defined in the mapper:
        'x' for all-baselines-per-task, 'r' for linear scaling with the number
        of stations, 'y' for one-baseline-per-task.
    accu_block
        integration period number.

    Notes
    -----
    TO DO: Move char_type (char_p in mapper) to constants section.
    """
    # Multi-key separator into common format (Hadoop will change the last
    # FIELD_SEP of the key into KEY_SEP), so collapse the combinations first.
    line=line.replace(FIELD_SEP+KEY_SEP,KEY_SEP)
    line=line.replace(KEY_SEP+KEY_SEP,KEY_SEP)
    # Split into key and payload (metadata header + samples).
    key, vector = line.split(KEY_SEP,1)
    # Key layout (from the splits below): <pair+accu> 'f' <sample> 's' <station-pol>.
    key_pair_accu_sample, key_station_pol = key.split('s',1)
    key_station = key_station_pol.split(SF_SEP)[0]
    key_pair_accu, key_sample = key_pair_accu_sample.split('f',1)
    vector_split = vector.split(' ')
    key_pair_accu_split = key_pair_accu.split(FIELD_SEP)
    # Same station-pol on both sides of the baseline -> autocorrelation.
    is_autocorr=0
    if key_pair_accu_split[1]==key_pair_accu_split[2]:
        is_autocorr=1
    char_type=key[1]
    accu_block=float(key_pair_accu_split[5])
    return([key_pair_accu, key_sample, key_station, vector_split,is_autocorr,key_station_pol,char_type,accu_block])
def extract_params_split(vector_split):
    """
    Get parameters from line header (header of the data, not part of the key).

    Parameters
    ----------
    vector_split : list of str
        second part of the header with all the parameters provided by the mapper,
        associated to the samples to be processed.

    Returns
    -------
    bits_per_sample
        number of bits for each component of the sample.
    block_first_sample
        ``<first_sample>.<channel_index>`` identifier string.
    data_type
        sample type, 'r' for real, 'c' for complex.
    encoding
        [unused] initially used for introducing compression in the data (VQ), currently not used.
    encoding_width
        [unused] also associated to compression.
    n_bins_pcal
        number of samples for the windows to be accumulated for the pcal signal.
    num_samples
        number of samples in this line (complex samples if data_type is 'c').
    abs_delay
        absolute delay.
    rate_delay
        delay information corresponding to these samples (polynomials, etc).
    fs
        sampling frequency.
    fs_pcal
        [unused] phase calibration signal frequency spacing.
    freq_channel
        sky frequency.
    first_sample
        first sample number (integer starting at 0).
    fractional_sample_delay
        fractional sample delay corresponding to the sample 0 of this stream.
    accumulation_time
        time duration of the integration period.
    shift_delay
        integer number of samples offset for the sample 0 of this stream.
    sideband
        single side band side, 'l' for LSB, 'u' for USB.

    Notes
    -----
    See const_mapred.py for the INDEX_* constant positions and descriptions.
    """
    shift_delay= int(vector_split[INDEX_SHIFT_DELAY])
    fractional_sample_delay= float(vector_split[INDEX_FRAC_DELAY])
    abs_delay= float(vector_split[INDEX_ABS_DELAY])
    # Delay model: polynomial coefficients plus clock/reference terms,
    # in the order expected by the delay-correction routines.
    rate_delay= [float(vector_split[INDEX_RATE_DELAY_0]),\
                 float(vector_split[INDEX_RATE_DELAY_1]),\
                 float(vector_split[INDEX_RATE_DELAY_2]),\
                 float(vector_split[INDEX_RATE_DELAY_REF]),\
                 float(vector_split[INDEX_RATE_CLOCK_0]),\
                 float(vector_split[INDEX_RATE_CLOCK_1]),\
                 float(vector_split[INDEX_RATE_ZC_0]),\
                 float(vector_split[INDEX_RATE_ZC_1]),\
                 float(vector_split[INDEX_RATE_CLOCK_REF]),\
                 float(vector_split[INDEX_RATE_M_ONLY]),\
                 float(vector_split[INDEX_RATE_C_ONLY]),
                 float(vector_split[INDEX_RATE_DIFF_FRAC])]
    num_samples= int(vector_split[INDEX_NUM_SAMPLES])
    fs= float(vector_split[INDEX_FS])
    bits_per_sample = int(vector_split[INDEX_BITS_PER_SAMPLE])
    first_sample= int(vector_split[INDEX_FIRST_SAMPLE])
    data_type = vector_split[INDEX_DATA_TYPE]
    n_bins_pcal = int(float(vector_split[INDEX_NBINS_PCAL])//1)
    fs_pcal= float(vector_split[INDEX_PCAL_FREQ])
    channel_index_str = vector_split[INDEX_CHANNEL_INDEX]
    freq_channel = float(vector_split[INDEX_CHANNEL_FREQ])
    accumulation_time = float(vector_split[INDEX_ACC_TIME])
    encoding = vector_split[INDEX_ENCODING]
    sideband= vector_split[INDEX_SIDEBAND]
    block_first_sample = vector_split[INDEX_FIRST_SAMPLE]+SF_SEP+channel_index_str
    if data_type=='c':
        # Complex data: each sample occupies two transmitted values (I/Q),
        # so halve the counts to express them in complex samples.
        num_samples=num_samples//2
        shift_delay=int(shift_delay//2)
    if encoding == C_INI_MEDIA_C_VQ:
        encoding_width = int(vector_split[META_LEN])
    else:
        encoding_width=0
    return([bits_per_sample,block_first_sample,data_type,encoding, encoding_width,n_bins_pcal,num_samples,abs_delay,rate_delay,fs,fs_pcal,freq_channel,first_sample,fractional_sample_delay,accumulation_time,shift_delay,sideband])
def decode_samples_b64(vector_split_samples,vector_split_encoding):
    """
    Decode a base64 payload into packed samples.

    Parameters
    ----------
    vector_split_samples
        string with the samples (a component of the list vector_split).
    vector_split_encoding
        compression (VQ) encoding, disabled by default.

    Returns
    -------
    out : 1D numpy array
        samples (components if complex), packed in binary format (uint8);
        samples still need to be "dequantized". Empty list if base64
        decoding does not apply.
    """
    b64_active = (ENCODE_B64 == 1)
    plain_encoding = (vector_split_encoding == C_INI_MEDIA_C_NO)
    if not (b64_active and plain_encoding):
        return []
    raw = base64.b64decode(vector_split_samples)
    return np.frombuffer(raw, dtype=np.uint8)
###########################################
# Output
###########################################
def get_key_all_out(char_type,F_ind_s0,F_ind_s1,acc_str):
    """
    Build the output key for a reducer line.

    Parameters
    ----------
    char_type : char
        operation mode (see split_input_line()).
    F_ind_s0
        first station-polarization for this baseline.
    F_ind_s1
        second station-polarization for this baseline.
    acc_str : str
        multi-key for output line.

    Returns
    -------
    output : str
        key for output line (FIELD_SEP-joined, with trailing separator).
    """
    fields = ["p" + char_type, F_ind_s0, F_ind_s1, acc_str, ""]
    return FIELD_SEP.join(fields)
def get_str_r_out(current_key_pair_accu,count_acc,current_vector_split,current_block_first_sample,accu_prod_div, single_precision):
    """
    Get output string for reducer.

    Parameters
    ----------
    current_key_pair_accu
        part of the key with the baseline and the accumulation multi-key.
    count_acc
        number of accumulations.
    current_vector_split
        list with metadata.
    current_block_first_sample
        <first_sample>.<channel_index>
    accu_prod_div : complex 1D np.array
        visibilities for one baseline, one band and one accumulation period.
    single_precision : bool
        if True, mark base64-encoded output with the single-precision prefix,
        otherwise with the double-precision prefix (only relevant when
        ENCODE_B64_REDUCER_OUTPUT is enabled).

    Returns
    -------
    str_print : str
        output line with visibilities.
    """
    current_vector_split_sub_print = current_vector_split[:(META_LEN-1)]
    # Pcal metadata is not meaningful in the visibility output; zero it out.
    current_vector_split_sub_print[INDEX_PCAL_FREQ] = str(0)
    current_vector_split_sub_print[INDEX_NBINS_PCAL] = str(0)
    if ENCODE_B64_REDUCER_OUTPUT:
        # Binary output: prefix identifies base64 payload and float precision.
        prefix = SP_VIS_PREFIX if single_precision else DP_VIS_PREFIX
        samples = B64_VIS_PREFIX + prefix + base64.b64encode(accu_prod_div).decode("utf-8")
    else:
        # Text output: space-separated complex values.
        samples = ' '.join(map(str, accu_prod_div))
    str_print = current_key_pair_accu+'sxa'+str(count_acc)+KEY_SEP+' '.join(current_vector_split_sub_print)+\
                ' '+current_block_first_sample+' '+samples
    return(str_print)
def get_str_pcal_out(acc_pcal,current_n_bins_pcal,count_acc_pcal,current_key_pair_accu,current_vector_split,current_block_first_sample):
    """
    [Only used in one-baseline-per-task mode]
    Get output string for phase calibration.

    Parameters
    ----------
    acc_pcal : complex 1D np.array
        phase calibration results for one baseline, one band and one accumulation period.
    current_n_bins_pcal
        number of bins (number of elements in acc_pcal).
    count_acc_pcal
        number of accumulations performed to get pcal results.
    current_key_pair_accu
        part of the key with the baseline and the accumulation multi-key.
    current_vector_split
        metadata as in the input line.
    current_block_first_sample
        <first_sample>.<channel_index>.

    Returns
    -------
    str_print : str
        output line with phase calibration results.
    """
    # Key: "pcal" replaces the first two chars of the pair key, then the
    # accumulation counter is appended.
    key_part = "pcal" + current_key_pair_accu[2:] + 'sxa' + str(count_acc_pcal)
    meta_part = ' '.join(current_vector_split[:(META_LEN-1)])
    pcal_part = ' '.join(map(str, acc_pcal))
    return key_part + KEY_SEP + meta_part + ' ' + current_block_first_sample + ' ' + pcal_part
def get_str_pcal_out_all(sp,acc_pcal,current_n_bins_pcal,count_acc_pcal,current_key_pair_accu,current_vector_split,current_block_first_sample):
    """
    Get output string for phase calibration (all-baselines-per-task).

    Parameters
    ----------
    sp
        station-polarization.
    acc_pcal : complex 1D np.array
        phase calibration results for one station for this acc period and band.
    current_n_bins_pcal
        number of bins (number of elements in acc_pcal).
    count_acc_pcal
        number of accumulations performed to get pcal results.
    current_key_pair_accu
        part of the key with the baseline and the accumulation multi-key.
    current_vector_split
        metadata as in the input line.
    current_block_first_sample
        <first_sample>.<channel_index>.

    Returns
    -------
    str_print : str
        output line with phase calibration results.
    """
    # Pcal is per-station: the "baseline" in the key repeats the same
    # station-polarization on both sides.
    key_part = "pcal" + FIELD_SEP + sp + FIELD_SEP + sp + FIELD_SEP + \
               current_key_pair_accu + FIELD_SEP + 'sxa' + str(count_acc_pcal)
    meta_part = ' '.join(current_vector_split[:(META_LEN-1)])
    pcal_part = ' '.join(map(str, acc_pcal))
    return key_part + KEY_SEP + meta_part + ' ' + current_block_first_sample + ' ' + pcal_part
def get_lines_out_for_all(char_type,n_sp,F_ind,current_acc_str,count_acc,acc_mat,current_block_first_sample,current_vector_split,\
                          acc_pcal,count_acc_pcal,scaling_pair="A.A", single_precision=False):
    """
    Get output lines for all results in accumulation matrix.

    Parameters
    ----------
    char_type
        operation mode (see split_input_line()).
    n_sp
        number of station-polarizations.
    F_ind
        structure with ids for station-polarizations.
    current_acc_str
        multi-key.
    count_acc
        number of accumulations for the visibilities.
    acc_mat : complex 3D array
        visibilities for all baselines for this acc period and band. See lib_fx_stack.compute_x_all() for more info.
    current_block_first_sample
        <first_sample>.<channel_index>.
    current_vector_split
        metadata as in the input line.
    acc_pcal : complex 2D array
        phase calibration results for all stations for this acc period and band. See lib_pcal.accumulate_pcal_all() for more info.
    count_acc_pcal
        number of accumulations for the phase calibration results.
    scaling_pair
        station-polarization for this task (used in linear-scaling); "A.A" by default (all-baselines-per-task).
    single_precision
        forwarded to get_str_r_out() to select the output float precision prefix.

    Returns
    -------
    lines_out
        list of lines with output results (visibilities and phase calibration).
    """
    # TO DO: need more elegant solution to get key, currently hardcoded.
    current_acc_str=SF_SEP.join(current_acc_str[3:7])
    lines_out=[]
    if acc_mat is not None:
        if scaling_pair=="A.A":
            # All-baselines-per-task: emit the upper triangle (incl. autocorrs).
            for s0 in range(n_sp):
                for s1 in range(s0,n_sp):
                    try:
                        new_key_pair_accu=get_key_all_out(char_type,F_ind[s0],F_ind[s1],current_acc_str)
                        str_print = get_str_r_out(new_key_pair_accu,count_acc,current_vector_split,\
                                                  current_block_first_sample,acc_mat[s0,s1], single_precision)
                    except TypeError:
                        str_print = "zR\tError getting output data for "+str(s0)+"/"+str(s1)+" in "+str(current_acc_str)
                    lines_out+=[str_print]
        else:
            # Linear scaling: only the baselines involving scaling_pair.
            s0 = F_ind.index(scaling_pair)
            for s1 in range(n_sp):
                new_key_pair_accu=get_key_all_out(char_type,F_ind[s0],F_ind[s1],current_acc_str)
                str_print = get_str_r_out(new_key_pair_accu,count_acc,current_vector_split,\
                                          current_block_first_sample,acc_mat[s1], single_precision)
                lines_out+=[str_print]
        # Phase calibration: window+FFT the accumulated pcal streams, then
        # normalize by the number of accumulations and emit one line per station.
        if hasattr(acc_pcal, "size") and acc_pcal.size > 0:
            current_n_bins_pcal=acc_pcal.shape[1]
            pcal_fft = window_and_fft(acc_pcal,current_n_bins_pcal,C_INI_CR_WINDOW_SQUARE,flatten_chunks=0)
            # TO DO: check
            acc_pcal_div = normalize_pcal(pcal_fft,count_acc_pcal)
            for sp in range(n_sp):
                str_print = get_str_pcal_out_all(F_ind[sp],acc_pcal_div[sp][0],current_n_bins_pcal,count_acc_pcal,current_acc_str,current_vector_split,current_block_first_sample)
                lines_out+=[str_print]
    else:
        str_print = "zR\tEmpty acc mat in "+str(current_acc_str)
        lines_out+=[str_print]
    return(lines_out)
###########################################
# Data structures storage/mgmt
###########################################
def update_stored_samples(v_dequant,F1,F_ind,key_station_pol,F_delays,F_rates,F_fs,F_fs_pcal,abs_delay,rate_delay,\
                          fs,fs_pcal,F_first_sample,first_sample,data_type,F_frac,fractional_sample_delay,\
                          shift_delay,F_side,sideband,fft_size_in):
    """
    Store samples and metadata, to be processed later.

    Parameters
    ----------
    *For data structures see output below.
    *For metadata parameters see extract_params_split().
    v_dequant : numpy 1D array of complex
        dequantized samples.

    Returns
    -------
    F_* : lists where each element corresponds to one read line. All these lists
        are parallel (kept sorted by station-polarization id), i.e. the n-th
        element of every list corresponds to the same read line.
    fft_size_out : int
        FFT size to use downstream (doubled for real data).

    Notes
    -----
    TO DO: add checks.
    """
    # TO DO: move to extract_params
    if data_type=='c':
        # Complex data: first-sample index counts value pairs, so halve it.
        first_sample_adjusted=int(first_sample//2)
        #TO DO: need more elegant solution to support real and complex...
        fft_size_out=fft_size_in
    else:
        first_sample_adjusted=first_sample
        #TO DO: need more elegant solution to support real and complex...
        fft_size_out=2*fft_size_in
    if F1 is None:
        # First line seen: initialize all parallel structures.
        F1=[v_dequant]
        F_ind=[key_station_pol]
        F_delays=[abs_delay]
        F_rates=[rate_delay]
        F_frac=[[fractional_sample_delay,shift_delay]]
        F_fs=[fs]
        F_fs_pcal=[fs_pcal]
        F_first_sample=[first_sample_adjusted]
        F_side=[[sideband,data_type]]
    else:
        # Always same sorting: insert into every parallel list at the position
        # that keeps F_ind sorted by station-polarization id.
        F_ind.append(key_station_pol)
        F_ind = list(sorted(F_ind))
        new_pos = F_ind.index(key_station_pol)
        F1.insert(new_pos, v_dequant)
        F_delays.insert(new_pos, abs_delay)
        F_rates.insert(new_pos, rate_delay)
        F_frac.insert(new_pos, [fractional_sample_delay, shift_delay])
        F_fs.insert(new_pos, fs)
        F_fs_pcal.insert(new_pos, fs_pcal)
        F_first_sample.insert(new_pos, first_sample_adjusted)
        F_side.insert(new_pos, [sideband, data_type])
    return([F1,F_ind,F_delays,F_rates,F_fs,F_fs_pcal,F_first_sample,F_frac,F_side,fft_size_out])
def restore_Fs(last_F_delays,last_F_rates,last_F_frac,last_F_fs,last_F_fs_pcal,last_F_side,last_F_first_sample,\
               F_delays,F_rates,F_frac,F_fs,F_fs_pcal,F_side,F_first_sample):
    """
    Keep previous structures in case there is no data for all stationpols.

    Always returns the previous (last_*) metadata structures. The current F_*
    arguments are accepted for interface compatibility but are intentionally
    unused.

    Returns
    -------
    list
        [last_F_delays, last_F_rates, last_F_frac, last_F_fs, last_F_fs_pcal,
         last_F_side, last_F_first_sample]
    """
    # NOTE: an earlier heuristic compared len(last_F_delays) > len(F_delays) to
    # decide which set to keep; it was disabled (the condition was hard-coded to
    # `if 1`), making the alternative branch unreachable. The dead branch has
    # been removed; restore the previous structures unconditionally.
    return [last_F_delays, last_F_rates, last_F_frac, last_F_fs,
            last_F_fs_pcal, last_F_side, last_F_first_sample]
###########################################
# Data structures display
###########################################
def get_shapes_F1(F1):
    """
    Get string showing shapes of F1.

    Parameters
    ----------
    F1 : list of multidimensional np.arrays
        each element has the samples for one station-polarization.

    Returns
    -------
    out : str
        string rendering of the list of array shapes.
    """
    shapes = [array.shape for array in F1]
    return str(shapes)
def str_list(F_list, sep_c=','):
    """
    Render a list as a bracketed string, e.g. [e1,e2,e3].

    Parameters
    ----------
    F_list : list
        elements to render (converted with str()).
    sep_c : str
        separator placed between elements (default ',').

    Returns
    -------
    str
    """
    pieces = [str(item) for item in F_list]
    return "[{0}]".format(sep_c.join(pieces))
def get_lines_stats(current_key_pair_accu,F_stack_shift,F_adj_shift_partial,F_lti,F_ind,failed_acc_count,\
                    current_block_first_sample,dismissed_acc_count):
    """
    Build the stats lines for one accumulation period.

    The report includes:
     -Number of dropped/added samples (for fractional sample overflows) (stack).
     -Number of fractional sample overflows (shift).
     -For each stationpol: last sample, total samples, missing/invalid samples (lti).
     -Number of failed/dismissed accumulations (failures may simply be
      misalignment from delays leaving some data uncorrelated).

    Parameters
    ----------
    current_key_pair_accu : str
        part of the key with pair (station-pol A and station-pol B) and accumulation period.
    F_stack_shift
        [unused?] see lib_fx_stack().
    F_adj_shift_partial
        [unused?] see lib_fx_stack().
    F_lti : list
        per-stream [last sample, total samples, invalid samples, adjusted samples].
    F_ind : list
        station-polarization identifiers (parallel to F_lti).
    failed_acc_count : int
        number of failed accumulations.
    current_block_first_sample : str
        <first_sample>.<channel_index>
    dismissed_acc_count : int
        number of dismissed accumulations.

    Returns
    -------
    lines_stats : list of str

    Notes
    -----
    |
    | **TO DO:**
    |
    | Remove unused.
    """
    prefix = "zR" + KEY_SEP
    lines_stats = [
        prefix + "kpa=" + current_key_pair_accu + ",Adjusted stack=" + str_list(F_stack_shift),
        prefix + "kpa=" + current_key_pair_accu + ",Adjusted shifts=" + str_list(F_adj_shift_partial),
        ]
    # One lti_stats line per station-polarization.
    for (i_lti, i_ind) in zip(F_lti, F_ind):
        lines_stats.append(prefix + "st=" + str(i_ind) + ",lti_stats=" + str_list(i_lti))
    # Only report failure counters when at least one accumulation failed or was dismissed.
    if failed_acc_count > 0 or dismissed_acc_count > 0:
        lines_stats.append(prefix + "Failed accs=" + str(failed_acc_count) +
                           ",dismissed accs=" + str(dismissed_acc_count) +
                           ",in=a" + current_block_first_sample)
    return lines_stats
###########################################
# Main
###########################################
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
# <codecell>
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
464,
17168,
2744,
2411,
55,
2744,
2411,
1352,
198,
2,
198,
2,
5450,
1378,
12567,
13,
785,
14,
44,
10554,
323,
25558,
14,
10606,
2411,
55,
198,
2,
17829,
25,
10895,
55,
31,
71... | 2.262163 | 11,058 |
from .CalcBSRss import CalcBSRss
from .GetBSCrossings import GetBSCrossings
from .GetSolarWindTimes import GetSolarWindTimes
from .OverlayBS import OverlayBS
| [
6738,
764,
9771,
66,
4462,
49,
824,
1330,
2199,
66,
4462,
49,
824,
198,
6738,
764,
3855,
4462,
21544,
654,
1330,
3497,
4462,
21544,
654,
198,
6738,
764,
3855,
38825,
8731,
28595,
1330,
3497,
38825,
8731,
28595,
198,
6738,
764,
5886,
1... | 3.291667 | 48 |
# This version number is stored here instead of in `__init__.py` because
# namespace packages and the packages which live inside them can be imported
# in any order, which means their contents are supposed to be identical. If
# their contents differ, Python makes no guarantee about which one is loaded.
#
# See https://www.python.org/dev/peps/pep-0420/#namespace-packages-today
# Version string follows the usual major.minor.patch convention.
__version__ = '2.12.2'
| [
2,
770,
2196,
1271,
318,
8574,
994,
2427,
286,
287,
4600,
834,
15003,
834,
13,
9078,
63,
780,
198,
2,
25745,
10392,
290,
262,
10392,
543,
2107,
2641,
606,
460,
307,
17392,
198,
2,
287,
597,
1502,
11,
543,
1724,
511,
10154,
389,
43... | 3.697248 | 109 |
import pytest
@pytest.fixture
| [
11748,
12972,
9288,
628,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198
] | 2.538462 | 13 |
"""This file contains all constant definitions
.. moduleauthor:: Lan Hongjian <lanhongjianlr@gmail.com>
.. moduleauthor:: Yamei Ou <oym111@gmail.com>
.. moduleauthor:: Samuel Richerd <dondiego152@gmail.com>
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
.. moduleauthor:: Junlin Zhang <neicullyn@gmail.com>
"""
PACKET_SIZE = 8192
"""Size of every :class:`.Packet` in the simulation, in bits"""
ACK_PACKET_SIZE = 512
"""Size of every :class:`.Packet` in the simulation, in bits"""
ROUTER_PACKET_SIZE = 512
"""Size of every :class:`.RouterPacket` in the simulation, in bits"""
GENERATE_ROUTER_PACKET_DEFAULT_INTERVAL = 1000
"""Time for every :class:`.Router` to wait before generating a new
:class:`.RouterPacket`, in milliseconds"""
DYNAMIC_ROUTE_DISTANCE_METRIC = True
"""Whether to take dynamic link delay as the metric for route distance,
otherwise use hops(topology) to be the metric"""
INPUT_FILE_RATE_SCALE_FACTOR = 1000000 / 1000.0
""" Conversion factor for Mbps to bits per millisecond (for rate)"""
INPUT_FILE_DELAY_SCALE_FACTOR = 1
""" Conversion factor for ms to ms (for delay)"""
INPUT_FILE_UPDATE_SCALE_FACTOR = 1
""" Conversion factor for ms to ms (for update)"""
INPUT_FILE_DATA_SCALE_FACTOR = 8000000
"""Conversion factor for MBytes to bits (for flow total data size)"""
INPUT_FILE_TIME_SCALE_FACTOR = 1000
"""Conversion factor for seconds to milliseconds (for flow start time)"""
INPUT_FILE_BUFFER_SCALE_FACTOR = 8000
"""Conversion factor for KB to bits (for buffer size)"""
OUTPUT_LINK_RATE_SCALE_FACTOR = 1000.0 / 1000000
""" Conversion factor for bits per millisecond (for rate) to Mbps"""
OUTPUT_BUFFER_OCCUPANCY_SCALE_FACTOR = 1.0 / PACKET_SIZE
"""Conversion factor for bits to packets"""
OUTPUT_FLOW_RATE_SCALE_FACTOR = 1000.0 / 1000000
""" Conversion factor for bits per millisecond (for rate) to Mbps"""
| [
37811,
1212,
2393,
4909,
477,
6937,
17336,
198,
198,
492,
8265,
9800,
3712,
14730,
9764,
73,
666,
1279,
9620,
71,
506,
73,
666,
14050,
31,
14816,
13,
785,
29,
198,
492,
8265,
9800,
3712,
575,
480,
72,
42201,
1279,
726,
76,
16243,
31... | 2.949206 | 630 |
import logging
import os
import pandas as pd
from glob import glob
from pathlib import Path, PosixPath, WindowsPath
from ekorpkit.utils.func import elapsed_timer
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
15095,
1330,
15095,
198,
6738,
3108,
8019,
1330,
10644,
11,
18574,
844,
15235,
11,
3964,
15235,
198,
6738,
304,
74,
16300,
15813,
13,
26791,
13,
20786,
13... | 3.306452 | 62 |
#
# andre@corp.insite.com.br
# 2017-10-10
# Codigo que faz regressao simples e encontra embeddings
#
# a ideia aqui e a seguinte:
# - carregar dados do movielens
# - inicializar o embedding de forma aleatoria
# - encontrar os embeddings de filmes e de usuarios que gerem o menor erro possivel
#
from __future__ import division
from __future__ import print_function
from time import gmtime, strftime, localtime
import math
import time
import sys
import os
#from pylab import *
from scipy import sparse
import numpy as np
import pandas as pd
import tensorflow as tf
import random
from tensorflow.python import debug as tf_debug
NUM_USERS = 247754
NUM_MOVIES = 151712
NUM_FEATURES = 4
batch_size = 9999
num_steps = 2000001
base_lbda = 0.01
count = 1
alpha = 0.0001
decay = 0.9999
INPUT_FILE="ratings.csv"
prefix = "t7-r{0:d}-l{1}-a{2}-{3}-".format(NUM_FEATURES, base_lbda, alpha, INPUT_FILE)
tf.set_random_seed(1)
t0 = time.perf_counter()
train_data, train_labels = load_data(INPUT_FILE)
graph = tf.Graph()
with graph.as_default():
ones = tf.constant(1., shape=(NUM_FEATURES,1))
user_embeddings = tf.get_variable("user_embeddings", [NUM_USERS, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))
user_bias = tf.get_variable("user_bias", [NUM_USERS, 1], initializer=tf.random_normal_initializer(0.0))
movie_embeddings = tf.get_variable("movie_embeddings", [NUM_MOVIES, NUM_FEATURES], initializer=tf.random_normal_initializer(0,1*math.sqrt(1/NUM_FEATURES)))
movie_bias = tf.get_variable("movie_bias", [NUM_MOVIES, 1], initializer=tf.random_normal_initializer(3.5))
tf_base_lbda = tf.get_variable("lambda", initializer=tf.constant(base_lbda))
tf_count = tf.get_variable("count", dtype=tf.int32, initializer=tf.constant(count))
tf_train_data = tf.placeholder(tf.int32, shape=(batch_size, 2))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, 1))
tf_lr = tf.placeholder(tf.float32)
tf_user_embeddings = tf.gather(user_embeddings, tf_train_data[:,0])
tf_movie_embeddings = tf.gather(movie_embeddings, tf_train_data[:,1])
tf_user_bias = tf.gather(user_bias, tf_train_data[:,0])
tf_movie_bias = tf.gather(movie_bias, tf_train_data[:,1])
#train_prediction = tf.tensordot(tf_user_embeddings, tf_movie_embeddings, axes=1)
train_prediction = tf.add(tf.matmul(tf.multiply(tf_user_embeddings, tf_movie_embeddings), ones), tf.add(tf_user_bias, tf_movie_bias))
error = tf.subtract(train_prediction, tf_train_labels)
sse = tf.reduce_sum(tf.square(error))
regularization = tf.reduce_sum(tf.square(tf_user_embeddings))/NUM_FEATURES + tf.reduce_sum(tf.abs(tf_movie_embeddings))/NUM_FEATURES
# There's o need to regularize the biases
# + tf.reduce_sum(tf.square(tf_movie_bias))*batch_size/NUM_MOVIES + tf.reduce_sum(tf.square(tf_user_bias)) * batch_size / NUM_USERS
loss = sse + alpha * regularization
mse = sse / batch_size
tf_base_lbda = tf_base_lbda * decay
tf_lbda = tf_base_lbda * count
tf_count = tf_count + 1
optimizer = tf.train.GradientDescentOptimizer(tf_lr).minimize(loss)
histogram = tf.histogram_fixed_width(error, [-4.5, 4.5], nbins=10)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
uemb, memb = session.run([user_embeddings, movie_embeddings])
print("user embeddings: {}\n",uemb)
print("movie embeddings: {}\n",memb)
acccount = acctot = 0.0
old_loss = 1e20
lr = base_lbda
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_data[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_data : batch_data, tf_train_labels : batch_labels, tf_lr: lr}
_, l, predictions, uemb, memb, _mse, hist, ubias, mbias = session.run(
[optimizer, loss, train_prediction, user_embeddings, movie_embeddings, mse, histogram, user_bias, movie_bias], feed_dict=feed_dict)
acccount = acccount * 0.9999 + 1
acctot = acctot * 0.9999 + _mse
exploss = acctot/acccount
if (step % 2000 == 0):
if (exploss > old_loss):
lr = lr * 0.1
else:
lr = lr * 1.01
old_loss = exploss
#
loga("Minibatch loss at step %d: %f (%f)" % (step, l, l/batch_size))
print(" Mean Square Error: %f - exp=%f" % (_mse, acctot/acccount))
print(" Learning Rate: %f" % (lr))
print("user embeddings: %f: %s" % (np.linalg.norm(uemb), np.mean(uemb, 0)))
print("movie embeddings: %f: %s" % (np.linalg.norm(memb), np.mean(memb, 0)))
print("user bias: %f: %s" % (np.linalg.norm(ubias), np.mean(ubias, 0)))
print("movie bias: %f: %s" % (np.linalg.norm(mbias), np.mean(mbias, 0)))
print("error: %s" % (hist))
#print("user embeddings: %f" % (user_embeddings))
#print("embeddings: {}".format(emb))
#print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
#print("Validation accuracy: %.1f%%" % accuracy(
#valid_prediction.eval(), valid_labels))
#print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
if lr < 1e-12:
break
print("steps done: {}".format(step))
print("user_embeddings:\n{}".format(np.around(uemb, 3)))
print("movie_embeddings:\n{}".format(np.around(memb, 3)))
np.savetxt(prefix + "user_embeddings.csv.gz", uemb, delimiter=',', fmt="%.7f")
np.savetxt(prefix + "movie_embeddings.csv.gz", memb, delimiter=',', fmt="%.7f")
np.savetxt(prefix + "user_bias.csv.gz", ubias, delimiter=',', fmt="%.7f")
np.savetxt(prefix + "movie_bias.csv.gz", mbias, delimiter=',', fmt="%.7f")
| [
2,
198,
2,
290,
260,
31,
10215,
79,
13,
1040,
578,
13,
785,
13,
1671,
198,
2,
2177,
12,
940,
12,
940,
198,
2,
18720,
14031,
8358,
277,
1031,
50252,
5488,
985,
2374,
304,
2207,
756,
430,
11525,
67,
654,
198,
2,
198,
2,
257,
140... | 2.327843 | 2,489 |
import torch
import torchvision
import torch.autograd as autograd
import model
import autoencoder
import util
import os
| [
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
13,
2306,
519,
6335,
355,
1960,
519,
6335,
198,
11748,
2746,
198,
11748,
1960,
6571,
66,
12342,
198,
11748,
7736,
198,
11748,
28686,
628
] | 3.666667 | 33 |
import io
import numpy as np
import tensorflow as tf
import cv2
from flask import Flask, request, send_file, jsonify
from UGATIT import UGATIT
from main import parse_args
from utils import inverse_transform, show_all_variables, merge
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
args = parse_args()
gan = UGATIT(sess, args)
gan.build_model()
show_all_variables()
tf.global_variables_initializer().run(session=sess)
gan.saver = tf.train.Saver()
could_load, checkpoint_counter = gan.load(gan.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else :
print(" [!] Load failed...")
raise Exception()
app = Flask(__name__)
@app.route('/anime2selfie', methods=['POST'])
@app.route('/selfie2anime', methods=['POST'])
@app.route('/health')
@app.route('/')
if __name__ == "__main__":
app.run(debug=False, port=80, host='0.0.0.0', threaded=False)
| [
11748,
33245,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
269,
85,
17,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
3758,
62,
7753,
11,
33918,
1958,
198,
198,
6738,
471,
38,
1404,
20... | 2.442786 | 402 |
from django.db import models
from django.core.files.storage import FileSystemStorage
from django.conf import settings
# File-system storage for uploaded media: files live under <BASE_DIR>/media/
# on disk and are served under the /media/ URL prefix.
media_storage = FileSystemStorage(
    location=f'{settings.BASE_DIR}/media/',
    base_url=f'/media/'
)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
16624,
13,
35350,
1330,
9220,
11964,
31425,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
628,
628,
628,
628,
198,
198,
11431,
62,
35350,
796,
9220,
1196... | 2.949367 | 79 |
# VMware vSphere Python SDK
# Copyright (c) 2008-2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
import vcr
import socket
# Fully qualified path to the fixtures directory underneath this module
fixtures_path = tests_resource_path('fixtures')
| [
2,
37754,
410,
38882,
11361,
26144,
198,
2,
15069,
357,
66,
8,
3648,
12,
4967,
37754,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2... | 3.882075 | 212 |
# encoding: utf-8
from __future__ import division, print_function, unicode_literals
###########################################################################################################
#
#
# Palette Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Palette
#
#
###########################################################################################################
import objc
from GlyphsApp import *
from GlyphsApp.plugins import *
import traceback
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
29113,
29113,
29113,
7804,
21017,
198,
2,
198,
2,
198,
2,
197,
11531,
5857,
42636,
198,
2,
19... | 4.264463 | 121 |
from ..lib.service import Service
| [
6738,
11485,
8019,
13,
15271,
1330,
4809,
628
] | 4.375 | 8 |
from Tikzifyables.Colourable.Colourable import Colourable
| [
6738,
46338,
89,
1958,
2977,
13,
5216,
454,
540,
13,
5216,
454,
540,
1330,
38773,
540,
628
] | 3.470588 | 17 |
################
"""
sb_convertFootage
Simon Bjork
August 2014
bjork.simon@gmail.com
Version 1.3 (May 2015)
Synopsis: Speed up the process of converting footage (Read nodes) to another format/resolution/colorspace/naming convention.
OS: Windows/OSX/Linux
CAUTION: The script uses the Python threading module, which is a bit of a black box (to me). Use at own risk.
To install the script:
- Add the script to your Nuke pluginPath.
- Add the following to your menu.py:
import sb_convertFootage
sb_tools = nuke.toolbar("Nodes").addMenu( "sb_Tools", icon = "sb_tools.png" )
sb_tools.addCommand("Python/sb ConvertFootage", 'sb_convertFootage.sb_convertFootage(showAsModal=True)', '')
Note that you can set the showAsModal to True/False. Both options has pros/cons.
"""
################
import nuke
import nukescripts
import os
import threading
import PyOpenColorIO as OCIO
import re
################
def getFileNameComponents(fileName):
    '''
    Split filename into components.

    Returns a list [<name>, <framepadding separator>, <framepadding>, <extension>]

    Assumes framepadding is at the end of the filename (right before the extension).
    Assumes the framepadding separator is ".", "_" or "-".
    Make sure the filename uses numbers, not %04d or ####. Use <file knob>.evaluate() for this.

    Examples:

    getFileNameComponents(D:/images/img01.0001.exr)
    >> ['D:/images/img01', '.', '0001', 'exr']

    getFileNameComponents(D:/images/img01.exr)
    >> ['D:/images/img01', '', '', 'exr']
    '''
    name, dotExt = os.path.splitext(fileName)
    ext = dotExt[1:]

    # Scan the name backwards: frame padding is the trailing run of digits.
    revName = name[::-1]
    if not revName or not revName[0].isdigit():
        # No trailing digits at all -> no frame padding.
        return [name, "", "", ext]

    # BUG FIX: the previous implementation scanned the whole reversed name
    # without breaking, so it located the LAST non-digit character instead of
    # the first one and almost never detected the padding separator
    # (e.g. "img01.0001.exr" returned no padding). Stop at the first non-digit.
    sepIndex = None
    for i, char in enumerate(revName):
        if not char.isdigit():
            sepIndex = i
            break

    if sepIndex is None:
        # The entire name is digits; treat it as a plain name without padding.
        return [name, "", "", ext]

    if revName[sepIndex] in (".", "_", "-"):
        separator = revName[sepIndex]
        padding = revName[:sepIndex][::-1]
        return [name[:-sepIndex - 1], separator, padding, ext]

    # Trailing digits but no recognized separator (e.g. "img01") -> no padding.
    return [name, "", "", ext]
def getOCIOConfig():
    '''
    Get the current OCIO config.
    If the $OCIO environment variable is not set, create a new config from the nuke-default that ships with Nuke.
    '''
    # $OCIO set: defer to the globally configured OCIO environment.
    if os.getenv("OCIO"):
        return OCIO.GetCurrentConfig()
    else:
        # Fall back to the nuke-default OCIO config bundled with the running
        # Nuke install, located relative to the Nuke executable directory.
        nukeDir = os.path.dirname(nuke.env['ExecutablePath'])
        nuke_default_config = OCIO.Config.CreateFromFile("{0}/plugins/OCIOConfigs/configs/nuke-default/config.ocio".format(nukeDir))
        return nuke_default_config
def getOCIOColorSpaces():
    '''
    Return every colorspace defined in the active OCIO config.

    Each entry is "<family>/<name>" when the colorspace has a family name,
    otherwise just "<name>".
    '''
    names = []
    for colorSpace in getOCIOConfig().getColorSpaces():
        family = colorSpace.getFamily()
        if family:
            names.append("{0}/{1}".format(family, colorSpace.getName()))
        else:
            names.append(colorSpace.getName())
    return names
def getOCIOFamilyAndName(colorSpaceName):
    '''
    Return "<family>/<name>" for the given colorspace.

    If the colorspace has no family name (or is not found in the active
    config), return the name unchanged.
    '''
    for colorSpace in getOCIOConfig().getColorSpaces():
        if colorSpace.getName() != colorSpaceName:
            continue
        family = colorSpace.getFamily()
        if family:
            return "{0}/{1}".format(family, colorSpaceName)
        break
    return colorSpaceName
def getOCIOLinear():
    '''
    Get the default OCIO scene-linear colorspace as "<family>/<name>".

    Returns False when the config defines no scene-linear role (or lookup
    fails for any other reason).
    '''
    config = getOCIOConfig()
    try:
        defaultLin = config.getColorSpace(OCIO.Constants.ROLE_SCENE_LINEAR).getName()
        defaultLinFull = getOCIOFamilyAndName(defaultLin)
        return defaultLinFull
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt and
        # SystemExit. Catch only ordinary errors (e.g. getColorSpace() returning
        # None when the role is missing -> AttributeError) and signal failure.
        return False
# Use this Class to do the rendering as you can't execute a render otherwise from a Python Panel (to my knowledge).
# Panel.
# Set knobChanged commands.
# Main function.
# Run main script. | [
14468,
198,
198,
37811,
198,
197,
36299,
62,
1102,
1851,
17574,
496,
198,
197,
35475,
34254,
967,
198,
197,
17908,
1946,
198,
197,
50007,
967,
13,
14323,
261,
31,
14816,
13,
785,
198,
197,
14815,
352,
13,
18,
357,
6747,
1853,
8,
628... | 2.792308 | 1,300 |
import sys
import glob
import bladapter as bl
# Export the sequence of each top contig from its species' FASTA file into a
# single output FASTA (usage: script <contigs_in> <fasta_out>).
_, pathin, pathout = sys.argv

top_contigs = bl.load_contigs(pathin)
names = {contig.species: contig.name for contig in top_contigs}
# One FASTA file per species, matched by the species token in the filename.
files = {spec: glob.glob('../wgot/fastas/*{}*'.format(spec))[0] for spec in bl.spec_list}

for contig in top_contigs:
    # Read the whole species FASTA; `with` guarantees the handle is closed
    # (the original left both this handle and the output handle unclosed on error).
    with open(files[contig.species]) as fasta_file:
        whole_file = fasta_file.read()
    tig_loc = whole_file.find(contig.name.split(contig.species)[1])
    # Sequence lines run from just after the header line up to the next '>' record;
    # join them instead of quadratic string +=.
    contig_seq = ''.join(whole_file[tig_loc:].split('>')[0].split('\n')[1:])
    print('Writing sequence for :{}'.format(contig.name))
    # Append mode preserved: each contig is appended as its own FASTA record.
    with open(pathout, 'a') as out_file:
        out_file.write('>' + contig.name + '\n' + contig_seq + '\n')
11748,
25064,
198,
11748,
15095,
198,
11748,
698,
324,
3429,
355,
698,
198,
198,
62,
11,
3108,
259,
11,
3108,
448,
796,
25064,
13,
853,
85,
198,
198,
4852,
62,
3642,
9235,
796,
698,
13,
2220,
62,
3642,
9235,
7,
6978,
259,
8,
198,
... | 2.32 | 300 |
from gamma_functions import *
import numpy as np
from scipy.optimize import brentq
import scipy.integrate as integrate
# mu = \int_0^1 x f_ab(x) dx
# Given mu,a what is b?
# For each shape parameter a on a log-spaced grid, solve for the b that yields
# the target mean mu, then save both grids as .npy files.
# NOTE(review): Python 2 syntax (print statement). `root_f` comes from the
# gamma_functions star import -- presumably the root function of the mean
# constraint in b; the bracket [0.5*mu/a, 5*mu/a] is assumed to contain a
# sign change for brentq -- confirm against gamma_functions.
mu = 0.1
a_ns = []
b_ns = []
for a in np.logspace(np.log10(0.2),np.log10(98),500):
    a_ns.append(a)
    b = brentq(root_f, 0.5*mu/a, 5*mu/a)
    b_ns.append(b)
    print a, " ", b
np.save("data/a_ns.npy", a_ns)
np.save("data/b_ns.npy", b_ns)
| [
198,
6738,
34236,
62,
12543,
2733,
1330,
1635,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
275,
1156,
80,
220,
198,
11748,
629,
541,
88,
13,
18908,
4873,
355,
19386,
198,
198,
2,
38779,
7... | 1.99537 | 216 |
#!/usr/bin/env python3
import socket
import unittest
import struct
import random
from framework import tag_fixme_vpp_workers
from framework import VppTestCase, VppTestRunner
import scapy.compat
from scapy.layers.inet import IP, TCP, UDP, ICMP
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply, \
ICMPv6ND_NS, ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, fragment6
from scapy.layers.inet6 import ICMPv6DestUnreach, IPerror6, IPv6ExtHdrFragment
from scapy.layers.l2 import Ether, ARP, GRE
from scapy.data import IP_PROTOS
from scapy.packet import bind_layers, Raw
from util import ppp
from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
from time import sleep
from util import ip4_range
from vpp_papi import mac_pton
from syslog_rfc5424_parser import SyslogMessage, ParseError
from syslog_rfc5424_parser.constants import SyslogFacility, SyslogSeverity
from io import BytesIO
from vpp_papi import VppEnum
from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathType
from vpp_neighbor import VppNeighbor
from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
PacketListField
from ipaddress import IPv6Network
@tag_fixme_vpp_workers
class TestDSlite(VppTestCase):
""" DS-Lite Test Cases """
@classmethod
@classmethod
def test_dslite(self):
""" Test DS-Lite """
self.vapi.dslite_add_del_pool_addr_range(start_addr=self.nat_addr,
end_addr=self.nat_addr,
is_add=1)
aftr_ip4 = '192.0.0.1'
aftr_ip6 = '2001:db8:85a3::8a2e:370:1'
self.vapi.dslite_set_aftr_addr(ip4_addr=aftr_ip4, ip6_addr=aftr_ip6)
self.vapi.syslog_set_sender(self.pg2.local_ip4, self.pg2.remote_ip4)
# UDP
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[0].ip6) /
IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
UDP(sport=20000, dport=10000))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
capture = capture[0]
self.assertFalse(capture.haslayer(IPv6))
self.assertEqual(capture[IP].src, self.nat_addr)
self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
self.assertNotEqual(capture[UDP].sport, 20000)
self.assertEqual(capture[UDP].dport, 10000)
self.assert_packet_checksums_valid(capture)
out_port = capture[UDP].sport
capture = self.pg2.get_capture(1)
self.verify_syslog_apmadd(capture[0][Raw].load, '192.168.1.1',
20000, self.nat_addr, out_port,
self.pg1.remote_hosts[0].ip6, IP_PROTOS.udp)
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
UDP(sport=10000, dport=out_port))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, aftr_ip6)
self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[0].ip6)
self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
self.assertEqual(capture[IP].dst, '192.168.1.1')
self.assertEqual(capture[UDP].sport, 10000)
self.assertEqual(capture[UDP].dport, 20000)
self.assert_packet_checksums_valid(capture)
# TCP
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[1].ip6) /
IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
TCP(sport=20001, dport=10001))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
capture = capture[0]
self.assertFalse(capture.haslayer(IPv6))
self.assertEqual(capture[IP].src, self.nat_addr)
self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
self.assertNotEqual(capture[TCP].sport, 20001)
self.assertEqual(capture[TCP].dport, 10001)
self.assert_packet_checksums_valid(capture)
out_port = capture[TCP].sport
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
TCP(sport=10001, dport=out_port))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, aftr_ip6)
self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
self.assertEqual(capture[IP].dst, '192.168.1.1')
self.assertEqual(capture[TCP].sport, 10001)
self.assertEqual(capture[TCP].dport, 20001)
self.assert_packet_checksums_valid(capture)
# ICMP
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[1].ip6) /
IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
ICMP(id=4000, type='echo-request'))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
capture = capture[0]
self.assertFalse(capture.haslayer(IPv6))
self.assertEqual(capture[IP].src, self.nat_addr)
self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
self.assertNotEqual(capture[ICMP].id, 4000)
self.assert_packet_checksums_valid(capture)
out_id = capture[ICMP].id
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
ICMP(id=out_id, type='echo-reply'))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, aftr_ip6)
self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
self.assertEqual(capture[IP].dst, '192.168.1.1')
self.assertEqual(capture[ICMP].id, 4000)
self.assert_packet_checksums_valid(capture)
# ping DS-Lite AFTR tunnel endpoint address
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(src=self.pg1.remote_hosts[1].ip6, dst=aftr_ip6) /
ICMPv6EchoRequest())
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, aftr_ip6)
self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
self.assertTrue(capture.haslayer(ICMPv6EchoReply))
b4s = self.statistics.get_counter('/dslite/total-b4s')
self.assertEqual(b4s[0][0], 2)
sessions = self.statistics.get_counter('/dslite/total-sessions')
self.assertEqual(sessions[0][0], 3)
class TestDSliteCE(VppTestCase):
""" DS-Lite CE Test Cases """
@classmethod
@classmethod
@classmethod
def test_dslite_ce(self):
""" Test DS-Lite CE """
b4_ip4 = '192.0.0.2'
b4_ip6 = '2001:db8:62aa::375e:f4c1:1'
self.vapi.dslite_set_b4_addr(ip4_addr=b4_ip4, ip6_addr=b4_ip6)
aftr_ip4 = '192.0.0.1'
aftr_ip6 = '2001:db8:85a3::8a2e:370:1'
aftr_ip6_n = socket.inet_pton(socket.AF_INET6, aftr_ip6)
self.vapi.dslite_set_aftr_addr(ip4_addr=aftr_ip4, ip6_addr=aftr_ip6)
r1 = VppIpRoute(self, aftr_ip6, 128,
[VppRoutePath(self.pg1.remote_ip6,
self.pg1.sw_if_index)])
r1.add_vpp_config()
# UDP encapsulation
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IP(dst=self.pg1.remote_ip4, src=self.pg0.remote_ip4) /
UDP(sport=10000, dport=20000))
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, b4_ip6)
self.assertEqual(capture[IPv6].dst, aftr_ip6)
self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
self.assertEqual(capture[IP].dst, self.pg1.remote_ip4)
self.assertEqual(capture[UDP].sport, 10000)
self.assertEqual(capture[UDP].dport, 20000)
self.assert_packet_checksums_valid(capture)
# UDP decapsulation
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(dst=b4_ip6, src=aftr_ip6) /
IP(dst=self.pg0.remote_ip4, src=self.pg1.remote_ip4) /
UDP(sport=20000, dport=10000))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg0.get_capture(1)
capture = capture[0]
self.assertFalse(capture.haslayer(IPv6))
self.assertEqual(capture[IP].src, self.pg1.remote_ip4)
self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
self.assertEqual(capture[UDP].sport, 20000)
self.assertEqual(capture[UDP].dport, 10000)
self.assert_packet_checksums_valid(capture)
# ping DS-Lite B4 tunnel endpoint address
p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
IPv6(src=self.pg1.remote_hosts[0].ip6, dst=b4_ip6) /
ICMPv6EchoRequest())
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
capture = self.pg1.get_capture(1)
capture = capture[0]
self.assertEqual(capture[IPv6].src, b4_ip6)
self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[0].ip6)
self.assertTrue(capture.haslayer(ICMPv6EchoReply))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
17802,
198,
11748,
555,
715,
395,
198,
11748,
2878,
198,
11748,
4738,
198,
198,
6738,
9355,
1330,
7621,
62,
13049,
1326,
62,
85,
381,
62,
22896,
198,
6738,
9355,
1330,
... | 1.93136 | 5,405 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__title__ = 'modlist'
__version__ = '0.1'
__author__ = '@c0ding'
__repo__ = 'https://github.com/c0ding/modlist-api'
__license__ = 'Apache v2.0 License'
# Base URL of the MCF Modlist API (note the trailing slash).
MODLIST_ENTRYPOINT = 'http://modlist.mcf.li/api/v3/'


def entrypoint(*suffix):
    """
    Build a MCF Modlist API URL by joining path segments onto the base
    entrypoint.

    All data provided by modlist.mcf.li.
    http://modlist.mcf.li/
    """
    path = '/'.join(suffix)
    return MODLIST_ENTRYPOINT + path
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
7839,
834,
796,
705,
4666,
4868,
6,
198,
834,
9641,
834,
796,
705,
15,
13,
16,
6,
198,
834,
9800,
834,
... | 2.352632 | 190 |
"""Script that scrapes the global Spotify playlist 'new music friday'.
100 newly released tracks are fetched from the playlist and enriched
with additional features obtained via the Spotify API.
"""
import logging
import time
from datetime import datetime
from typing import List, Tuple
import pandas as pd
import spotipy
import waitress # type: ignore
from flask import Flask
from google.cloud import secretmanager
from spotipy.oauth2 import SpotifyClientCredentials
from urllib3.exceptions import MaxRetryError, ReadTimeoutError
class Scraper:
    """Scrape tracks from a Spotify playlist and enrich them via the Spotify API."""

    def __init__(self) -> None:
        """Initialize the Spotify Web API client.

        NOTE(review): the original class referenced ``self.spotify`` in every
        query method without ever creating it, so any instance method that hit
        the API raised ``AttributeError``.  The client-credentials flow below
        reads ``SPOTIPY_CLIENT_ID`` / ``SPOTIPY_CLIENT_SECRET`` from the
        environment; if the credentials live in Secret Manager instead, fetch
        them with :meth:`access_secret_version` and pass them to
        ``SpotifyClientCredentials`` explicitly — confirm against deployment.
        """
        self.spotify = spotipy.Spotify(auth_manager=SpotifyClientCredentials())

    @staticmethod
    def access_secret_version(secret_version_id: str) -> str:
        """Return the value of a secret's version.

        Args:
            secret_version_id: the id of the secret version in the secret manager

        Returns:
            object: the secret decoded in utf-8
        """
        client = secretmanager.SecretManagerServiceClient()
        response = client.access_secret_version(name=secret_version_id)
        return response.payload.data.decode("UTF-8")

    def get_tracks_from_playlist(self, playlist_url: str) -> pd.DataFrame:
        """Queries data for a specific playlist url.

        Args:
            playlist_url: the url of the playlist to query

        Returns:
            object: a dataframe with one track per row

        Raises:
            TypeError: if the Spotify API returned no playlist payload
        """
        track_dicts = self.spotify.playlist(playlist_url)["tracks"]["items"]
        if track_dicts is None:
            raise TypeError(
                "Failed to get the playlist information from Spotify API. "
                "Aborting operation."
            )
        rows = []
        for track_dict in track_dicts:
            # 'added_at' is an ISO timestamp like '2021-05-07T00:00:00Z';
            # keep only the date part.
            date_added = track_dict["added_at"].split("T")[0]
            # all other interesting variables are inside the 'track' dict
            track_dict = track_dict["track"]
            track_id = track_dict["id"]
            # track_dict["artists"] can contain multiple elements
            # if multiple artists are on a track.
            # they are concatenated into one string with separator " ft. "
            track_artist = " ft. ".join(
                [artist_dict["name"] for artist_dict in track_dict["artists"]]
            )
            track_name = track_dict["name"]
            popularity = track_dict["popularity"]
            spotify_url = track_dict["external_urls"]["spotify"]
            row_dict = {
                "track_id": track_id,
                "artist": track_artist,
                "name": track_name,
                "track_popularity": popularity,
                "date_added": date_added,
                "spotify_url": spotify_url,
            }
            rows.append(row_dict)
        playlist_df = pd.DataFrame.from_records(rows)
        playlist_df["date_of_scrape"] = datetime.now().strftime("%Y-%m-%d")
        return playlist_df

    def get_audio_features(self, track_id: str) -> dict:
        """Returns the audio features for a given track_id.

        Args:
            track_id: the id of the track

        Returns:
            object: a dictionary with audio features
        """
        return self.spotify.audio_features(track_id)

    def get_other_track_info(self, track_id: str) -> Tuple[str, bool, List[str]]:
        """Gets other track information for a given track_id.

        More specifically, the release date, a boolean indicating whether
        explicit language is used in a track and a list with artist ids.

        Args:
            track_id: the id of the track

        Returns:
            object: a tuple with the release date, explicit boolean and artist ids
        """
        track_object = self.spotify.track(track_id)
        release_date = self.get_release_date(track_object)
        explicit_boolean = self.get_explicit_boolean(track_object)
        artist_ids = self.get_artist_ids(track_object)
        return release_date, explicit_boolean, artist_ids

    @staticmethod
    def get_release_date(track_object: dict) -> str:
        """Returns the release date for a given track.

        Args:
            track_object: dictionary containing information about a track

        Returns:
            object: the release date of a track
        """
        return track_object["album"]["release_date"]

    @staticmethod
    def get_explicit_boolean(track_object: dict) -> bool:
        """Returns whether the track contains explicit language.

        Args:
            track_object: dictionary containing information about a track

        Returns:
            object: a boolean indicating explicit language in a track
        """
        return track_object["explicit"]

    @staticmethod
    def get_artist_ids(track_object: dict) -> List[str]:
        """Returns the ids of the artists that appear on a track.

        Args:
            track_object: dictionary containing information about a track

        Returns:
            object: a list with one or more artist ids
        """
        track_artists_object = track_object["artists"]
        return [artist["id"] for artist in track_artists_object]

    def get_artist_features(self, artist_ids: List[str]) -> Tuple[int, int]:
        """Gets features on artist-level.

        Args:
            artist_ids: list of artist ids that can contain one or more artists

        Returns:
            object: a tuple with the number of followers and the artist popularity
        """
        artists_objects = [self.spotify.artist(uid) for uid in artist_ids]
        followers = self.get_followers(artists_objects)
        artist_popularity = self.get_artist_popularity(artists_objects)
        return followers, artist_popularity

    @staticmethod
    def get_followers(artists_objects: List[dict]) -> int:
        """Gets the number of followers of an artist.

        If multiple artists participate in a track,
        the sum of the number of followers of every artist is returned.

        Args:
            artists_objects: a list with dicts containing information about the artist.

        Returns:
            object: the sum of followers of the artist(s)
        """
        artists_followers = [artist["followers"]["total"] for artist in artists_objects]
        return sum(artists_followers)

    @staticmethod
    def get_artist_popularity(artists_objects: List[dict]) -> int:
        """Gets the popularity of an artist.

        If multiple artists participate in a track, the average of the popularity of
        every artist is returned, rounded to the nearest integer.

        Args:
            artists_objects: a list with dicts containing information about the artist.

        Returns:
            object: the artist's popularity
        """
        return round(
            sum([artist["popularity"] for artist in artists_objects])
            / len(artists_objects)
        )

    def get_playlist_with_features(self, playlist_id: str) -> pd.DataFrame:
        """Get all tracks in a playlist with the features used for modelling.

        Args:
            playlist_id: the id of the playlist to collect features for

        Returns:
            object: a dataframe with per row a track and corresponding features
        """
        nmf = self.get_tracks_from_playlist(playlist_id)

        # collect audio features for the nmf tracks
        audio_features = (
            pd.concat(
                [
                    pd.DataFrame(self.get_audio_features(track_id=uid))
                    for uid in nmf.track_id.values
                ]
            )
            .rename(columns={"id": "track_id"})
            .drop(columns=["type", "uri", "track_href", "analysis_url"])
        )

        # merge nmf tracks with audio features
        df = pd.merge(nmf, audio_features, on="track_id")

        # add other track info (release_date, explicit, artist_ids)
        release_date_dict = {}
        explicit_dict = {}
        artist_ids_dict = {}
        for track_id in df.track_id.values:
            release_date, explicit, artist_ids = self.get_other_track_info(track_id)
            release_date_dict[track_id] = release_date
            explicit_dict[track_id] = explicit
            artist_ids_dict[track_id] = artist_ids

        # map values to columns; Series.map with a dict is the idiomatic,
        # single-pass equivalent of the original apply/lambda chains
        df["release_date"] = df["track_id"].map(release_date_dict)
        df["explicit"] = df["track_id"].map(explicit_dict)
        df["artist_ids"] = df["track_id"].map(artist_ids_dict)

        # add artist-level features; iterate the two columns in lockstep
        # instead of re-filtering the whole frame per track (was O(n^2))
        artist_followers_dict = {}
        artist_popularity_dict = {}
        for track_id, artist_ids in zip(df["track_id"], df["artist_ids"]):
            followers, artist_popularity = self.get_artist_features(artist_ids)
            artist_followers_dict[track_id] = followers
            artist_popularity_dict[track_id] = artist_popularity

        df["followers"] = df["track_id"].map(artist_followers_dict)
        df["artist_popularity"] = df["track_id"].map(artist_popularity_dict)
        return df

    def get_new_music_friday(self) -> str:
        """Gets tracks and their features for the Spotify playlist 'New Music Friday'.

        Returns:
            object: the playlist with features as a json
        """
        # collect tracks from global new music friday (nmf) playlist:
        # https://open.spotify.com/playlist/37i9dQZF1DX4JAvHpjipBk?si=a4f193c4d62c4d05
        new_music_friday_playlist_id = "37i9dQZF1DX4JAvHpjipBk"
        try:
            playlist_df = self.get_playlist_with_features(new_music_friday_playlist_id)
        except (ReadTimeoutError, MaxRetryError):
            # in case the https request was not successful, retry once
            # after a cool-down period
            logging.info("Failed to get the playlist. Trying again after 5 minutes.")
            time.sleep(300)
            playlist_df = self.get_playlist_with_features(new_music_friday_playlist_id)
        return playlist_df.to_json()
if __name__ == "__main__":
    # Pipe-separated log format: timestamp|level|message.
    FORMAT = "%(asctime)s|%(levelname)s|%(message)s"
    logging.basicConfig(format=FORMAT, level=logging.INFO)
    scraper = Scraper()
    app = Flask(__name__)
    # Define API endpoints: POST /new_music_friday/ returns the scraped
    # playlist (tracks + features) serialized as a JSON string.
    app.add_url_rule(
        "/new_music_friday/", view_func=scraper.get_new_music_friday, methods=["POST"]
    )
    # Serve via waitress (production WSGI server) on port 8080.
    waitress.serve(app, port=8080)
| [
37811,
7391,
326,
15881,
274,
262,
3298,
26778,
33178,
705,
3605,
2647,
1216,
2567,
4458,
198,
198,
3064,
8308,
2716,
8339,
389,
11351,
1740,
422,
262,
33178,
290,
35601,
198,
4480,
3224,
3033,
6492,
2884,
262,
26778,
7824,
13,
198,
378... | 2.301419 | 4,721 |
from datasets import load_dataset

datasets = load_dataset("katanaml/cord")

# Pick one validation sample to run token-classification inference on.
example = datasets["validation"][1]

from PIL import Image, ImageDraw, ImageFont

image = Image.open(example['image_path'])
image = image.convert("RGB")

from transformers import LayoutLMv2Processor

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
encoding = processor(image, return_offsets_mapping=True, return_tensors="pt")
# offset_mapping is only needed to detect sub-word tokens; the model
# itself must not receive it.
offset_mapping = encoding.pop('offset_mapping')

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for k, v in encoding.items():
    encoding[k] = v.to(device)

from transformers import LayoutLMv2ForTokenClassification

# load the fine-tuned model from the hub
model = LayoutLMv2ForTokenClassification.from_pretrained("katanaml/layoutlmv2-finetuned-cord")
id2label = model.config.id2label
model.to(device)

# forward pass
outputs = model(**encoding)

predictions = outputs.logits.argmax(-1).squeeze().tolist()
token_boxes = encoding.bbox.squeeze().tolist()
width, height = image.size


def unnormalize_box(box, width, height):
    """Map a bbox from LayoutLM's normalized 0-1000 space back to pixel coords.

    NOTE(review): this helper (and iob_to_label below) was called but never
    defined in the original script, which raised NameError at runtime.
    """
    return [
        width * (box[0] / 1000),
        height * (box[1] / 1000),
        width * (box[2] / 1000),
        height * (box[3] / 1000),
    ]


def iob_to_label(label):
    """Strip the IOB prefix ('B-' / 'I-') from a label; empty result -> 'other'."""
    label = label[2:]
    return label if label else "other"


import numpy as np

# A token is a sub-word continuation when its character offset does not
# start at 0; keep only the first sub-token of each word.
is_subword = np.array(offset_mapping.squeeze().tolist())[:, 0] != 0
true_predictions = [id2label[pred] for idx, pred in enumerate(predictions) if not is_subword[idx]]
true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes) if not is_subword[idx]]

print(true_predictions)
print(true_boxes)
print(is_subword)

import PIL

draw = ImageDraw.Draw(image)
font = ImageFont.load_default()

# One random color per label id.  The original hard-coded 30 colors, which
# breaks with IndexError for models that define more labels.
label_ints = np.random.randint(0, len(PIL.ImageColor.colormap.items()), len(id2label))
label_color_pil = [k for k, _ in PIL.ImageColor.colormap.items()]
label_color = [label_color_pil[i] for i in label_ints]
label2color = {}
for k, v in id2label.items():
    # Keys are lowercased to match the .lower() lookup below; the original
    # kept the label case in the keys, which raised KeyError for any label
    # containing upper-case characters.
    label2color[iob_to_label(v).lower()] = label_color[k]

for prediction, box in zip(true_predictions, true_boxes):
    predicted_label = iob_to_label(prediction).lower()
    draw.rectangle(box, outline=label2color[predicted_label])
    draw.text((box[0] + 10, box[1] - 10), text=predicted_label, fill=label2color[predicted_label], font=font)

image.save('docs/invoice_inference_result.jpg')
6738,
40522,
1330,
3440,
62,
19608,
292,
316,
198,
198,
19608,
292,
1039,
796,
3440,
62,
19608,
292,
316,
7203,
74,
39036,
43695,
14,
66,
585,
4943,
198,
198,
2,
198,
198,
20688,
796,
40522,
14692,
12102,
341,
1,
7131,
16,
60,
198,
... | 2.72235 | 868 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIClient
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
... | 3.324675 | 77 |
#!/usr/bin/env python
# Minimal CGI script: emit the Content-Type header block, then an HTML body.
# The original used the Python 2 print statement ('print "..."'), which is a
# SyntaxError under Python 3; a single parenthesized argument to print() is
# valid and behaves identically under both Python 2 and Python 3.

print("Content-type: text/html\n\n")
print("<html>Hello world!</html>")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
4798,
366,
19746,
12,
4906,
25,
2420,
14,
6494,
59,
77,
59,
77,
1,
198,
4798,
33490,
6494,
29,
15496,
995,
0,
3556,
6494,
24618,
198
] | 2.583333 | 36 |